Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accessibility/Kconfig | 12
-rw-r--r--  drivers/ata/Kconfig | 13
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/ata_generic.c | 6
-rw-r--r--  drivers/ata/ata_piix.c | 25
-rw-r--r--  drivers/ata/libata-core.c | 1
-rw-r--r--  drivers/ata/libata-eh.c | 2
-rw-r--r--  drivers/ata/libata-sff.c | 6
-rw-r--r--  drivers/ata/pata_acpi.c | 6
-rw-r--r--  drivers/ata/pata_sch.c | 206
-rw-r--r--  drivers/ata/sata_inic162x.c | 646
-rw-r--r--  drivers/ata/sata_mv.c | 690
-rw-r--r--  drivers/atm/iphase.h | 3
-rw-r--r--  drivers/base/class.c | 1
-rw-r--r--  drivers/base/core.c | 8
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/base/sys.c | 3
-rw-r--r--  drivers/block/aoe/aoecmd.c | 10
-rw-r--r--  drivers/char/Kconfig | 1
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 10
-rw-r--r--  drivers/char/serial167.c | 2
-rw-r--r--  drivers/char/sx.c | 9
-rw-r--r--  drivers/char/synclink_gt.c | 2
-rw-r--r--  drivers/char/tty_io.c | 7
-rw-r--r--  drivers/char/vt.c | 6
-rw-r--r--  drivers/edac/edac_core.h | 2
-rw-r--r--  drivers/edac/edac_device.c | 6
-rw-r--r--  drivers/edac/edac_mc.c | 6
-rw-r--r--  drivers/edac/edac_pci.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-au1550.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 47
-rw-r--r--  drivers/i2c/busses/i2c-sibyte.c | 6
-rw-r--r--  drivers/i2c/i2c-core.c | 14
-rw-r--r--  drivers/ide/Kconfig | 54
-rw-r--r--  drivers/ide/ide-iops.c | 2
-rw-r--r--  drivers/ide/legacy/falconide.c | 2
-rw-r--r--  drivers/ide/mips/swarm.c | 16
-rw-r--r--  drivers/ide/pci/alim15x3.c | 42
-rw-r--r--  drivers/ide/pci/cs5520.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 107
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.h | 12
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_resource.c | 36
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c | 75
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 68
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h | 8
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 13
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c | 7
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 158
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c | 91
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c | 26
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c | 95
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 80
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 18
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c | 237
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c | 291
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c | 332
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c | 44
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c | 57
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c | 66
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.h | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 178
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 64
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 4
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 1
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 47
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r--  drivers/input/misc/Kconfig | 2
-rw-r--r--  drivers/input/serio/hp_sdc.c | 1
-rw-r--r--  drivers/input/serio/i8042-io.h | 2
-rw-r--r--  drivers/isdn/capi/capiutil.c | 6
-rw-r--r--  drivers/isdn/hysdn/Kconfig | 2
-rw-r--r--  drivers/isdn/hysdn/boardergo.c | 14
-rw-r--r--  drivers/macintosh/adb.c | 30
-rw-r--r--  drivers/macintosh/therm_pm72.c | 31
-rw-r--r--  drivers/macintosh/windfarm_smu_sat.c | 10
-rw-r--r--  drivers/md/linear.c | 1
-rw-r--r--  drivers/md/multipath.c | 1
-rw-r--r--  drivers/md/raid0.c | 1
-rw-r--r--  drivers/md/raid1.c | 4
-rw-r--r--  drivers/md/raid10.c | 6
-rw-r--r--  drivers/md/raid5.c | 26
-rw-r--r--  drivers/media/Kconfig | 3
-rw-r--r--  drivers/media/Makefile | 2
-rw-r--r--  drivers/media/common/tuners/Kconfig | 50
-rw-r--r--  drivers/media/common/tuners/Makefile | 1
-rw-r--r--  drivers/media/common/tuners/mxl5005s.c | 4110
-rw-r--r--  drivers/media/common/tuners/mxl5005s.h | 131
-rw-r--r--  drivers/media/common/tuners/tda18271-common.c | 24
-rw-r--r--  drivers/media/common/tuners/tda18271-fe.c | 168
-rw-r--r--  drivers/media/common/tuners/tda18271-priv.h | 9
-rw-r--r--  drivers/media/common/tuners/tea5767.c | 6
-rw-r--r--  drivers/media/common/tuners/xc5000.c | 9
-rw-r--r--  drivers/media/common/tuners/xc5000.h | 22
-rw-r--r--  drivers/media/common/tuners/xc5000_priv.h | 2
-rw-r--r--  drivers/media/dvb/b2c2/flexcop-fe-tuner.c | 2
-rw-r--r--  drivers/media/dvb/bt8xx/Kconfig | 1
-rw-r--r--  drivers/media/dvb/cinergyT2/Kconfig | 2
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_ca_en50221.c | 28
-rw-r--r--  drivers/media/dvb/dvb-usb/Kconfig | 1
-rw-r--r--  drivers/media/dvb/frontends/Kconfig | 18
-rw-r--r--  drivers/media/dvb/frontends/itd1000.c | 2
-rw-r--r--  drivers/media/dvb/frontends/mt312.c | 9
-rw-r--r--  drivers/media/dvb/frontends/mt312.h | 4
-rw-r--r--  drivers/media/dvb/ttpci/Kconfig | 2
-rw-r--r--  drivers/media/dvb/ttusb-dec/Kconfig | 1
-rw-r--r--  drivers/media/video/Kconfig | 10
-rw-r--r--  drivers/media/video/Makefile | 2
-rw-r--r--  drivers/media/video/au0828/Kconfig | 3
-rw-r--r--  drivers/media/video/au0828/au0828-dvb.c | 6
-rw-r--r--  drivers/media/video/bt8xx/Kconfig | 3
-rw-r--r--  drivers/media/video/cx18/Kconfig | 5
-rw-r--r--  drivers/media/video/cx18/cx18-cards.c | 25
-rw-r--r--  drivers/media/video/cx18/cx18-cards.h | 5
-rw-r--r--  drivers/media/video/cx18/cx18-driver.c | 31
-rw-r--r--  drivers/media/video/cx18/cx18-driver.h | 3
-rw-r--r--  drivers/media/video/cx18/cx18-dvb.c | 40
-rw-r--r--  drivers/media/video/cx18/cx18-fileops.c | 6
-rw-r--r--  drivers/media/video/cx18/cx18-fileops.h | 9
-rw-r--r--  drivers/media/video/cx18/cx18-gpio.c | 47
-rw-r--r--  drivers/media/video/cx18/cx18-i2c.c | 1
-rw-r--r--  drivers/media/video/cx18/cx18-queue.c | 22
-rw-r--r--  drivers/media/video/cx18/cx18-queue.h | 4
-rw-r--r--  drivers/media/video/cx18/cx18-streams.c | 13
-rw-r--r--  drivers/media/video/cx18/cx18-streams.h | 2
-rw-r--r--  drivers/media/video/cx23885/Kconfig | 6
-rw-r--r--  drivers/media/video/cx23885/cx23885-cards.c | 36
-rw-r--r--  drivers/media/video/cx23885/cx23885-dvb.c | 7
-rw-r--r--  drivers/media/video/cx25840/Kconfig | 1
-rw-r--r--  drivers/media/video/cx88/Kconfig | 6
-rw-r--r--  drivers/media/video/cx88/cx88-dvb.c | 253
-rw-r--r--  drivers/media/video/em28xx/Kconfig | 3
-rw-r--r--  drivers/media/video/em28xx/em28xx-cards.c | 8
-rw-r--r--  drivers/media/video/em28xx/em28xx-dvb.c | 1
-rw-r--r--  drivers/media/video/ivtv/Kconfig | 4
-rw-r--r--  drivers/media/video/ivtv/ivtv-controls.c | 4
-rw-r--r--  drivers/media/video/ivtv/ivtv-driver.c | 8
-rw-r--r--  drivers/media/video/ivtv/ivtv-fileops.c | 2
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.c | 16
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.h | 6
-rw-r--r--  drivers/media/video/ivtv/ivtv-queue.c | 12
-rw-r--r--  drivers/media/video/ivtv/ivtv-streams.c | 13
-rw-r--r--  drivers/media/video/ivtv/ivtv-streams.h | 2
-rw-r--r--  drivers/media/video/ivtv/ivtv-vbi.c | 3
-rw-r--r--  drivers/media/video/ivtv/ivtv-yuv.c | 2
-rw-r--r--  drivers/media/video/ivtv/ivtvfb.c | 6
-rw-r--r--  drivers/media/video/mt9m001.c | 5
-rw-r--r--  drivers/media/video/mt9v022.c | 7
-rw-r--r--  drivers/media/video/pvrusb2/Kconfig | 4
-rw-r--r--  drivers/media/video/saa7134/Kconfig | 3
-rw-r--r--  drivers/media/video/saa7134/saa7134-core.c | 6
-rw-r--r--  drivers/media/video/saa7134/saa7134-dvb.c | 140
-rw-r--r--  drivers/media/video/saa7134/saa7134-video.c | 2
-rw-r--r--  drivers/media/video/stk-webcam.c | 7
-rw-r--r--  drivers/media/video/tcm825x.c | 7
-rw-r--r--  drivers/media/video/tlv320aic23b.c | 6
-rw-r--r--  drivers/media/video/tuner-core.c | 38
-rw-r--r--  drivers/media/video/tvaudio.c | 13
-rw-r--r--  drivers/media/video/tveeprom.c | 10
-rw-r--r--  drivers/media/video/usbvision/Kconfig | 2
-rw-r--r--  drivers/misc/kgdbts.c | 75
-rw-r--r--  drivers/misc/sgi-xp/xp.h | 305
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c | 44
-rw-r--r--  drivers/misc/sgi-xp/xpc.h | 83
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c | 186
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 60
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c | 74
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c | 22
-rw-r--r--  drivers/mmc/host/mmci.c | 14
-rw-r--r--  drivers/mmc/host/sdhci.h | 2
-rw-r--r--  drivers/mtd/maps/Kconfig | 9
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/mpc1211.c | 80
-rw-r--r--  drivers/net/3c59x.c | 73
-rw-r--r--  drivers/net/Kconfig | 16
-rw-r--r--  drivers/net/appletalk/cops.c | 16
-rw-r--r--  drivers/net/atlx/atl1.c | 157
-rw-r--r--  drivers/net/atlx/atl1.h | 2
-rw-r--r--  drivers/net/atlx/atlx.c | 2
-rw-r--r--  drivers/net/atlx/atlx.h | 7
-rw-r--r--  drivers/net/bonding/bond_main.c | 24
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 28
-rw-r--r--  drivers/net/cxgb3/adapter.h | 1
-rw-r--r--  drivers/net/cxgb3/common.h | 1
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 44
-rw-r--r--  drivers/net/cxgb3/regs.h | 8
-rw-r--r--  drivers/net/cxgb3/sge.c | 29
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 28
-rw-r--r--  drivers/net/dm9000.c | 37
-rw-r--r--  drivers/net/e1000e/defines.h | 10
-rw-r--r--  drivers/net/e1000e/e1000.h | 7
-rw-r--r--  drivers/net/e1000e/ethtool.c | 45
-rw-r--r--  drivers/net/e1000e/hw.h | 22
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 83
-rw-r--r--  drivers/net/e1000e/netdev.c | 330
-rw-r--r--  drivers/net/e1000e/phy.c | 278
-rw-r--r--  drivers/net/eexpress.c | 11
-rw-r--r--  drivers/net/ehea/ehea.h | 27
-rw-r--r--  drivers/net/ehea/ehea_main.c | 25
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 286
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 3
-rw-r--r--  drivers/net/gianfar.c | 7
-rw-r--r--  drivers/net/gianfar.h | 3
-rw-r--r--  drivers/net/gianfar_sysfs.c | 10
-rw-r--r--  drivers/net/irda/nsc-ircc.c | 6
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 5
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/mlx4/mr.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 160
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 730
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h | 56
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp_gen_header.h | 39
-rw-r--r--  drivers/net/niu.c | 64
-rw-r--r--  drivers/net/niu.h | 9
-rw-r--r--  drivers/net/pcnet32.c | 61
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/ppp_generic.c | 1
-rw-r--r--  drivers/net/pppol2tp.c | 13
-rw-r--r--  drivers/net/ps3_gelic_wireless.c | 2
-rw-r--r--  drivers/net/sfc/Makefile | 4
-rw-r--r--  drivers/net/sfc/boards.h | 2
-rw-r--r--  drivers/net/sfc/efx.c | 4
-rw-r--r--  drivers/net/sfc/enum.h | 49
-rw-r--r--  drivers/net/sfc/ethtool.c | 259
-rw-r--r--  drivers/net/sfc/falcon.c | 8
-rw-r--r--  drivers/net/sfc/falcon_hwdefs.h | 16
-rw-r--r--  drivers/net/sfc/falcon_xmac.c | 82
-rw-r--r--  drivers/net/sfc/mdio_10g.c | 78
-rw-r--r--  drivers/net/sfc/mdio_10g.h | 24
-rw-r--r--  drivers/net/sfc/net_driver.h | 28
-rw-r--r--  drivers/net/sfc/rx.c | 11
-rw-r--r--  drivers/net/sfc/selftest.c | 717
-rw-r--r--  drivers/net/sfc/selftest.h | 50
-rw-r--r--  drivers/net/sfc/sfe4001.c | 14
-rw-r--r--  drivers/net/sfc/tenxpress.c | 91
-rw-r--r--  drivers/net/sfc/tx.c | 664
-rw-r--r--  drivers/net/sfc/xfp_phy.c | 36
-rw-r--r--  drivers/net/sky2.h | 4
-rw-r--r--  drivers/net/tulip/uli526x.c | 38
-rw-r--r--  drivers/net/ucc_geth.c | 278
-rw-r--r--  drivers/net/ucc_geth.h | 48
-rw-r--r--  drivers/net/ucc_geth_ethtool.c | 6
-rw-r--r--  drivers/net/ucc_geth_mii.c | 4
-rw-r--r--  drivers/net/usb/asix.c | 4
-rw-r--r--  drivers/net/wan/Kconfig | 4
-rw-r--r--  drivers/net/wan/cosa.c | 14
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 2
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 12
-rw-r--r--  drivers/net/wan/lapbether.c | 1
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 1
-rw-r--r--  drivers/net/wan/sealevel.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 11
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 31
-rw-r--r--  drivers/net/wireless/strip.c | 2
-rw-r--r--  drivers/net/wireless/wavelan.c | 4
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 6
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 6
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 2
-rw-r--r--  drivers/oprofile/cpu_buffer.h | 2
-rw-r--r--  drivers/pci/intel-iommu.c | 6
-rw-r--r--  drivers/pci/pci-acpi.c | 109
-rw-r--r--  drivers/pci/probe.c | 33
-rw-r--r--  drivers/pci/quirks.c | 1
-rw-r--r--  drivers/pnp/base.h | 1
-rw-r--r--  drivers/pnp/interface.c | 2
-rw-r--r--  drivers/pnp/quirks.c | 132
-rw-r--r--  drivers/pnp/resource.c | 2
-rw-r--r--  drivers/pnp/support.c | 8
-rw-r--r--  drivers/rtc/rtc-ds1511.c | 4
-rw-r--r--  drivers/rtc/rtc-lib.c | 2
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 3
-rw-r--r--  drivers/rtc/rtc-s35390a.c | 2
-rw-r--r--  drivers/rtc/rtc-sh.c | 14
-rw-r--r--  drivers/s390/char/tty3270.c | 15
-rw-r--r--  drivers/s390/cio/blacklist.c | 323
-rw-r--r--  drivers/s390/cio/cio.c | 39
-rw-r--r--  drivers/s390/cio/cio.h | 2
-rw-r--r--  drivers/s390/cio/cio_debug.h | 6
-rw-r--r--  drivers/s390/cio/css.c | 4
-rw-r--r--  drivers/s390/cio/device.c | 25
-rw-r--r--  drivers/s390/cio/device_fsm.c | 44
-rw-r--r--  drivers/s390/cio/device_id.c | 4
-rw-r--r--  drivers/s390/cio/device_pgid.c | 12
-rw-r--r--  drivers/s390/s390mach.c | 3
-rw-r--r--  drivers/sbus/char/bpp.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/aha152x.c | 8
-rw-r--r--  drivers/scsi/dpt_i2o.c | 78
-rw-r--r--  drivers/scsi/dpti.h | 13
-rw-r--r--  drivers/scsi/gdth.c | 51
-rw-r--r--  drivers/scsi/libiscsi.c | 29
-rw-r--r--  drivers/scsi/qla1280.c | 4
-rw-r--r--  drivers/serial/Kconfig | 6
-rw-r--r--  drivers/serial/bfin_5xx.c | 98
-rw-r--r--  drivers/serial/crisv10.c | 2
-rw-r--r--  drivers/serial/mcfserial.c | 1
-rw-r--r--  drivers/serial/serial_core.c | 3
-rw-r--r--  drivers/serial/sh-sci.c | 32
-rw-r--r--  drivers/serial/sh-sci.h | 27
-rw-r--r--  drivers/spi/Kconfig | 1
-rw-r--r--  drivers/spi/mpc52xx_psc_spi.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 2
-rw-r--r--  drivers/spi/spi_mpc83xx.c | 411
-rw-r--r--  drivers/usb/c67x00/c67x00-ll-hpi.c | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 3
-rw-r--r--  drivers/usb/core/endpoint.c | 11
-rw-r--r--  drivers/usb/core/message.c | 1
-rw-r--r--  drivers/usb/core/sysfs.c | 137
-rw-r--r--  drivers/usb/core/usb.c | 1
-rw-r--r--  drivers/usb/core/usb.h | 4
-rw-r--r--  drivers/usb/gadget/amd5536udc.c | 10
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.c | 48
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.c | 17
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.h | 8
-rw-r--r--  drivers/usb/gadget/serial.c | 778
-rw-r--r--  drivers/usb/host/Kconfig | 2
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 4
-rw-r--r--  drivers/usb/host/isp1760-if.c | 2
-rw-r--r--  drivers/usb/host/ohci-sm501.c | 2
-rw-r--r--  drivers/usb/misc/ldusb.c | 4
-rw-r--r--  drivers/usb/misc/usbtest.c | 5
-rw-r--r--  drivers/usb/serial/Kconfig | 9
-rw-r--r--  drivers/usb/serial/Makefile | 1
-rw-r--r--  drivers/usb/serial/cp2101.c | 1
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 6
-rw-r--r--  drivers/usb/serial/moto_modem.c | 70
-rw-r--r--  drivers/usb/serial/option.c | 9
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 71
-rw-r--r--  drivers/video/Kconfig | 1
-rw-r--r--  drivers/video/atmel_lcdfb.c | 11
-rw-r--r--  drivers/video/bw2.c | 8
-rw-r--r--  drivers/video/cg14.c | 5
-rw-r--r--  drivers/video/cg3.c | 8
-rw-r--r--  drivers/video/cg6.c | 6
-rw-r--r--  drivers/video/console/fbcon.c | 31
-rw-r--r--  drivers/video/ffb.c | 7
-rw-r--r--  drivers/video/geode/lxfb_ops.c | 22
-rw-r--r--  drivers/video/leo.c | 7
-rw-r--r--  drivers/video/logo/Kconfig | 10
-rw-r--r--  drivers/video/logo/Makefile | 2
-rw-r--r--  drivers/video/logo/logo.c | 10
-rw-r--r--  drivers/video/logo/logo_blackfin_clut224.ppm | 1127
-rw-r--r--  drivers/video/logo/logo_blackfin_vga16.ppm | 1127
-rw-r--r--  drivers/video/p9100.c | 7
-rw-r--r--  drivers/video/pnx4008/pnxrgbfb.c | 11
-rw-r--r--  drivers/video/pxafb.c | 8
-rw-r--r--  drivers/video/sbuslib.c | 9
-rw-r--r--  drivers/video/sbuslib.h | 5
-rw-r--r--  drivers/video/sunxvr2500.c | 3
-rw-r--r--  drivers/video/sunxvr500.c | 3
-rw-r--r--  drivers/video/tcx.c | 25
-rw-r--r--  drivers/video/tridentfb.c | 25
366 files changed, 16360 insertions(+), 4944 deletions(-)
diff --git a/drivers/accessibility/Kconfig b/drivers/accessibility/Kconfig
index 1264c4b9809..ef3b65bfdd0 100644
--- a/drivers/accessibility/Kconfig
+++ b/drivers/accessibility/Kconfig
@@ -1,7 +1,17 @@
menuconfig ACCESSIBILITY
bool "Accessibility support"
---help---
- Enable a submenu where accessibility items may be enabled.
+ Accessibility handles all special kinds of hardware devices or
+ software adapters which help people with disabilities (e.g.
+ blindness) to use computers.
+
+ That includes braille devices, speech synthesis, keyboard
+ remapping, etc.
+
+ Say Y here to get to see options for accessibility.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
If unsure, say N.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1c11df9a5f3..9bf2986a278 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -205,8 +205,8 @@ config SATA_VITESSE
If unsure, say N.
config SATA_INIC162X
- tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Initio 162x SATA support"
+ depends on PCI
help
This option enables support for Initio 162x Serial ATA.
@@ -697,6 +697,15 @@ config PATA_SCC
If unsure, say N.
+config PATA_SCH
+ tristate "Intel SCH PATA support"
+ depends on PCI
+ help
+ This option enables support for Intel SCH PATA on the Intel
+ SCH (US15W, US15L, UL11L) series host controllers.
+
+ If unsure, say N.
+
config PATA_BF54X
tristate "Blackfin 54x ATAPI support"
depends on BF542 || BF548 || BF549
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index b693d829383..674965fa326 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_PATA_SIS) += pata_sis.o
obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
obj-$(CONFIG_PATA_SCC) += pata_scc.o
+obj-$(CONFIG_PATA_SCH) += pata_sch.o
obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 8cace9aa9c0..97f83fb2ee2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1267,9 +1267,7 @@ static int ahci_check_ready(struct ata_link *link)
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
- if (!(status & ATA_BUSY))
- return 1;
- return 0;
+ return ata_check_ready(status);
}
static int ahci_softreset(struct ata_link *link, unsigned int *class,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 47aeccd52fa..75a406f5e69 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -152,6 +152,12 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
if (dev->vendor == PCI_VENDOR_ID_AL)
ata_pci_bmdma_clear_simplex(dev);
+ if (dev->vendor == PCI_VENDOR_ID_ATI) {
+ int rc = pcim_enable_device(dev);
+ if (rc < 0)
+ return rc;
+ pcim_pin_device(dev);
+ }
return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL);
}
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ea2c7649d39..a9027b8fbdd 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1348,6 +1348,8 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct piix_host_priv *hpriv = host->private_data;
+ struct ata_device *dev0 = &host->ports[0]->link.device[0];
+ u32 scontrol;
int i;
/* check for availability */
@@ -1366,6 +1368,29 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
return;
hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
+
+ /* SCR access via SIDPR doesn't work on some configurations.
+ * Give it a test drive by inhibiting power save modes which
+ * we'll do anyway.
+ */
+ scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+ /* if IPM is already 3, SCR access is probably working. Don't
+ * un-inhibit power save modes as BIOS might have inhibited
+ * them for a reason.
+ */
+ if ((scontrol & 0xf00) != 0x300) {
+ scontrol |= 0x300;
+ piix_sidpr_write(dev0, SCR_CONTROL, scontrol);
+ scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+
+ if ((scontrol & 0xf00) != 0x300) {
+ dev_printk(KERN_INFO, host->dev, "SCR access via "
+ "SIDPR is available but doesn't work\n");
+ return;
+ }
+ }
+
host->ports[0]->ops = &piix_sidpr_sata_ops;
host->ports[1]->ops = &piix_sidpr_sata_ops;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3bc48853820..927b692d723 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6292,6 +6292,7 @@ EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
+EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 61dcd0026c6..62e033146be 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1357,7 +1357,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
* LOCKING:
* Kernel thread context (may sleep).
*/
-static void ata_eh_analyze_ncq_error(struct ata_link *link)
+void ata_eh_analyze_ncq_error(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2ec65a8fda7..3c2d2289f85 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -314,11 +314,7 @@ static int ata_sff_check_ready(struct ata_link *link)
{
u8 status = link->ap->ops->sff_check_status(link->ap);
- if (!(status & ATA_BUSY))
- return 1;
- if (status == 0xff)
- return -ENODEV;
- return 0;
+ return ata_check_ready(status);
}
/**
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c5f91e62994..fbe60571155 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -259,6 +259,12 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
.port_ops = &pacpi_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
+ if (pdev->vendor == PCI_VENDOR_ID_ATI) {
+ int rc = pcim_enable_device(pdev);
+ if (rc < 0)
+ return rc;
+ pcim_pin_device(pdev);
+ }
return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL);
}
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
new file mode 100644
index 00000000000..c8cc027789f
--- /dev/null
+++ b/drivers/ata/pata_sch.c
@@ -0,0 +1,206 @@
+/*
+ * pata_sch.c - Intel SCH PATA controllers
+ *
+ * Copyright (c) 2008 Alek Du <alek.du@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*
+ * Supports:
+ * Intel SCH (AF82US15W, AF82US15L, AF82UL11L) chipsets -- see spec at:
+ * http://download.intel.com/design/chipsets/embedded/datashts/319537.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_sch"
+#define DRV_VERSION "0.2"
+
+/* see SCH datasheet page 351 */
+enum {
+ D0TIM = 0x80, /* Device 0 Timing Register */
+ D1TIM = 0x84, /* Device 1 Timing Register */
+ PM = 0x07, /* PIO Mode Bit Mask */
+ MDM = (0x03 << 8), /* Multi-word DMA Mode Bit Mask */
+ UDM = (0x07 << 16), /* Ultra DMA Mode Bit Mask */
+ PPE = (1 << 30), /* Prefetch/Post Enable */
+ USD = (1 << 31), /* Use Synchronous DMA */
+};
+
+static int sch_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev);
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev);
+
+static const struct pci_device_id sch_pci_tbl[] = {
+ /* Intel SCH PATA Controller */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SCH_IDE), 0 },
+ { } /* terminate list */
+};
+
+static struct pci_driver sch_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sch_pci_tbl,
+ .probe = sch_init_one,
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
+};
+
+static struct scsi_host_template sch_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations sch_pata_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_unknown,
+ .set_piomode = sch_set_piomode,
+ .set_dmamode = sch_set_dmamode,
+};
+
+static struct ata_port_info sch_port_info = {
+ .flags = 0,
+ .pio_mask = ATA_PIO4, /* pio0-4 */
+ .mwdma_mask = ATA_MWDMA2, /* mwdma0-2 */
+ .udma_mask = ATA_UDMA5, /* udma0-5 */
+ .port_ops = &sch_pata_ops,
+};
+
+MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
+MODULE_DESCRIPTION("SCSI low-level driver for Intel SCH PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sch_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * sch_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: ATA device
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sch_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int port = adev->devno ? D1TIM : D0TIM;
+ unsigned int data;
+
+ pci_read_config_dword(dev, port, &data);
+ /* see SCH datasheet page 351 */
+ /* set PIO mode */
+ data &= ~(PM | PPE);
+ data |= pio;
+ /* enable PPE for block device */
+ if (adev->class == ATA_DEV_ATA)
+ data |= PPE;
+ pci_write_config_dword(dev, port, data);
+}
+
+/**
+ * sch_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: ATA device
+ *
+ * Set MW/UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int dma_mode = adev->dma_mode;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int port = adev->devno ? D1TIM : D0TIM;
+ unsigned int data;
+
+ pci_read_config_dword(dev, port, &data);
+ /* see SCH datasheet page 351 */
+ if (dma_mode >= XFER_UDMA_0) {
+ /* enable Synchronous DMA mode */
+ data |= USD;
+ data &= ~UDM;
+ data |= (dma_mode - XFER_UDMA_0) << 16;
+ } else { /* must be MWDMA mode, since we masked SWDMA already */
+ data &= ~(USD | MDM);
+ data |= (dma_mode - XFER_MW_DMA_0) << 8;
+ }
+ pci_write_config_dword(dev, port, data);
+}
+
+/**
+ * sch_init_one - Register SCH ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in sch_pci_tbl matching with @pdev
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int __devinit sch_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
+ struct ata_host *host;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ /* enable device and prepare host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+ pci_set_master(pdev);
+ return ata_pci_sff_activate_host(host, ata_sff_interrupt, &sch_sht);
+}
+
+static int __init sch_init(void)
+{
+ return pci_register_driver(&sch_pci_driver);
+}
+
+static void __exit sch_exit(void)
+{
+ pci_unregister_driver(&sch_pci_driver);
+}
+
+module_init(sch_init);
+module_exit(sch_exit);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index d27bb9a2568..3ead02fe379 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -10,13 +10,33 @@
* right. Documentation is available at initio's website but it only
* documents registers (not programming model).
*
- * - ATA disks work.
- * - Hotplug works.
- * - ATAPI read works but burning doesn't. This thing is really
- * peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
- * ATAPI DMA WRITE should be programmed. If you've got a clue, be
- * my guest.
- * - Both STR and STD work.
+ * This driver has interesting history. The first version was written
+ * from the documentation and a 2.4 IDE driver posted on a Taiwan
+ * company, which didn't use any IDMA features and couldn't handle
+ * LBA48. The resulting driver couldn't handle LBA48 devices either
+ * making it pretty useless.
+ *
+ * After a while, initio picked the driver up, renamed it to
+ * sata_initio162x, updated it to use IDMA for ATA DMA commands and
+ * posted it on their website. It only used ATA_PROT_DMA for IDMA and
+ * attaching both devices and issuing IDMA and !IDMA commands
+ * simultaneously broke it due to PIRQ masking interaction but it did
+ * show how to use the IDMA (ADMA + some initio specific twists)
+ * engine.
+ *
+ * Then, I picked up their changes again and here's the usable driver
+ * which uses IDMA for everything. Everything works now including
+ * LBA48, CD/DVD burning, suspend/resume and hotplug. There are some
+ * issues tho. Result Tf is not resported properly, NCQ isn't
+ * supported yet and CD/DVD writing works with DMA assisted PIO
+ * protocol (which, for native SATA devices, shouldn't cause any
+ * noticeable difference).
+ *
+ * Anyways, so, here's finally a working driver for inic162x. Enjoy!
+ *
+ * initio: If you guys wanna improve the driver regarding result TF
+ * access and other stuff, please feel free to contact me. I'll be
+ * happy to assist.
*/
#include <linux/kernel.h>
@@ -28,13 +48,19 @@
#include <scsi/scsi_device.h>
#define DRV_NAME "sata_inic162x"
-#define DRV_VERSION "0.3"
+#define DRV_VERSION "0.4"
enum {
- MMIO_BAR = 5,
+ MMIO_BAR_PCI = 5,
+ MMIO_BAR_CARDBUS = 1,
NR_PORTS = 2,
+ IDMA_CPB_TBL_SIZE = 4 * 32,
+
+ INIC_DMA_BOUNDARY = 0xffffff,
+
+ HOST_ACTRL = 0x08,
HOST_CTL = 0x7c,
HOST_STAT = 0x7e,
HOST_IRQ_STAT = 0xbc,
@@ -43,22 +69,37 @@ enum {
PORT_SIZE = 0x40,
/* registers for ATA TF operation */
- PORT_TF = 0x00,
- PORT_ALT_STAT = 0x08,
+ PORT_TF_DATA = 0x00,
+ PORT_TF_FEATURE = 0x01,
+ PORT_TF_NSECT = 0x02,
+ PORT_TF_LBAL = 0x03,
+ PORT_TF_LBAM = 0x04,
+ PORT_TF_LBAH = 0x05,
+ PORT_TF_DEVICE = 0x06,
+ PORT_TF_COMMAND = 0x07,
+ PORT_TF_ALT_STAT = 0x08,
PORT_IRQ_STAT = 0x09,
PORT_IRQ_MASK = 0x0a,
PORT_PRD_CTL = 0x0b,
PORT_PRD_ADDR = 0x0c,
PORT_PRD_XFERLEN = 0x10,
+ PORT_CPB_CPBLAR = 0x18,
+ PORT_CPB_PTQFIFO = 0x1c,
/* IDMA register */
PORT_IDMA_CTL = 0x14,
+ PORT_IDMA_STAT = 0x16,
+
+ PORT_RPQ_FIFO = 0x1e,
+ PORT_RPQ_CNT = 0x1f,
PORT_SCR = 0x20,
/* HOST_CTL bits */
HCTL_IRQOFF = (1 << 8), /* global IRQ off */
- HCTL_PWRDWN = (1 << 13), /* power down PHYs */
+ HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */
+ HCTL_FTHD1 = (1 << 11), /* fifo threshold 1*/
+ HCTL_PWRDWN = (1 << 12), /* power down PHYs */
HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */
HCTL_RPGSEL = (1 << 15), /* register page select */
@@ -81,9 +122,7 @@ enum {
PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */
PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
-
- PIRQ_MASK_DMA_READ = PIRQ_REPLY | PIRQ_ATA,
- PIRQ_MASK_OTHER = PIRQ_REPLY | PIRQ_COMPLETE,
+ PIRQ_MASK_DEFAULT = PIRQ_REPLY | PIRQ_ATA,
PIRQ_MASK_FREEZE = 0xff,
/* PORT_PRD_CTL bits */
@@ -96,20 +135,104 @@ enum {
IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinary */
IDMA_CTL_GO = (1 << 7), /* IDMA mode go */
IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */
+
+ /* PORT_IDMA_STAT bits */
+ IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */
+ IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */
+ IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */
+ IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */
+ IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */
+ IDMA_STAT_PSD = (1 << 6), /* ADMA pause */
+ IDMA_STAT_DONE = (1 << 7), /* ADMA done */
+
+ IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR,
+
+ /* CPB Control Flags*/
+ CPB_CTL_VALID = (1 << 0), /* CPB valid */
+ CPB_CTL_QUEUED = (1 << 1), /* queued command */
+ CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */
+ CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */
+ CPB_CTL_DEVDIR = (1 << 4), /* device direction control */
+
+ /* CPB Response Flags */
+ CPB_RESP_DONE = (1 << 0), /* ATA command complete */
+ CPB_RESP_REL = (1 << 1), /* ATA release */
+ CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */
+ CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */
+ CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */
+ CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */
+ CPB_RESP_OVERFLOW = (1 << 6), /* APRD exccess length error */
+ CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */
+
+ /* PRD Control Flags */
+ PRD_DRAIN = (1 << 1), /* ignore data excess */
+ PRD_CDB = (1 << 2), /* atapi packet command pointer */
+ PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */
+ PRD_DMA = (1 << 4), /* data transfer method */
+ PRD_WRITE = (1 << 5), /* data dir, rsvd in datasheet */
+ PRD_IOM = (1 << 6), /* io/memory transfer */
+ PRD_END = (1 << 7), /* APRD chain end */
};
+/* Comman Parameter Block */
+struct inic_cpb {
+ u8 resp_flags; /* Response Flags */
+ u8 error; /* ATA Error */
+ u8 status; /* ATA Status */
+ u8 ctl_flags; /* Control Flags */
+ __le32 len; /* Total Transfer Length */
+ __le32 prd; /* First PRD pointer */
+ u8 rsvd[4];
+ /* 16 bytes */
+ u8 feature; /* ATA Feature */
+ u8 hob_feature; /* ATA Ex. Feature */
+ u8 device; /* ATA Device/Head */
+ u8 mirctl; /* Mirror Control */
+ u8 nsect; /* ATA Sector Count */
+ u8 hob_nsect; /* ATA Ex. Sector Count */
+ u8 lbal; /* ATA Sector Number */
+ u8 hob_lbal; /* ATA Ex. Sector Number */
+ u8 lbam; /* ATA Cylinder Low */
+ u8 hob_lbam; /* ATA Ex. Cylinder Low */
+ u8 lbah; /* ATA Cylinder High */
+ u8 hob_lbah; /* ATA Ex. Cylinder High */
+ u8 command; /* ATA Command */
+ u8 ctl; /* ATA Control */
+ u8 slave_error; /* Slave ATA Error */
+ u8 slave_status; /* Slave ATA Status */
+ /* 32 bytes */
+} __packed;
+
+/* Physical Region Descriptor */
+struct inic_prd {
+ __le32 mad; /* Physical Memory Address */
+ __le16 len; /* Transfer Length */
+ u8 rsvd;
+ u8 flags; /* Control Flags */
+} __packed;
+
+struct inic_pkt {
+ struct inic_cpb cpb;
+ struct inic_prd prd[LIBATA_MAX_PRD + 1]; /* + 1 for cdb */
+ u8 cdb[ATAPI_CDB_LEN];
+} __packed;
+
struct inic_host_priv {
- u16 cached_hctl;
+ void __iomem *mmio_base;
+ u16 cached_hctl;
};
struct inic_port_priv {
- u8 dfl_prdctl;
- u8 cached_prdctl;
- u8 cached_pirq_mask;
+ struct inic_pkt *pkt;
+ dma_addr_t pkt_dma;
+ u32 *cpb_tbl;
+ dma_addr_t cpb_tbl_dma;
};
static struct scsi_host_template inic_sht = {
- ATA_BMDMA_SHT(DRV_NAME),
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
+ .dma_boundary = INIC_DMA_BOUNDARY,
};
static const int scr_map[] = {
@@ -120,54 +243,34 @@ static const int scr_map[] = {
static void __iomem *inic_port_base(struct ata_port *ap)
{
- return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
-}
-
-static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
- void __iomem *port_base = inic_port_base(ap);
- struct inic_port_priv *pp = ap->private_data;
+ struct inic_host_priv *hpriv = ap->host->private_data;
- writeb(mask, port_base + PORT_IRQ_MASK);
- pp->cached_pirq_mask = mask;
-}
-
-static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
- struct inic_port_priv *pp = ap->private_data;
-
- if (pp->cached_pirq_mask != mask)
- __inic_set_pirq_mask(ap, mask);
+ return hpriv->mmio_base + ap->port_no * PORT_SIZE;
}
static void inic_reset_port(void __iomem *port_base)
{
void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
- u16 ctl;
- ctl = readw(idma_ctl);
- ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
+ /* stop IDMA engine */
+ readw(idma_ctl); /* flush */
+ msleep(1);
/* mask IRQ and assert reset */
- writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
+ writew(IDMA_CTL_RST_IDMA, idma_ctl);
readw(idma_ctl); /* flush */
-
- /* give it some time */
msleep(1);
/* release reset */
- writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
+ writew(0, idma_ctl);
/* clear irq */
writeb(0xff, port_base + PORT_IRQ_STAT);
-
- /* reenable ATA IRQ, turn off IDMA mode */
- writew(ctl, idma_ctl);
}
static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
{
- void __iomem *scr_addr = ap->ioaddr.scr_addr;
+ void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
void __iomem *addr;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -184,120 +287,126 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
- void __iomem *scr_addr = ap->ioaddr.scr_addr;
- void __iomem *addr;
+ void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
return -EINVAL;
- addr = scr_addr + scr_map[sc_reg] * 4;
writel(val, scr_addr + scr_map[sc_reg] * 4);
return 0;
}
-/*
- * In TF mode, inic162x is very similar to SFF device. TF registers
- * function the same. DMA engine behaves similary using the same PRD
- * format as BMDMA but different command register, interrupt and event
- * notification methods are used. The following inic_bmdma_*()
- * functions do the impedance matching.
- */
-static void inic_bmdma_setup(struct ata_queued_cmd *qc)
+static void inic_stop_idma(struct ata_port *ap)
{
- struct ata_port *ap = qc->ap;
- struct inic_port_priv *pp = ap->private_data;
void __iomem *port_base = inic_port_base(ap);
- int rw = qc->tf.flags & ATA_TFLAG_WRITE;
-
- /* make sure device sees PRD table writes */
- wmb();
-
- /* load transfer length */
- writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);
-
- /* turn on DMA and specify data direction */
- pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
- if (!rw)
- pp->cached_prdctl |= PRD_CTL_WR;
- writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
- /* issue r/w command */
- ap->ops->sff_exec_command(ap, &qc->tf);
+ readb(port_base + PORT_RPQ_FIFO);
+ readb(port_base + PORT_RPQ_CNT);
+ writew(0, port_base + PORT_IDMA_CTL);
}
-static void inic_bmdma_start(struct ata_queued_cmd *qc)
+static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
{
- struct ata_port *ap = qc->ap;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
struct inic_port_priv *pp = ap->private_data;
- void __iomem *port_base = inic_port_base(ap);
+ struct inic_cpb *cpb = &pp->pkt->cpb;
+ bool freeze = false;
- /* start host DMA transaction */
- pp->cached_prdctl |= PRD_CTL_START;
- writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
-}
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
+ irq_stat, idma_stat);
-static void inic_bmdma_stop(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct inic_port_priv *pp = ap->private_data;
- void __iomem *port_base = inic_port_base(ap);
+ inic_stop_idma(ap);
- /* stop DMA engine */
- writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
-}
+ if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+ ata_ehi_push_desc(ehi, "hotplug");
+ ata_ehi_hotplugged(ehi);
+ freeze = true;
+ }
-static u8 inic_bmdma_status(struct ata_port *ap)
-{
- /* event is already verified by the interrupt handler */
- return ATA_DMA_INTR;
+ if (idma_stat & IDMA_STAT_PERR) {
+ ata_ehi_push_desc(ehi, "PCI error");
+ freeze = true;
+ }
+
+ if (idma_stat & IDMA_STAT_CPBERR) {
+ ata_ehi_push_desc(ehi, "CPB error");
+
+ if (cpb->resp_flags & CPB_RESP_IGNORED) {
+ __ata_ehi_push_desc(ehi, " ignored");
+ ehi->err_mask |= AC_ERR_INVALID;
+ freeze = true;
+ }
+
+ if (cpb->resp_flags & CPB_RESP_ATA_ERR)
+ ehi->err_mask |= AC_ERR_DEV;
+
+ if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
+ __ata_ehi_push_desc(ehi, " spurious-intr");
+ ehi->err_mask |= AC_ERR_HSM;
+ freeze = true;
+ }
+
+ if (cpb->resp_flags &
+ (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
+ __ata_ehi_push_desc(ehi, " data-over/underflow");
+ ehi->err_mask |= AC_ERR_HSM;
+ freeze = true;
+ }
+ }
+
+ if (freeze)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
}
static void inic_host_intr(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
- struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
u8 irq_stat;
+ u16 idma_stat;
- /* fetch and clear irq */
+ /* read and clear IRQ status */
irq_stat = readb(port_base + PORT_IRQ_STAT);
writeb(irq_stat, port_base + PORT_IRQ_STAT);
+ idma_stat = readw(port_base + PORT_IDMA_STAT);
- if (likely(!(irq_stat & PIRQ_ERR))) {
- struct ata_queued_cmd *qc =
- ata_qc_from_tag(ap, ap->link.active_tag);
+ if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
+ inic_host_err_intr(ap, irq_stat, idma_stat);
- if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
- ap->ops->sff_check_status(ap); /* clear ATA interrupt */
- return;
- }
+ if (unlikely(!qc))
+ goto spurious;
- if (likely(ata_sff_host_intr(ap, qc)))
- return;
+ if (likely(idma_stat & IDMA_STAT_DONE)) {
+ inic_stop_idma(ap);
- ap->ops->sff_check_status(ap); /* clear ATA interrupt */
- ata_port_printk(ap, KERN_WARNING, "unhandled "
- "interrupt, irq_stat=%x\n", irq_stat);
+ /* Depending on circumstances, device error
+ * isn't reported by IDMA, check it explicitly.
+ */
+ if (unlikely(readb(port_base + PORT_TF_COMMAND) &
+ (ATA_DF | ATA_ERR)))
+ qc->err_mask |= AC_ERR_DEV;
+
+ ata_qc_complete(qc);
return;
}
- /* error */
- ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);
-
- if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
- ata_ehi_hotplugged(ehi);
- ata_port_freeze(ap);
- } else
- ata_port_abort(ap);
+ spurious:
+ ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
+ "cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+ qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
}
static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
- void __iomem *mmio_base = host->iomap[MMIO_BAR];
+ struct inic_host_priv *hpriv = host->private_data;
u16 host_irq_stat;
int i, handled = 0;;
- host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
+ host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
goto out;
@@ -327,60 +436,173 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
+static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ /* For some reason ATAPI_PROT_DMA doesn't work for some
+ * commands including writes and other misc ops. Use PIO
+ * protocol instead, which BTW is driven by the DMA engine
+ * anyway, so it shouldn't make much difference for native
+ * SATA devices.
+ */
+ if (atapi_cmd_type(qc->cdb[0]) == READ)
+ return 0;
+ return 1;
+}
+
+static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+{
+ struct scatterlist *sg;
+ unsigned int si;
+ u8 flags = 0;
+
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ flags |= PRD_WRITE;
+
+ if (ata_is_dma(qc->tf.protocol))
+ flags |= PRD_DMA;
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ prd->mad = cpu_to_le32(sg_dma_address(sg));
+ prd->len = cpu_to_le16(sg_dma_len(sg));
+ prd->flags = flags;
+ prd++;
+ }
+
+ WARN_ON(!si);
+ prd[-1].flags |= PRD_END;
+}
+
+static void inic_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct inic_port_priv *pp = qc->ap->private_data;
+ struct inic_pkt *pkt = pp->pkt;
+ struct inic_cpb *cpb = &pkt->cpb;
+ struct inic_prd *prd = pkt->prd;
+ bool is_atapi = ata_is_atapi(qc->tf.protocol);
+ bool is_data = ata_is_data(qc->tf.protocol);
+ unsigned int cdb_len = 0;
+
+ VPRINTK("ENTER\n");
+
+ if (is_atapi)
+ cdb_len = qc->dev->cdb_len;
+
+ /* prepare packet, based on initio driver */
+ memset(pkt, 0, sizeof(struct inic_pkt));
+
+ cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
+ if (is_atapi || is_data)
+ cpb->ctl_flags |= CPB_CTL_DATA;
+
+ cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
+ cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
+
+ cpb->device = qc->tf.device;
+ cpb->feature = qc->tf.feature;
+ cpb->nsect = qc->tf.nsect;
+ cpb->lbal = qc->tf.lbal;
+ cpb->lbam = qc->tf.lbam;
+ cpb->lbah = qc->tf.lbah;
+
+ if (qc->tf.flags & ATA_TFLAG_LBA48) {
+ cpb->hob_feature = qc->tf.hob_feature;
+ cpb->hob_nsect = qc->tf.hob_nsect;
+ cpb->hob_lbal = qc->tf.hob_lbal;
+ cpb->hob_lbam = qc->tf.hob_lbam;
+ cpb->hob_lbah = qc->tf.hob_lbah;
+ }
+
+ cpb->command = qc->tf.command;
+ /* don't load ctl - dunno why. it's like that in the initio driver */
+
+ /* setup PRD for CDB */
+ if (is_atapi) {
+ memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
+ prd->mad = cpu_to_le32(pp->pkt_dma +
+ offsetof(struct inic_pkt, cdb));
+ prd->len = cpu_to_le16(cdb_len);
+ prd->flags = PRD_CDB | PRD_WRITE;
+ if (!is_data)
+ prd->flags |= PRD_END;
+ prd++;
+ }
+
+ /* setup sg table */
+ if (is_data)
+ inic_fill_sg(prd, qc);
+
+ pp->cpb_tbl[0] = pp->pkt_dma;
+}
+
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ void __iomem *port_base = inic_port_base(ap);
- /* ATA IRQ doesn't wait for DMA transfer completion and vice
- * versa. Mask IRQ selectively to detect command completion.
- * Without it, ATA DMA read command can cause data corruption.
- *
- * Something similar might be needed for ATAPI writes. I
- * tried a lot of combinations but couldn't find the solution.
- */
- if (qc->tf.protocol == ATA_PROT_DMA &&
- !(qc->tf.flags & ATA_TFLAG_WRITE))
- inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
- else
- inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+ /* fire up the ADMA engine */
+ writew(HCTL_FTHD0, port_base + HOST_CTL);
+ writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
+ writeb(0, port_base + PORT_CPB_PTQFIFO);
+
+ return 0;
+}
+
+static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ void __iomem *port_base = inic_port_base(ap);
+
+ tf->feature = readb(port_base + PORT_TF_FEATURE);
+ tf->nsect = readb(port_base + PORT_TF_NSECT);
+ tf->lbal = readb(port_base + PORT_TF_LBAL);
+ tf->lbam = readb(port_base + PORT_TF_LBAM);
+ tf->lbah = readb(port_base + PORT_TF_LBAH);
+ tf->device = readb(port_base + PORT_TF_DEVICE);
+ tf->command = readb(port_base + PORT_TF_COMMAND);
+}
- /* Issuing a command to yet uninitialized port locks up the
- * controller. Most of the time, this happens for the first
- * command after reset which are ATA and ATAPI IDENTIFYs.
- * Fast fail if stat is 0x7f or 0xff for those commands.
+static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ata_taskfile *rtf = &qc->result_tf;
+ struct ata_taskfile tf;
+
+ /* FIXME: Except for status and error, result TF access
+ * doesn't work. I tried reading from BAR0/2, CPB and BAR5.
+ * None works regardless of which command interface is used.
+ * For now return true iff status indicates device error.
+ * This means that we're reporting bogus sector for RW
+ * failures. Eeekk....
*/
- if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
- qc->tf.command == ATA_CMD_ID_ATAPI)) {
- u8 stat = ap->ops->sff_check_status(ap);
- if (stat == 0x7f || stat == 0xff)
- return AC_ERR_HSM;
- }
+ inic_tf_read(qc->ap, &tf);
- return ata_sff_qc_issue(qc);
+ if (!(tf.command & ATA_ERR))
+ return false;
+
+ rtf->command = tf.command;
+ rtf->feature = tf.feature;
+ return true;
}
static void inic_freeze(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
- __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);
-
- ap->ops->sff_check_status(ap);
+ writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
writeb(0xff, port_base + PORT_IRQ_STAT);
-
- readb(port_base + PORT_IRQ_STAT); /* flush */
}
static void inic_thaw(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
- ap->ops->sff_check_status(ap);
writeb(0xff, port_base + PORT_IRQ_STAT);
+ writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
+}
- __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+static int inic_check_ready(struct ata_link *link)
+{
+ void __iomem *port_base = inic_port_base(link->ap);
- readb(port_base + PORT_IRQ_STAT); /* flush */
+ return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
}
/*
@@ -394,17 +616,15 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
void __iomem *port_base = inic_port_base(ap);
void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
- u16 val;
int rc;
/* hammer it into sane state */
inic_reset_port(port_base);
- val = readw(idma_ctl);
- writew(val | IDMA_CTL_RST_ATA, idma_ctl);
+ writew(IDMA_CTL_RST_ATA, idma_ctl);
readw(idma_ctl); /* flush */
msleep(1);
- writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
+ writew(0, idma_ctl);
rc = sata_link_resume(link, timing, deadline);
if (rc) {
@@ -418,7 +638,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
struct ata_taskfile tf;
/* wait for link to become ready */
- rc = ata_sff_wait_after_reset(link, 1, deadline);
+ rc = ata_wait_after_reset(link, deadline, inic_check_ready);
/* link occupied, -ENODEV too is an error */
if (rc) {
ata_link_printk(link, KERN_WARNING, "device not ready "
@@ -426,7 +646,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
- ata_sff_tf_read(ap, &tf);
+ inic_tf_read(ap, &tf);
*class = ata_dev_classify(&tf);
}
@@ -436,18 +656,8 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
static void inic_error_handler(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
- struct inic_port_priv *pp = ap->private_data;
- unsigned long flags;
- /* reset PIO HSM and stop DMA engine */
inic_reset_port(port_base);
-
- spin_lock_irqsave(ap->lock, flags);
- ap->hsm_task_state = HSM_ST_IDLE;
- writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
- spin_unlock_irqrestore(ap->lock, flags);
-
- /* PIO and DMA engines have been stopped, perform recovery */
ata_std_error_handler(ap);
}
@@ -458,26 +668,18 @@ static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
inic_reset_port(inic_port_base(qc->ap));
}
-static void inic_dev_config(struct ata_device *dev)
-{
- /* inic can only handle upto LBA28 max sectors */
- if (dev->max_sectors > ATA_MAX_SECTORS)
- dev->max_sectors = ATA_MAX_SECTORS;
-
- if (dev->n_sectors >= 1 << 28) {
- ata_dev_printk(dev, KERN_ERR,
- "ERROR: This driver doesn't support LBA48 yet and may cause\n"
- " data corruption on such devices. Disabling.\n");
- ata_dev_disable(dev);
- }
-}
-
static void init_port(struct ata_port *ap)
{
void __iomem *port_base = inic_port_base(ap);
+ struct inic_port_priv *pp = ap->private_data;
- /* Setup PRD address */
+ /* clear packet and CPB table */
+ memset(pp->pkt, 0, sizeof(struct inic_pkt));
+ memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
+
+ /* setup PRD and CPB lookup table addresses */
writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+ writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}
static int inic_port_resume(struct ata_port *ap)
@@ -488,28 +690,30 @@ static int inic_port_resume(struct ata_port *ap)
static int inic_port_start(struct ata_port *ap)
{
- void __iomem *port_base = inic_port_base(ap);
+ struct device *dev = ap->host->dev;
struct inic_port_priv *pp;
- u8 tmp;
int rc;
/* alloc and initialize private data */
- pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
ap->private_data = pp;
- /* default PRD_CTL value, DMAEN, WR and START off */
- tmp = readb(port_base + PORT_PRD_CTL);
- tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
- pp->dfl_prdctl = tmp;
-
/* Alloc resources */
rc = ata_port_start(ap);
- if (rc) {
- kfree(pp);
+ if (rc)
return rc;
- }
+
+ pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
+ &pp->pkt_dma, GFP_KERNEL);
+ if (!pp->pkt)
+ return -ENOMEM;
+
+ pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
+ &pp->cpb_tbl_dma, GFP_KERNEL);
+ if (!pp->cpb_tbl)
+ return -ENOMEM;
init_port(ap);
@@ -517,21 +721,18 @@ static int inic_port_start(struct ata_port *ap)
}
static struct ata_port_operations inic_port_ops = {
- .inherits = &ata_sff_port_ops,
+ .inherits = &sata_port_ops,
- .bmdma_setup = inic_bmdma_setup,
- .bmdma_start = inic_bmdma_start,
- .bmdma_stop = inic_bmdma_stop,
- .bmdma_status = inic_bmdma_status,
+ .check_atapi_dma = inic_check_atapi_dma,
+ .qc_prep = inic_qc_prep,
.qc_issue = inic_qc_issue,
+ .qc_fill_rtf = inic_qc_fill_rtf,
.freeze = inic_freeze,
.thaw = inic_thaw,
- .softreset = ATA_OP_NULL, /* softreset is broken */
.hardreset = inic_hardreset,
.error_handler = inic_error_handler,
.post_internal_cmd = inic_post_internal_cmd,
- .dev_config = inic_dev_config,
.scr_read = inic_scr_read,
.scr_write = inic_scr_write,
@@ -541,12 +742,6 @@ static struct ata_port_operations inic_port_ops = {
};
static struct ata_port_info inic_port_info = {
- /* For some reason, ATAPI_PROT_PIO is broken on this
- * controller, and no, PIO_POLLING does't fix it. It somehow
- * manages to report the wrong ireason and ignoring ireason
- * results in machine lock up. Tell libata to always prefer
- * DMA.
- */
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@@ -599,7 +794,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct inic_host_priv *hpriv = host->private_data;
- void __iomem *mmio_base = host->iomap[MMIO_BAR];
int rc;
rc = ata_pci_device_do_resume(pdev);
@@ -607,7 +801,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = init_controller(mmio_base, hpriv->cached_hctl);
+ rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc)
return rc;
}
@@ -625,6 +819,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_host *host;
struct inic_host_priv *hpriv;
void __iomem * const *iomap;
+ int mmio_bar;
int i, rc;
if (!printed_version++)
@@ -638,38 +833,31 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
host->private_data = hpriv;
- /* acquire resources and fill host */
+ /* Acquire resources and fill host. Note that PCI and cardbus
+ * use different BARs.
+ */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+ if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
+ mmio_bar = MMIO_BAR_PCI;
+ else
+ mmio_bar = MMIO_BAR_CARDBUS;
+
+ rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
if (rc)
return rc;
host->iomap = iomap = pcim_iomap_table(pdev);
+ hpriv->mmio_base = iomap[mmio_bar];
+ hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);
for (i = 0; i < NR_PORTS; i++) {
struct ata_port *ap = host->ports[i];
- struct ata_ioports *port = &ap->ioaddr;
- unsigned int offset = i * PORT_SIZE;
-
- port->cmd_addr = iomap[2 * i];
- port->altstatus_addr =
- port->ctl_addr = (void __iomem *)
- ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
- port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
-
- ata_sff_std_ports(port);
-
- ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
- ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
- ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
- (unsigned long long)pci_resource_start(pdev, 2 * i),
- (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
- ATA_PCI_CTL_OFS);
- }
- hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
+ ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
+ ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
+ }
/* Set dma_mask. This devices doesn't support 64bit addressing. */
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -698,7 +886,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
- rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
+ rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"failed to initialize controller\n");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 842b1a15b78..bb73b222262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -65,6 +65,7 @@
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
+#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -91,9 +92,9 @@ enum {
MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
MV_SATAHC0_REG_BASE = 0x20000,
- MV_FLASH_CTL = 0x1046c,
- MV_GPIO_PORT_CTL = 0x104f0,
- MV_RESET_CFG = 0x180d8,
+ MV_FLASH_CTL_OFS = 0x1046c,
+ MV_GPIO_PORT_CTL_OFS = 0x104f0,
+ MV_RESET_CFG_OFS = 0x180d8,
MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
@@ -147,18 +148,21 @@ enum {
/* PCI interface registers */
PCI_COMMAND_OFS = 0xc00,
+ PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
PCI_MAIN_CMD_STS_OFS = 0xd30,
STOP_PCI_MASTER = (1 << 2),
PCI_MASTER_EMPTY = (1 << 3),
GLOB_SFT_RST = (1 << 4),
- MV_PCI_MODE = 0xd00,
+ MV_PCI_MODE_OFS = 0xd00,
+ MV_PCI_MODE_MASK = 0x30,
+
MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
MV_PCI_DISC_TIMER = 0xd04,
MV_PCI_MSI_TRIGGER = 0xc38,
MV_PCI_SERR_MASK = 0xc28,
- MV_PCI_XBAR_TMOUT = 0x1d04,
+ MV_PCI_XBAR_TMOUT_OFS = 0x1d04,
MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
MV_PCI_ERR_ATTRIBUTE = 0x1d48,
@@ -225,16 +229,18 @@ enum {
PHY_MODE4 = 0x314,
PHY_MODE2 = 0x330,
SATA_IFCTL_OFS = 0x344,
+ SATA_TESTCTL_OFS = 0x348,
SATA_IFSTAT_OFS = 0x34c,
VENDOR_UNIQUE_FIS_OFS = 0x35c,
- FIS_CFG_OFS = 0x360,
- FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
+ FISCFG_OFS = 0x360,
+ FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
+ FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
MV5_PHY_MODE = 0x74,
- MV5_LT_MODE = 0x30,
- MV5_PHY_CTL = 0x0C,
- SATA_INTERFACE_CFG = 0x050,
+ MV5_LTMODE_OFS = 0x30,
+ MV5_PHY_CTL_OFS = 0x0C,
+ SATA_INTERFACE_CFG_OFS = 0x050,
MV_M2_PREAMP_MASK = 0x7e0,
@@ -332,10 +338,16 @@ enum {
EDMA_CMD_OFS = 0x28, /* EDMA command register */
EDMA_EN = (1 << 0), /* enable EDMA */
EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
- ATA_RST = (1 << 2), /* reset trans/link/phy */
+ EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
+
+ EDMA_STATUS_OFS = 0x30, /* EDMA engine status */
+ EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
+ EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
- EDMA_IORDY_TMOUT = 0x34,
- EDMA_ARB_CFG = 0x38,
+ EDMA_IORDY_TMOUT_OFS = 0x34,
+ EDMA_ARB_CFG_OFS = 0x38,
+
+ EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */
GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
@@ -350,15 +362,19 @@ enum {
MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
+ MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
+ MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
+ MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
+#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
@@ -433,6 +449,7 @@ struct mv_port_priv {
unsigned int resp_idx;
u32 pp_flags;
+ unsigned int delayed_eh_pmp_map;
};
struct mv_port_signal {
@@ -479,6 +496,7 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
+static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
@@ -527,6 +545,9 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
+static void mv_pmp_error_handler(struct ata_port *ap);
+static void mv_process_crpb_entries(struct ata_port *ap,
+ struct mv_port_priv *pp);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
* because we have to allow room for worst case splitting of
@@ -548,6 +569,7 @@ static struct scsi_host_template mv6_sht = {
static struct ata_port_operations mv5_ops = {
.inherits = &ata_sff_port_ops,
+ .qc_defer = mv_qc_defer,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
@@ -566,7 +588,6 @@ static struct ata_port_operations mv5_ops = {
static struct ata_port_operations mv6_ops = {
.inherits = &mv5_ops,
- .qc_defer = sata_pmp_qc_defer_cmd_switch,
.dev_config = mv6_dev_config,
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
@@ -574,12 +595,11 @@ static struct ata_port_operations mv6_ops = {
.pmp_hardreset = mv_pmp_hardreset,
.pmp_softreset = mv_softreset,
.softreset = mv_softreset,
- .error_handler = sata_pmp_error_handler,
+ .error_handler = mv_pmp_error_handler,
};
static struct ata_port_operations mv_iie_ops = {
.inherits = &mv6_ops,
- .qc_defer = ata_std_qc_defer, /* FIS-based switching */
.dev_config = ATA_OP_NULL,
.qc_prep = mv_qc_prep_iie,
};
@@ -875,6 +895,29 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
}
}
+static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
+ const int per_loop = 5, timeout = (15 * 1000 / per_loop);
+ int i;
+
+ /*
+ * Wait for the EDMA engine to finish transactions in progress.
+ * No idea what a good "timeout" value might be, but measurements
+ * indicate that it often requires hundreds of microseconds
+ * with two drives in use. So we use the 15msec value above
+ * as a rough guess at what even more drives might require.
+ */
+ for (i = 0; i < timeout; ++i) {
+ u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
+ if ((edma_stat & empty_idle) == empty_idle)
+ break;
+ udelay(per_loop);
+ }
+ /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
+}
+
/**
* mv_stop_edma_engine - Disable eDMA engine
* @port_mmio: io base address
@@ -907,6 +950,7 @@ static int mv_stop_edma(struct ata_port *ap)
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
return 0;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ mv_wait_for_edma_empty_idle(ap);
if (mv_stop_edma_engine(port_mmio)) {
ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
return -EIO;
@@ -1057,26 +1101,95 @@ static void mv6_dev_config(struct ata_device *adev)
}
}
-static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
+static int mv_qc_defer(struct ata_queued_cmd *qc)
{
- u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
+ struct ata_link *link = qc->dev->link;
+ struct ata_port *ap = link->ap;
+ struct mv_port_priv *pp = ap->private_data;
+
+ /*
+ * Don't allow new commands if we're in a delayed EH state
+ * for NCQ and/or FIS-based switching.
+ */
+ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+ return ATA_DEFER_PORT;
/*
- * Various bit settings required for operation
- * in FIS-based switching (fbs) mode on GenIIe:
+ * If the port is completely idle, then allow the new qc.
*/
- old_fcfg = readl(port_mmio + FIS_CFG_OFS);
- old_ltmode = readl(port_mmio + LTMODE_OFS);
- if (enable_fbs) {
- new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
- new_ltmode = old_ltmode | LTMODE_BIT8;
- } else { /* disable fbs */
- new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
- new_ltmode = old_ltmode & ~LTMODE_BIT8;
- }
- if (new_fcfg != old_fcfg)
- writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
+ if (ap->nr_active_links == 0)
+ return 0;
+
+ if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+ /*
+ * The port is operating in host queuing mode (EDMA).
+ * It can accommodate a new qc if the qc protocol
+ * is compatible with the current host queue mode.
+ */
+ if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+ /*
+ * The host queue (EDMA) is in NCQ mode.
+ * If the new qc is also an NCQ command,
+ * then allow the new qc.
+ */
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ return 0;
+ } else {
+ /*
+ * The host queue (EDMA) is in non-NCQ, DMA mode.
+ * If the new qc is also a non-NCQ, DMA command,
+ * then allow the new qc.
+ */
+ if (qc->tf.protocol == ATA_PROT_DMA)
+ return 0;
+ }
+ }
+ return ATA_DEFER_PORT;
+}
+
+static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
+{
+ u32 new_fiscfg, old_fiscfg;
+ u32 new_ltmode, old_ltmode;
+ u32 new_haltcond, old_haltcond;
+
+ old_fiscfg = readl(port_mmio + FISCFG_OFS);
+ old_ltmode = readl(port_mmio + LTMODE_OFS);
+ old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
+
+ new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
+ new_ltmode = old_ltmode & ~LTMODE_BIT8;
+ new_haltcond = old_haltcond | EDMA_ERR_DEV;
+
+ if (want_fbs) {
+ new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
+ new_ltmode = old_ltmode | LTMODE_BIT8;
+ if (want_ncq)
+ new_haltcond &= ~EDMA_ERR_DEV;
+ else
+ new_fiscfg |= FISCFG_WAIT_DEV_ERR;
+ }
+
+ if (new_fiscfg != old_fiscfg)
+ writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
if (new_ltmode != old_ltmode)
writelfl(new_ltmode, port_mmio + LTMODE_OFS);
+ if (new_haltcond != old_haltcond)
+ writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
+}
+
+static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
+{
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ u32 old, new;
+
+ /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
+ old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
+ if (want_ncq)
+ new = old | (1 << 22);
+ else
+ new = old & ~(1 << 22);
+ if (new != old)
+ writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
@@ -1088,25 +1201,40 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
/* set up non-NCQ EDMA configuration */
cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
+ pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;
if (IS_GEN_I(hpriv))
cfg |= (1 << 8); /* enab config burst size mask */
- else if (IS_GEN_II(hpriv))
+ else if (IS_GEN_II(hpriv)) {
cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
+ mv_60x1_errata_sata25(ap, want_ncq);
- else if (IS_GEN_IIE(hpriv)) {
- cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
- cfg |= (1 << 22); /* enab 4-entry host queue cache */
- cfg |= (1 << 18); /* enab early completion */
- cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
+ } else if (IS_GEN_IIE(hpriv)) {
+ int want_fbs = sata_pmp_attached(ap);
+ /*
+ * Possible future enhancement:
+ *
+ * The chip can use FBS with non-NCQ, if we allow it.
+ * But first we need to have the error handling in place
+ * for this mode (datasheet section 7.3.15.4.2.3).
+ * So disallow non-NCQ FBS for now.
+ */
+ want_fbs &= want_ncq;
+
+ mv_config_fbs(port_mmio, want_ncq, want_fbs);
- if (want_ncq && sata_pmp_attached(ap)) {
+ if (want_fbs) {
+ pp->pp_flags |= MV_PP_FLAG_FBS_EN;
cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
- mv_config_fbs(port_mmio, 1);
- } else {
- mv_config_fbs(port_mmio, 0);
}
+
+ cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
+ cfg |= (1 << 22); /* enab 4-entry host queue cache */
+ if (HAS_PCI(ap->host))
+ cfg |= (1 << 18); /* enab early completion */
+ if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
+ cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
}
if (want_ncq) {
@@ -1483,25 +1611,186 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
return qc;
}
-static void mv_unexpected_intr(struct ata_port *ap)
+static void mv_pmp_error_handler(struct ata_port *ap)
{
+ unsigned int pmp, pmp_map;
struct mv_port_priv *pp = ap->private_data;
- struct ata_eh_info *ehi = &ap->link.eh_info;
- char *when = "";
+ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
+ /*
+ * Perform NCQ error analysis on failed PMPs
+ * before we freeze the port entirely.
+ *
+ * The failed PMPs are marked earlier by mv_pmp_eh_prep().
+ */
+ pmp_map = pp->delayed_eh_pmp_map;
+ pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
+ for (pmp = 0; pmp_map != 0; pmp++) {
+ unsigned int this_pmp = (1 << pmp);
+ if (pmp_map & this_pmp) {
+ struct ata_link *link = &ap->pmp_link[pmp];
+ pmp_map &= ~this_pmp;
+ ata_eh_analyze_ncq_error(link);
+ }
+ }
+ ata_port_freeze(ap);
+ }
+ sata_pmp_error_handler(ap);
+}
+
+static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+
+ return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
+}
+
+static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
+{
+ struct ata_eh_info *ehi;
+ unsigned int pmp;
+
+ /*
+ * Initialize EH info for PMPs which saw device errors
+ */
+ ehi = &ap->link.eh_info;
+ for (pmp = 0; pmp_map != 0; pmp++) {
+ unsigned int this_pmp = (1 << pmp);
+ if (pmp_map & this_pmp) {
+ struct ata_link *link = &ap->pmp_link[pmp];
+
+ pmp_map &= ~this_pmp;
+ ehi = &link->eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "dev err");
+ ehi->err_mask |= AC_ERR_DEV;
+ ehi->action |= ATA_EH_RESET;
+ ata_link_abort(link);
+ }
+ }
+}
+
+static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
+{
+ struct mv_port_priv *pp = ap->private_data;
+ int failed_links;
+ unsigned int old_map, new_map;
+
+ /*
+ * Device error during FBS+NCQ operation:
+ *
+ * Set a port flag to prevent further I/O from being enqueued.
+ * Leave the EDMA running to drain outstanding commands from this port.
+ * Perform the post-mortem/EH only when all responses are complete.
+ * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
+ */
+ if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
+ pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
+ pp->delayed_eh_pmp_map = 0;
+ }
+ old_map = pp->delayed_eh_pmp_map;
+ new_map = old_map | mv_get_err_pmp_map(ap);
+
+ if (old_map != new_map) {
+ pp->delayed_eh_pmp_map = new_map;
+ mv_pmp_eh_prep(ap, new_map & ~old_map);
+ }
+ failed_links = hweight16(new_map);
+
+ ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
+ "failed_links=%d nr_active_links=%d\n",
+ __func__, pp->delayed_eh_pmp_map,
+ ap->qc_active, failed_links,
+ ap->nr_active_links);
+
+ if (ap->nr_active_links <= failed_links) {
+ mv_process_crpb_entries(ap, pp);
+ mv_stop_edma(ap);
+ mv_eh_freeze(ap);
+ ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
+ return 1; /* handled */
+ }
+ ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
+ return 1; /* handled */
+}
+
+static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
+{
/*
- * We got a device interrupt from something that
- * was supposed to be using EDMA or polling.
+ * Possible future enhancement:
+ *
+ * FBS+non-NCQ operation is not yet implemented.
+ * See related notes in mv_edma_cfg().
+ *
+ * Device error during FBS+non-NCQ operation:
+ *
+ * We need to snapshot the shadow registers for each failed command.
+ * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
*/
+ return 0; /* not handled */
+}
+
+static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
+{
+ struct mv_port_priv *pp = ap->private_data;
+
+ if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+ return 0; /* EDMA was not active: not handled */
+ if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
+ return 0; /* FBS was not active: not handled */
+
+ if (!(edma_err_cause & EDMA_ERR_DEV))
+ return 0; /* non DEV error: not handled */
+ edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
+ if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
+ return 0; /* other problems: not handled */
+
+ if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+ /*
+ * EDMA should NOT have self-disabled for this case.
+ * If it did, then something is wrong elsewhere,
+ * and we cannot handle it here.
+ */
+ if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+ ata_port_printk(ap, KERN_WARNING,
+ "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
+ return 0; /* not handled */
+ }
+ return mv_handle_fbs_ncq_dev_err(ap);
+ } else {
+ /*
+ * EDMA should have self-disabled for this case.
+ * If it did not, then something is wrong elsewhere,
+ * and we cannot handle it here.
+ */
+ if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
+ ata_port_printk(ap, KERN_WARNING,
+ "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
+ return 0; /* not handled */
+ }
+ return mv_handle_fbs_non_ncq_dev_err(ap);
+ }
+ return 0; /* not handled */
+}
+
+static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
+{
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ char *when = "idle";
+
ata_ehi_clear_desc(ehi);
- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
- when = " while EDMA enabled";
+ if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
+ when = "disabled";
+ } else if (edma_was_enabled) {
+ when = "EDMA enabled";
} else {
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
- when = " while polling";
+ when = "polling";
}
- ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when);
+ ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
ehi->err_mask |= AC_ERR_OTHER;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
@@ -1519,7 +1808,7 @@ static void mv_unexpected_intr(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
-static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void mv_err_intr(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, eh_freeze_mask, serr = 0;
@@ -1527,24 +1816,42 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int action = 0, err_mask = 0;
struct ata_eh_info *ehi = &ap->link.eh_info;
-
- ata_ehi_clear_desc(ehi);
+ struct ata_queued_cmd *qc;
+ int abort = 0;
/*
- * Read and clear the err_cause bits. This won't actually
- * clear for some errors (eg. SError), but we will be doing
- * a hard reset in those cases regardless, which *will* clear it.
+ * Read and clear the SError and err_cause bits.
*/
+ sata_scr_read(&ap->link, SCR_ERROR, &serr);
+ sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
+
edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
- ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
+ ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
+
+ if (edma_err_cause & EDMA_ERR_DEV) {
+ /*
+ * Device errors during FIS-based switching operation
+ * require special handling.
+ */
+ if (mv_handle_dev_err(ap, edma_err_cause))
+ return;
+ }
+ qc = mv_get_active_qc(ap);
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
+ edma_err_cause, pp->pp_flags);
/*
* All generations share these EDMA error cause bits:
*/
- if (edma_err_cause & EDMA_ERR_DEV)
+ if (edma_err_cause & EDMA_ERR_DEV) {
err_mask |= AC_ERR_DEV;
+ action |= ATA_EH_RESET;
+ ata_ehi_push_desc(ehi, "dev error");
+ }
if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR)) {
@@ -1576,13 +1883,6 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
if (edma_err_cause & EDMA_ERR_SERR) {
- /*
- * Ensure that we read our own SCR, not a pmp link SCR:
- */
- ap->ops->scr_read(ap, SCR_ERROR, &serr);
- /*
- * Don't clear SError here; leave it for libata-eh:
- */
ata_ehi_push_desc(ehi, "SError=%08x", serr);
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_RESET;
@@ -1602,10 +1902,29 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
else
ehi->err_mask |= err_mask;
- if (edma_err_cause & eh_freeze_mask)
+ if (err_mask == AC_ERR_DEV) {
+ /*
+ * Cannot do ata_port_freeze() here,
+ * because it would kill PIO access,
+ * which is needed for further diagnosis.
+ */
+ mv_eh_freeze(ap);
+ abort = 1;
+ } else if (edma_err_cause & eh_freeze_mask) {
+ /*
+ * Note that ata_port_freeze() also calls ata_port_abort().
+ */
ata_port_freeze(ap);
- else
- ata_port_abort(ap);
+ } else {
+ abort = 1;
+ }
+
+ if (abort) {
+ if (qc)
+ ata_link_abort(qc->dev->link);
+ else
+ ata_port_abort(ap);
+ }
}
static void mv_process_crpb_response(struct ata_port *ap,
@@ -1632,8 +1951,9 @@ static void mv_process_crpb_response(struct ata_port *ap,
}
}
ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
- qc->err_mask |= ac_err_mask(ata_status);
- ata_qc_complete(qc);
+ if (!ac_err_mask(ata_status))
+ ata_qc_complete(qc);
+ /* else: leave it for mv_err_intr() */
} else {
ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
__func__, tag);
@@ -1677,6 +1997,44 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
+static void mv_port_intr(struct ata_port *ap, u32 port_cause)
+{
+ struct mv_port_priv *pp;
+ int edma_was_enabled;
+
+ if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
+ mv_unexpected_intr(ap, 0);
+ return;
+ }
+ /*
+ * Grab a snapshot of the EDMA_EN flag setting,
+ * so that we have a consistent view for this port,
+ * even if one of the routines we call changes it.
+ */
+ pp = ap->private_data;
+ edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
+ /*
+ * Process completed CRPB response(s) before other events.
+ */
+ if (edma_was_enabled && (port_cause & DONE_IRQ)) {
+ mv_process_crpb_entries(ap, pp);
+ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+ mv_handle_fbs_ncq_dev_err(ap);
+ }
+ /*
+ * Handle chip-reported errors, or continue on to handle PIO.
+ */
+ if (unlikely(port_cause & ERR_IRQ)) {
+ mv_err_intr(ap);
+ } else if (!edma_was_enabled) {
+ struct ata_queued_cmd *qc = mv_get_active_qc(ap);
+ if (qc)
+ ata_sff_host_intr(ap, qc);
+ else
+ mv_unexpected_intr(ap, edma_was_enabled);
+ }
+}
+
/**
* mv_host_intr - Handle all interrupts on the given host controller
* @host: host specific structure
@@ -1688,66 +2046,58 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
struct mv_host_priv *hpriv = host->private_data;
- void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
- u32 hc_irq_cause = 0;
+ void __iomem *mmio = hpriv->base, *hc_mmio;
unsigned int handled = 0, port;
for (port = 0; port < hpriv->n_ports; port++) {
struct ata_port *ap = host->ports[port];
- struct mv_port_priv *pp;
- unsigned int shift, hardport, port_cause;
- /*
- * When we move to the second hc, flag our cached
- * copies of hc_mmio (and hc_irq_cause) as invalid again.
- */
- if (port == MV_PORTS_PER_HC)
- hc_mmio = NULL;
- /*
- * Do nothing if port is not interrupting or is disabled:
- */
+ unsigned int p, shift, hardport, port_cause;
+
MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
- port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
- if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
- continue;
/*
- * Each hc within the host has its own hc_irq_cause register.
- * We defer reading it until we know we need it, right now:
- *
- * FIXME later: we don't really need to read this register
- * (some logic changes required below if we go that way),
- * because it doesn't tell us anything new. But we do need
- * to write to it, outside the top of this loop,
- * to reset the interrupt triggers for next time.
+ * Each hc within the host has its own hc_irq_cause register,
+ * where the interrupting ports bits get ack'd.
*/
- if (!hc_mmio) {
+ if (hardport == 0) { /* first port on this hc ? */
+ u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
+ u32 port_mask, ack_irqs;
+ /*
+ * Skip this entire hc if nothing is pending for any of its ports
+ */
+ if (!hc_cause) {
+ port += MV_PORTS_PER_HC - 1;
+ continue;
+ }
+ /*
+ * We don't need/want to read the hc_irq_cause register,
+ * because doing so hurts performance, and
+ * main_irq_cause already gives us everything we need.
+ *
+ * But we do have to *write* to the hc_irq_cause to ack
+ * the ports that we are handling this time through.
+ *
+ * This requires that we create a bitmap for those
+ * ports which interrupted us, and use that bitmap
+ * to ack (only) those ports via hc_irq_cause.
+ */
+ ack_irqs = 0;
+ for (p = 0; p < MV_PORTS_PER_HC; ++p) {
+ if ((port + p) >= hpriv->n_ports)
+ break;
+ port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
+ if (hc_cause & port_mask)
+ ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
+ }
hc_mmio = mv_hc_base_from_port(mmio, port);
- hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
- writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+ writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
handled = 1;
}
/*
- * Process completed CRPB response(s) before other events.
- */
- pp = ap->private_data;
- if (hc_irq_cause & (DMA_IRQ << hardport)) {
- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
- mv_process_crpb_entries(ap, pp);
- }
- /*
- * Handle chip-reported errors, or continue on to handle PIO.
+ * Handle interrupts signalled for this port:
*/
- if (unlikely(port_cause & ERR_IRQ)) {
- mv_err_intr(ap, mv_get_active_qc(ap));
- } else if (hc_irq_cause & (DEV_IRQ << hardport)) {
- if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
- struct ata_queued_cmd *qc = mv_get_active_qc(ap);
- if (qc) {
- ata_sff_host_intr(ap, qc);
- continue;
- }
- }
- mv_unexpected_intr(ap);
- }
+ port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
+ if (port_cause)
+ mv_port_intr(ap, port_cause);
}
return handled;
}
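The comments in the rewritten loop above describe the central trick: don't read hc_irq_cause at all, but translate the DONE/ERR bit pairs already present in main_irq_cause into an ack mask and write only that back. A condensed, purely illustrative model of that translation (the bit layout here is a hypothetical stand-in for the driver's DONE_IRQ/ERR_IRQ and DMA_IRQ/DEV_IRQ definitions):

/* Illustration only: two cause bits per port in, one ack bit per port out. */
#define SKETCH_PORTS_PER_HC	4
#define SKETCH_DONE		(1 << 0)	/* stand-in for DONE_IRQ */
#define SKETCH_ERR		(1 << 1)	/* stand-in for ERR_IRQ */

static unsigned int sketch_build_ack_mask(unsigned int hc_cause,
					   unsigned int nports)
{
	unsigned int p, ack = 0;

	for (p = 0; p < SKETCH_PORTS_PER_HC && p < nports; p++) {
		unsigned int pair = (SKETCH_DONE | SKETCH_ERR) << (p * 2);

		if (hc_cause & pair)	/* did this port interrupt us? */
			ack |= 1 << p;	/* then ack that port, and only it */
	}
	return ack;
}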
@@ -1894,7 +2244,7 @@ static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
- writel(0x0fcfffff, mmio + MV_FLASH_CTL);
+ writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
@@ -1913,7 +2263,7 @@ static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
u32 tmp;
- writel(0, mmio + MV_GPIO_PORT_CTL);
+ writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
@@ -1931,14 +2281,14 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
if (fix_apm_sq) {
- tmp = readl(phy_mmio + MV5_LT_MODE);
+ tmp = readl(phy_mmio + MV5_LTMODE_OFS);
tmp |= (1 << 19);
- writel(tmp, phy_mmio + MV5_LT_MODE);
+ writel(tmp, phy_mmio + MV5_LTMODE_OFS);
- tmp = readl(phy_mmio + MV5_PHY_CTL);
+ tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
tmp &= ~0x3;
tmp |= 0x1;
- writel(tmp, phy_mmio + MV5_PHY_CTL);
+ writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
}
tmp = readl(phy_mmio + MV5_PHY_MODE);
@@ -1956,11 +2306,6 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
{
void __iomem *port_mmio = mv_port_base(mmio, port);
- /*
- * The datasheet warns against setting ATA_RST when EDMA is active
- * (but doesn't say what the problem might be). So we first try
- * to disable the EDMA engine before doing the ATA_RST operation.
- */
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
@@ -1975,7 +2320,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
- writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO
@@ -2021,13 +2366,13 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
struct mv_host_priv *hpriv = host->private_data;
u32 tmp;
- tmp = readl(mmio + MV_PCI_MODE);
+ tmp = readl(mmio + MV_PCI_MODE_OFS);
tmp &= 0xff00ffff;
- writel(tmp, mmio + MV_PCI_MODE);
+ writel(tmp, mmio + MV_PCI_MODE_OFS);
ZERO(MV_PCI_DISC_TIMER);
ZERO(MV_PCI_MSI_TRIGGER);
- writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
+ writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
ZERO(MV_PCI_SERR_MASK);
ZERO(hpriv->irq_cause_ofs);
@@ -2045,10 +2390,10 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
mv5_reset_flash(hpriv, mmio);
- tmp = readl(mmio + MV_GPIO_PORT_CTL);
+ tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
tmp &= 0x3;
tmp |= (1 << 5) | (1 << 6);
- writel(tmp, mmio + MV_GPIO_PORT_CTL);
+ writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}
/**
@@ -2121,7 +2466,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *port_mmio;
u32 tmp;
- tmp = readl(mmio + MV_RESET_CFG);
+ tmp = readl(mmio + MV_RESET_CFG_OFS);
if ((tmp & (1 << 0)) == 0) {
hpriv->signal[idx].amps = 0x7 << 8;
hpriv->signal[idx].pre = 0x1 << 5;
@@ -2137,7 +2482,7 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
- writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
+ writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -2235,11 +2580,6 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
{
void __iomem *port_mmio = mv_port_base(mmio, port);
- /*
- * The datasheet warns against setting ATA_RST when EDMA is active
- * (but doesn't say what the problem might be). So we first try
- * to disable the EDMA engine before doing the ATA_RST operation.
- */
mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
@@ -2254,7 +2594,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
- writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO
@@ -2297,38 +2637,39 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
return;
}
-static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
+static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
- u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
+ u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
- ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
+ ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
if (want_gen2i)
- ifctl |= (1 << 7); /* enable gen2i speed */
- writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
+ ifcfg |= (1 << 7); /* enable gen2i speed */
+ writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}
-/*
- * Caller must ensure that EDMA is not active,
- * by first doing mv_stop_edma() where needed.
- */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no)
{
void __iomem *port_mmio = mv_port_base(mmio, port_no);
+ /*
+ * The datasheet warns against setting EDMA_RESET when EDMA is active
+ * (but doesn't say what the problem might be). So we first try
+ * to disable the EDMA engine before doing the EDMA_RESET operation.
+ */
mv_stop_edma_engine(port_mmio);
- writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
+ writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
if (!IS_GEN_I(hpriv)) {
- /* Enable 3.0gb/s link speed */
- mv_setup_ifctl(port_mmio, 1);
+ /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
+ mv_setup_ifcfg(port_mmio, 1);
}
/*
- * Strobing ATA_RST here causes a hard reset of the SATA transport,
+ * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
* link, and physical layers. It resets all SATA interface registers
* (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
*/
- writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
+ writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
udelay(25); /* allow reset propagation */
writelfl(0, port_mmio + EDMA_CMD_OFS);
@@ -2392,7 +2733,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
sata_scr_read(link, SCR_STATUS, &sstatus);
if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
/* Force 1.5gb/s link speed and try again */
- mv_setup_ifctl(mv_ap_base(ap), 0);
+ mv_setup_ifcfg(mv_ap_base(ap), 0);
if (time_after(jiffies + HZ, deadline))
extra = HZ; /* only extend it once, max */
}
@@ -2493,6 +2834,34 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
+static unsigned int mv_in_pcix_mode(struct ata_host *host)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
+ u32 reg;
+
+ if (!HAS_PCI(host) || !IS_PCIE(hpriv))
+ return 0; /* not PCI-X capable */
+ reg = readl(mmio + MV_PCI_MODE_OFS);
+ if ((reg & MV_PCI_MODE_MASK) == 0)
+ return 0; /* conventional PCI mode */
+ return 1; /* chip is in PCI-X mode */
+}
+
+static int mv_pci_cut_through_okay(struct ata_host *host)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
+ u32 reg;
+
+ if (!mv_in_pcix_mode(host)) {
+ reg = readl(mmio + PCI_COMMAND_OFS);
+ if (reg & PCI_COMMAND_MRDTRIG)
+ return 0; /* not okay */
+ }
+ return 1; /* okay */
+}
+
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
@@ -2560,7 +2929,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
case chip_7042:
- hp_flags |= MV_HP_PCIE;
+ hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
if (pdev->vendor == PCI_VENDOR_ID_TTI &&
(pdev->device == 0x2300 || pdev->device == 0x2310))
{
@@ -2590,9 +2959,12 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
+ /* drop through */
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
+ if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
+ hp_flags |= MV_HP_CUT_THROUGH;
switch (pdev->revision) {
case 0x0:
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 133eefcc047..b2cd20f549c 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -1025,7 +1025,8 @@ typedef struct iadev_t {
spinlock_t rx_lock, misc_lock;
struct atm_vcc **rx_open; /* list of all open VCs */
u16 num_rx_desc, rx_buf_sz, rxing;
- u32 rx_pkt_ram, rx_tmp_cnt, rx_tmp_jif;
+ u32 rx_pkt_ram, rx_tmp_cnt;
+ unsigned long rx_tmp_jif;
void __iomem *RX_DESC_BASE_ADDR;
u32 drop_rxpkt, drop_rxcell, rx_cell_cnt, rx_pkt_cnt;
struct atm_dev *next_board; /* other iphase devices */
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0ef00e8d415..e085af0ff94 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -140,7 +140,6 @@ int class_register(struct class *cls)
pr_debug("device class '%s': registering\n", cls->name);
- INIT_LIST_HEAD(&cls->children);
INIT_LIST_HEAD(&cls->devices);
INIT_LIST_HEAD(&cls->interfaces);
kset_init(&cls->class_dirs);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index be288b5e418..3eeac5a7858 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1218,13 +1218,11 @@ int device_rename(struct device *dev, char *new_name)
}
#else
if (dev->class) {
- sysfs_remove_link(&dev->class->subsys.kobj, old_device_name);
error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
dev->bus_id);
- if (error) {
- dev_err(dev, "%s: sysfs_create_symlink failed (%d)\n",
- __func__, error);
- }
+ if (error)
+ goto out;
+ sysfs_remove_link(&dev->class->subsys.kobj, old_device_name);
}
#endif
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ce6de5a7e2..937e8258981 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -53,11 +53,13 @@ int register_memory_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&memory_chain, nb);
}
+EXPORT_SYMBOL(register_memory_notifier);
void unregister_memory_notifier(struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&memory_chain, nb);
}
+EXPORT_SYMBOL(unregister_memory_notifier);
/*
* register_memory - Setup a sysfs device for a memory block
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 4fbb56bcb1e..358bb0be3c0 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -175,8 +175,7 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
}
/* Check whether this driver has already been added to a class. */
- if ((drv->entry.next != drv->entry.prev) ||
- (drv->entry.next != NULL)) {
+ if (drv->entry.next && !list_empty(&drv->entry)) {
printk(KERN_WARNING "sysdev: class %s: driver (%p) has already"
" been registered to a class, something is wrong, but "
"will forge on!\n", cls->name, drv);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8fc429cf82b..41f818be2f7 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -755,11 +755,13 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
{
unsigned long n_sect = bio->bi_size >> 9;
const int rw = bio_data_dir(bio);
+ struct hd_struct *part;
- all_stat_inc(disk, ios[rw], sector);
- all_stat_add(disk, ticks[rw], duration, sector);
- all_stat_add(disk, sectors[rw], n_sect, sector);
- all_stat_add(disk, io_ticks, duration, sector);
+ part = get_part(disk, sector);
+ all_stat_inc(disk, part, ios[rw], sector);
+ all_stat_add(disk, part, ticks[rw], duration, sector);
+ all_stat_add(disk, part, sectors[rw], n_sect, sector);
+ all_stat_add(disk, part, io_ticks, duration, sector);
}
void
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 5dce3877eee..595a925c62a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -196,6 +196,7 @@ config ESPSERIAL
config MOXA_INTELLIO
tristate "Moxa Intellio support"
depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
+ select FW_LOADER
help
Say Y here if you have a Moxa Intellio multiport serial card.
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5a5455585c1..192688344ed 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2352,10 +2352,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
info->si_type = (enum si_type) match->data;
info->addr_source = "device-tree";
- info->io_setup = mem_setup;
info->irq_setup = std_irq_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ if (resource.flags & IORESOURCE_IO) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+
info->io.addr_data = resource.start;
info->io.regsize = regsize ? *regsize : DEFAULT_REGSIZE;
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index fd2db07a50f..3b23270eaa6 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -1073,7 +1073,7 @@ static int cy_put_char(struct tty_struct *tty, unsigned char ch)
return 0;
if (!info->xmit_buf)
- return;
+ return 0;
local_irq_save(flags);
if (info->xmit_cnt >= PAGE_SIZE - 1) {
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index f39f6fd8935..b1a7a8cb65e 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -970,7 +970,8 @@ static int sx_set_real_termios(void *ptr)
sx_write_channel_byte(port, hi_mask, 0x1f);
break;
default:
- printk(KERN_INFO "sx: Invalid wordsize: %u\n", CFLAG & CSIZE);
+ printk(KERN_INFO "sx: Invalid wordsize: %u\n",
+ (unsigned int)CFLAG & CSIZE);
break;
}
@@ -997,7 +998,8 @@ static int sx_set_real_termios(void *ptr)
set_bit(TTY_HW_COOK_IN, &port->gs.tty->flags);
}
sx_dprintk(SX_DEBUG_TERMIOS, "iflags: %x(%d) ",
- port->gs.tty->termios->c_iflag, I_OTHER(port->gs.tty));
+ (unsigned int)port->gs.tty->termios->c_iflag,
+ I_OTHER(port->gs.tty));
/* Tell line discipline whether we will do output cooking.
* If OPOST is set and no other output flags are set then we can do output
@@ -1010,7 +1012,8 @@ static int sx_set_real_termios(void *ptr)
clear_bit(TTY_HW_COOK_OUT, &port->gs.tty->flags);
}
sx_dprintk(SX_DEBUG_TERMIOS, "oflags: %x(%d)\n",
- port->gs.tty->termios->c_oflag, O_OTHER(port->gs.tty));
+ (unsigned int)port->gs.tty->termios->c_oflag,
+ O_OTHER(port->gs.tty));
/* port->c_dcd = sx_get_CD (port); */
func_exit();
return 0;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 2001b0e52dc..55c1653be00 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -916,7 +916,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch)
{
struct slgt_info *info = tty->driver_data;
unsigned long flags;
- int ret;
+ int ret = 0;
if (sanity_check(info, tty->name, "put_char"))
return 0;
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 49c1a2267a5..e94bee03231 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1215,10 +1215,11 @@ int tty_check_change(struct tty_struct *tty)
if (!tty->pgrp) {
printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n");
- goto out;
+ goto out_unlock;
}
if (task_pgrp(current) == tty->pgrp)
- goto out;
+ goto out_unlock;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
if (is_ignored(SIGTTOU))
goto out;
if (is_current_pgrp_orphaned()) {
@@ -1229,6 +1230,8 @@ int tty_check_change(struct tty_struct *tty)
set_thread_flag(TIF_SIGPENDING);
ret = -ERESTARTSYS;
out:
+ return ret;
+out_unlock:
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
return ret;
}
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index e458b08139a..fa1ffbf2c62 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2742,6 +2742,10 @@ static int con_open(struct tty_struct *tty, struct file *filp)
tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
tty->winsize.ws_col = vc_cons[currcons].d->vc_cols;
}
+ if (vc->vc_utf)
+ tty->termios->c_iflag |= IUTF8;
+ else
+ tty->termios->c_iflag &= ~IUTF8;
release_console_sem();
vcs_make_sysfs(tty);
return ret;
@@ -2918,6 +2922,8 @@ int __init vty_init(void)
console_driver->minor_start = 1;
console_driver->type = TTY_DRIVER_TYPE_CONSOLE;
console_driver->init_termios = tty_std_termios;
+ if (default_utf8)
+ console_driver->init_termios.c_iflag |= IUTF8;
console_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
tty_set_operations(console_driver, &con_ops);
if (tty_register_driver(console_driver))
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index a9aa845dbe7..b27b13c5eb5 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -97,7 +97,7 @@ extern int edac_debug_level;
#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
PCI_DEVICE_ID_ ## vend ## _ ## dev
-#define dev_name(dev) (dev)->dev_name
+#define edac_dev_name(dev) (dev)->dev_name
/* memory devices */
enum dev_type {
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 63372fa7ecf..5fcd3d89c75 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -333,7 +333,7 @@ static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
fail0:
edac_printk(KERN_WARNING, EDAC_MC,
"%s (%s) %s %s already assigned %d\n",
- rover->dev->bus_id, dev_name(rover),
+ rover->dev->bus_id, edac_dev_name(rover),
rover->mod_name, rover->ctl_name, rover->dev_idx);
return 1;
@@ -538,7 +538,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
"'%s': DEV '%s' (%s)\n",
edac_dev->mod_name,
edac_dev->ctl_name,
- dev_name(edac_dev),
+ edac_dev_name(edac_dev),
edac_op_state_to_string(edac_dev->op_state));
mutex_unlock(&device_ctls_mutex);
@@ -599,7 +599,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
edac_printk(KERN_INFO, EDAC_MC,
"Removed device %d for %s %s: DEV %s\n",
edac_dev->dev_idx,
- edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev));
+ edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
return edac_dev;
}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index a4cf1645f58..d110392d48f 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -402,7 +402,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
fail0:
edac_printk(KERN_WARNING, EDAC_MC,
"%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
- dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
+ edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
return 1;
fail1:
@@ -517,7 +517,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* Report action taken */
edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
- " DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci));
+ " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
mutex_unlock(&mem_ctls_mutex);
return 0;
@@ -565,7 +565,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
edac_printk(KERN_INFO, EDAC_MC,
"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
- mci->mod_name, mci->ctl_name, dev_name(mci));
+ mci->mod_name, mci->ctl_name, edac_dev_name(mci));
return mci;
}
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 9b24340b52e..22ec9d5d431 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -150,7 +150,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
fail0:
edac_printk(KERN_WARNING, EDAC_PCI,
"%s (%s) %s %s already assigned %d\n",
- rover->dev->bus_id, dev_name(rover),
+ rover->dev->bus_id, edac_dev_name(rover),
rover->mod_name, rover->ctl_name, rover->pci_idx);
return 1;
@@ -360,7 +360,7 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
" DEV '%s' (%s)\n",
pci->mod_name,
pci->ctl_name,
- dev_name(pci), edac_op_state_to_string(pci->op_state));
+ edac_dev_name(pci), edac_op_state_to_string(pci->op_state));
mutex_unlock(&edac_pci_ctls_mutex);
return 0;
@@ -415,7 +415,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
edac_printk(KERN_INFO, EDAC_PCI,
"Removed device %d for %s %s: DEV %s\n",
- pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci));
+ pci->pci_idx, pci->mod_name, pci->ctl_name, edac_dev_name(pci));
return pci;
}
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 491718fe46b..cae9dc89d88 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -335,7 +335,7 @@ i2c_au1550_probe(struct platform_device *pdev)
goto out_mem;
}
- priv->psc_base = r->start;
+ priv->psc_base = CKSEG1ADDR(r->start);
priv->xfer_timeout = 200;
priv->ack_timeout = 200;
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 18beb0ad7bf..a076129de7e 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -99,7 +99,7 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
u32 x;
int result = 0;
- if (i2c->irq == 0)
+ if (i2c->irq == NO_IRQ)
{
while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
schedule();
@@ -329,10 +329,9 @@ static int fsl_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
i2c->irq = platform_get_irq(pdev, 0);
- if (i2c->irq < 0) {
- result = -ENXIO;
- goto fail_get_irq;
- }
+ if (i2c->irq < 0)
+ i2c->irq = NO_IRQ; /* Use polling */
+
i2c->flags = pdata->device_flags;
init_waitqueue_head(&i2c->queue);
@@ -344,7 +343,7 @@ static int fsl_i2c_probe(struct platform_device *pdev)
goto fail_map;
}
- if (i2c->irq != 0)
+ if (i2c->irq != NO_IRQ)
if ((result = request_irq(i2c->irq, mpc_i2c_isr,
IRQF_SHARED, "i2c-mpc", i2c)) < 0) {
printk(KERN_ERR
@@ -367,12 +366,11 @@ static int fsl_i2c_probe(struct platform_device *pdev)
return result;
fail_add:
- if (i2c->irq != 0)
+ if (i2c->irq != NO_IRQ)
free_irq(i2c->irq, i2c);
fail_irq:
iounmap(i2c->base);
fail_map:
- fail_get_irq:
kfree(i2c);
return result;
};
@@ -384,7 +382,7 @@ static int fsl_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
platform_set_drvdata(pdev, NULL);
- if (i2c->irq != 0)
+ if (i2c->irq != NO_IRQ)
free_irq(i2c->irq, i2c);
iounmap(i2c->base);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index fdc9ad805e3..ac916596858 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -104,10 +104,31 @@ MODULE_PARM_DESC(force_addr,
static int piix4_transaction(void);
static unsigned short piix4_smba;
+static int srvrworks_csb5_delay;
static struct pci_driver piix4_driver;
static struct i2c_adapter piix4_adapter;
-static struct dmi_system_id __devinitdata piix4_dmi_table[] = {
+static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
+ {
+ .ident = "Sapphire AM2RD790",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "SAPPHIRE Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "PC-AM2RD790"),
+ },
+ },
+ {
+ .ident = "DFI Lanparty UT 790FX",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "DFI Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "LP UT 790FX"),
+ },
+ },
+ { }
+};
+
+/* The IBM entry is in a separate table because we only check it
+ on Intel-based systems */
+static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = {
{
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
@@ -122,8 +143,20 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
dev_info(&PIIX4_dev->dev, "Found %s device\n", pci_name(PIIX4_dev));
+ if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
+ (PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5))
+ srvrworks_csb5_delay = 1;
+
+ /* On some motherboards, it was reported that accessing the SMBus
+ caused severe hardware problems */
+ if (dmi_check_system(piix4_dmi_blacklist)) {
+ dev_err(&PIIX4_dev->dev,
+ "Accessing the SMBus on this system is unsafe!\n");
+ return -EPERM;
+ }
+
/* Don't access SMBus on IBM systems which get corrupted eeproms */
- if (dmi_check_system(piix4_dmi_table) &&
+ if (dmi_check_system(piix4_dmi_ibm) &&
PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) {
dev_err(&PIIX4_dev->dev, "IBM system detected; this module "
"may corrupt your serial eeprom! Refusing to load "
@@ -230,10 +263,14 @@ static int piix4_transaction(void)
outb_p(inb(SMBHSTCNT) | 0x040, SMBHSTCNT);
/* We will always wait for a fraction of a second! (See PIIX4 docs errata) */
- do {
+ if (srvrworks_csb5_delay) /* Extra delay for SERVERWORKS_CSB5 */
+ msleep(2);
+ else
+ msleep(1);
+
+ while ((timeout++ < MAX_TIMEOUT) &&
+ ((temp = inb_p(SMBHSTSTS)) & 0x01))
msleep(1);
- temp = inb_p(SMBHSTSTS);
- } while ((temp & 0x01) && (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout >= MAX_TIMEOUT) {
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 8fbbdb4c2f3..114634da6c6 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -132,14 +132,14 @@ static const struct i2c_algorithm i2c_sibyte_algo = {
/*
* registering functions to load algorithms at runtime
*/
-int __init i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
+static int __init i2c_sibyte_add_bus(struct i2c_adapter *i2c_adap, int speed)
{
struct i2c_algo_sibyte_data *adap = i2c_adap->algo_data;
- /* register new adapter to i2c module... */
+ /* Register new adapter to i2c module... */
i2c_adap->algo = &i2c_sibyte_algo;
- /* Set the frequency to 100 kHz */
+ /* Set the requested frequency. */
csr_out32(speed, SMB_CSR(adap,R_SMB_FREQ));
csr_out32(0, SMB_CSR(adap,R_SMB_CONTROL));
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 26384daccb9..c99ebeadb55 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -327,6 +327,11 @@ void i2c_unregister_device(struct i2c_client *client)
EXPORT_SYMBOL_GPL(i2c_unregister_device);
+static const struct i2c_device_id dummy_id[] = {
+ { "dummy", 0 },
+ { },
+};
+
static int dummy_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -342,13 +347,13 @@ static struct i2c_driver dummy_driver = {
.driver.name = "dummy",
.probe = dummy_probe,
.remove = dummy_remove,
+ .id_table = dummy_id,
};
/**
* i2c_new_dummy - return a new i2c device bound to a dummy driver
* @adapter: the adapter managing the device
* @address: seven bit address to be used
- * @type: optional label used for i2c_client.name
* Context: can sleep
*
* This returns an I2C client bound to the "dummy" driver, intended for use
@@ -364,15 +369,12 @@ static struct i2c_driver dummy_driver = {
* i2c_unregister_device(); or NULL to indicate an error.
*/
struct i2c_client *
-i2c_new_dummy(struct i2c_adapter *adapter, u16 address, const char *type)
+i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
{
struct i2c_board_info info = {
- .driver_name = "dummy",
- .addr = address,
+ I2C_BOARD_INFO("dummy", address),
};
- if (type)
- strlcpy(info.type, type, sizeof info.type);
return i2c_new_device(adapter, &info);
}
EXPORT_SYMBOL_GPL(i2c_new_dummy);
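With the two-argument signature above, callers no longer supply a type string; the client is always bound to the "dummy" driver. A hypothetical caller reserving a device's secondary address might now read:

#include <linux/i2c.h>

/* Hypothetical example: claim a second address (say 0x51) on the same bus. */
static struct i2c_client *sketch_claim_second_addr(struct i2c_adapter *adap)
{
	struct i2c_client *dummy;

	dummy = i2c_new_dummy(adap, 0x51);
	if (!dummy)
		return NULL;	/* address busy, or allocation failed */

	/* ... talk to it via i2c_smbus_*() calls, and later release it
	 * with i2c_unregister_device(dummy);
	 */
	return dummy;
}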
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index f702f9152ce..b4f3aefa12b 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1,8 +1,6 @@
#
# IDE ATA ATAPI Block device driver configuration
#
-# Andre Hedrick <andre@linux-ide.org>
-#
# Select HAVE_IDE if IDE is supported
config HAVE_IDE
@@ -335,7 +333,7 @@ config BLK_DEV_CMD640
This driver will work automatically in PCI based systems (most new
systems have PCI slots). But if your system uses VESA local bus
(VLB) instead of PCI, you must also supply a kernel boot parameter
- to enable the CMD640 bugfix/support: "ide0=cmd640_vlb". (Try "man
+ to enable the CMD640 bugfix/support: "cmd640.probe_vlb". (Try "man
bootparam" or see the documentation of your boot loader about how to
pass options to the kernel.)
@@ -457,27 +455,11 @@ config BLK_DEV_ALI15X3
onboard chipsets. It also tests for Simplex mode and enables
normal dual channel support.
- If you say Y here, you also need to say Y to "Use DMA by default
- when available", above. Please read the comments at the top of
+ Please read the comments at the top of
<file:drivers/ide/pci/alim15x3.c>.
If unsure, say N.
-config WDC_ALI15X3
- bool "ALI M15x3 WDC support (DANGEROUS)"
- depends on BLK_DEV_ALI15X3
- ---help---
- This allows for UltraDMA support for WDC drives that ignore CRC
- checking. You are a fool for enabling this option, but there have
- been requests. DO NOT COMPLAIN IF YOUR DRIVE HAS FS CORRUPTION, IF
- YOU ENABLE THIS! No one will listen, just laugh for ignoring this
- SERIOUS WARNING.
-
- Using this option can allow WDC drives to run at ATA-4/5 transfer
- rates with only an ATA-2 support structure.
-
- SAY N!
-
config BLK_DEV_AMD74XX
tristate "AMD and nVidia IDE support"
depends on !ARM
@@ -520,9 +502,6 @@ config BLK_DEV_CY82C693
This driver adds detection and support for the CY82C693 chipset
used on Digital's PC-Alpha 164SX boards.
- If you say Y here, you need to say Y to "Use DMA by default
- when available" as well.
-
config BLK_DEV_CS5520
tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -613,13 +592,12 @@ config BLK_DEV_SC1200
National SCx200 series of embedded x86 "Geode" systems.
config BLK_DEV_PIIX
- tristate "Intel PIIXn chipsets support"
+ tristate "Intel PIIX/ICH chipsets support"
select BLK_DEV_IDEDMA_PCI
help
- This driver adds explicit support for Intel PIIX and ICH chips
- and also for the Efar Victory66 (slc90e66) chip. This allows
- the kernel to change PIO, DMA and UDMA speeds and to configure
- the chip to optimum performance.
+ This driver adds explicit support for Intel PIIX and ICH chips.
+ This allows the kernel to change PIO, DMA and UDMA speeds and to
+ configure the chip to optimum performance.
config BLK_DEV_IT8213
tristate "IT8213 IDE support"
@@ -657,11 +635,7 @@ config BLK_DEV_PDC202XX_OLD
happen if the BIOS revisions of all installed cards (three-max) do
not match, the driver attempts to do dynamic tuning of the chipset
at boot-time for max-speed. Ultra33 BIOS 1.25 or newer is required
- for more than one card. This card may require that you say Y to
- "Special UDMA Feature".
-
- If you say Y here, you need to say Y to "Use DMA by default when
- available" as well.
+ for more than one card.
Please read the comments at the top of
<file:drivers/ide/pci/pdc202xx_old.c>.
@@ -710,9 +684,6 @@ config BLK_DEV_SIS5513
ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
SiS745, SiS750
- If you say Y here, you need to say Y to "Use DMA by default when
- available" as well.
-
Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
config BLK_DEV_SL82C105
@@ -734,9 +705,6 @@ config BLK_DEV_SLC90E66
and it will handle timing cycles. Since this is an improved
look-a-like to the PIIX4 it should be a nice addition.
- If you say Y here, you need to say Y to "Use DMA by default when
- available" as well.
-
Please read the comments at the top of
<file:drivers/ide/pci/slc90e66.c>.
@@ -888,17 +856,17 @@ config BLK_DEV_IDEDOUBLER
bool "Amiga IDE Doubler support (EXPERIMENTAL)"
depends on BLK_DEV_GAYLE && EXPERIMENTAL
---help---
- This driver provides support for the so-called `IDE doublers' (made
+ This feature provides support for the so-called `IDE doublers' (made
by various manufacturers, e.g. Eyetech) that can be connected to
the on-board IDE interface of some Amiga models. Using such an IDE
doubler, you can connect up to four instead of two IDE devices to
the Amiga's on-board IDE interface.
Note that the normal Amiga Gayle IDE driver may not work correctly
- if you have an IDE doubler and don't enable this driver!
+ if you have an IDE doubler and don't enable this feature!
- Say Y if you have an IDE doubler. The driver is enabled at kernel
- runtime using the "ide=doubler" kernel boot parameter.
+ Say Y if you have an IDE doubler. The feature is enabled at kernel
+ runtime using the "gayle.doubler" kernel boot parameter.
config BLK_DEV_BUDDHA
tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 57d9a9a79a6..0daf923541f 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -95,7 +95,7 @@ void SELECT_DRIVE (ide_drive_t *drive)
hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
}
-void SELECT_MASK (ide_drive_t *drive, int mask)
+static void SELECT_MASK(ide_drive_t *drive, int mask)
{
const struct ide_port_ops *port_ops = drive->hwif->port_ops;
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 83555ca513b..9e449a0c623 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -61,7 +61,7 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq,
unsigned long data_addr = drive->hwif->io_ports.data_addr;
if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
- return outsw(data_adr, buf, (len + 1) / 2);
+ return outsw(data_addr, buf, (len + 1) / 2);
outsw_swapw(data_addr, buf, (len + 1) / 2);
}
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 712d17bdd47..52fee3d2771 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -4,7 +4,7 @@
* Author: Manish Lachwani, mlachwani@mvista.com
* Copyright (C) 2004 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
- * Copyright (c) 2006 Maciej W. Rozycki
+ * Copyright (c) 2006, 2008 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -70,8 +70,9 @@ static int __devinit swarm_ide_probe(struct device *dev)
ide_hwif_t *hwif;
u8 __iomem *base;
phys_t offset, size;
+ hw_regs_t hw;
int i;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
if (!SIBYTE_HAVE_IDE)
return -ENODEV;
@@ -112,14 +113,15 @@ static int __devinit swarm_ide_probe(struct device *dev)
hwif->host_flags = IDE_HFLAG_MMIO;
default_hwif_mmiops(hwif);
- hwif->chipset = ide_generic;
-
for (i = 0; i <= 7; i++)
- hwif->io_ports_array[i] =
+ hw.io_ports_array[i] =
(unsigned long)(base + ((0x1f0 + i) << 5));
- hwif->io_ports.ctl_addr =
+ hw.io_ports.ctl_addr =
(unsigned long)(base + (0x3f6 << 5));
- hwif->irq = K_INT_GB_IDE;
+ hw.irq = K_INT_GB_IDE;
+ hw.chipset = ide_generic;
+
+ ide_init_port_hw(hwif, &hw);
idx[0] = hwif->index;
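For readers skimming the hw.io_ports_array setup above: the (0x1f0 + i) << 5 arithmetic simply places the classic legacy task-file registers 32 bytes apart in the SWARM MMIO window. A small stand-alone C program (illustration only) makes the resulting offsets explicit.

#include <stdio.h>

int main(void)
{
	/* Offsets produced by the swarm_ide_probe() loop above:
	 * legacy register 0x1f0 + i, spaced 32 bytes apart (<< 5). */
	for (int i = 0; i <= 7; i++)
		printf("io_ports_array[%d] = base + 0x%05x\n", i, (0x1f0 + i) << 5);
	printf("ctl_addr          = base + 0x%05x\n", 0x3f6 << 5);
	return 0;
}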
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index c1922f9cfe8..f2129d5e07f 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -39,6 +39,16 @@
#include <asm/io.h>
/*
+ * Allow UDMA on M1543C-E chipset for WDC disks that ignore CRC checking
+ * (this is DANGEROUS and could result in data corruption).
+ */
+static int wdc_udma;
+
+module_param(wdc_udma, bool, 0);
+MODULE_PARM_DESC(wdc_udma,
+ "allow UDMA on M1543C-E chipset for WDC disks (DANGEROUS)");
+
+/*
* ALi devices are not plug in. Otherwise these static values would
* need to go. They ought to go away anyway
*/
@@ -76,11 +86,6 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
a_clc = 0;
c_time = ide_pio_timings[pio].cycle_time;
-#if 0
- if ((r_clc = ((c_time - s_time - a_time) * bus_speed + 999) / 1000) >= 16)
- r_clc = 0;
-#endif
-
if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) {
r_clc = 1;
} else {
@@ -110,16 +115,6 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_write_config_byte(dev, port, s_clc);
pci_write_config_byte(dev, port+drive->select.b.unit+2, (a_clc << 4) | r_clc);
local_irq_restore(flags);
-
- /*
- * setup active rec
- * { 70, 165, 365 }, PIO Mode 0
- * { 50, 125, 208 }, PIO Mode 1
- * { 30, 100, 110 }, PIO Mode 2
- * { 30, 80, 70 }, PIO Mode 3 with IORDY
- * { 25, 70, 25 }, PIO Mode 4 with IORDY ns
- * { 20, 50, 30 } PIO Mode 5 with IORDY (nonstandard)
- */
}
/**
@@ -131,9 +126,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
* The actual rules for the ALi are:
* No UDMA on revisions <= 0x20
* Disk only for revisions < 0xC2
- * Not WDC drives for revisions < 0xC2
- *
- * FIXME: WDC ifdef needs to die
+ * Not WDC drives on M1543C-E (?)
*/
static u8 ali_udma_filter(ide_drive_t *drive)
@@ -141,10 +134,9 @@ static u8 ali_udma_filter(ide_drive_t *drive)
if (m5229_revision > 0x20 && m5229_revision < 0xC2) {
if (drive->media != ide_disk)
return 0;
-#ifndef CONFIG_WDC_ALI15X3
- if (chip_is_1543c_e && strstr(drive->id->model, "WDC "))
+ if (chip_is_1543c_e && strstr(drive->id->model, "WDC ") &&
+ wdc_udma == 0)
return 0;
-#endif
}
return drive->hwif->ultra_mask;
@@ -537,17 +529,9 @@ static const struct ide_port_info ali15x3_chipset __devinitdata = {
static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- static struct pci_device_id ati_rs100[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100) },
- { },
- };
-
struct ide_port_info d = ali15x3_chipset;
u8 rev = dev->revision, idx = id->driver_data;
- if (pci_dev_present(ati_rs100))
- printk(KERN_WARNING "alim15x3: ATI Radeon IGP Northbridge is not yet fully tested.\n");
-
/* don't use LBA48 DMA on ALi devices before rev 0xC5 */
if (rev <= 0xC4)
d.host_flags |= IDE_HFLAG_NO_LBA48_DMA;
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 17669a43443..992b1cf8db6 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -119,6 +119,7 @@ static const struct ide_dma_ops cs5520_dma_ops = {
.dma_timeout = ide_dma_timeout,
};
+/* FIXME: VDMA is disabled because it caused system hangs */
#define DECLARE_CS_DEV(name_str) \
{ \
.name = name_str, \
@@ -126,7 +127,6 @@ static const struct ide_dma_ops cs5520_dma_ops = {
.dma_ops = &cs5520_dma_ops, \
.host_flags = IDE_HFLAG_ISA_PORTS | \
IDE_HFLAG_CS5520 | \
- IDE_HFLAG_VDMA | \
IDE_HFLAG_NO_ATAPI_DMA | \
IDE_HFLAG_ABUSE_SET_DMA_MODE, \
.pio_mask = ATA_PIO4, \
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index ed2ee4ba4b7..3f441fc57c1 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -359,9 +359,10 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
cq->sw_wptr++;
}
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
u32 ptr;
+ int flushed = 0;
PDBG("%s wq %p cq %p\n", __func__, wq, cq);
@@ -369,8 +370,11 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
wq->rq_rptr, wq->rq_wptr, count);
ptr = wq->rq_rptr + count;
- while (ptr++ != wq->rq_wptr)
+ while (ptr++ != wq->rq_wptr) {
insert_recv_cqe(wq, cq);
+ flushed++;
+ }
+ return flushed;
}
static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
@@ -394,18 +398,21 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
cq->sw_wptr++;
}
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
__u32 ptr;
+ int flushed = 0;
struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
ptr = wq->sq_rptr + count;
- sqp += count;
+ sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
while (ptr != wq->sq_wptr) {
insert_sq_cqe(wq, cq, sqp);
- sqp++;
ptr++;
+ sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
+ flushed++;
}
+ return flushed;
}
/*
@@ -581,7 +588,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
* caller acquires the ctrl_qp lock before the call

*/
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
- u32 len, void *data, int completion)
+ u32 len, void *data)
{
u32 i, nr_wqe, copy_len;
u8 *copy_data;
@@ -617,7 +624,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
flag = 0;
if (i == (nr_wqe - 1)) {
/* last WQE */
- flag = completion ? T3_COMPLETION_FLAG : 0;
+ flag = T3_COMPLETION_FLAG;
if (len % 32)
utx_len = len / 32 + 1;
else
@@ -676,21 +683,20 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
return 0;
}
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
- * OUT: stag index, actual pbl_size, pbl_addr allocated.
+/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
+ * OUT: stag index
* TBD: shared memory region support
*/
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
u32 *stag, u8 stag_state, u32 pdid,
enum tpt_mem_type type, enum tpt_mem_perm perm,
- u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
- u32 *pbl_size, u32 *pbl_addr)
+ u32 zbva, u64 to, u32 len, u8 page_size,
+ u32 pbl_size, u32 pbl_addr)
{
int err;
struct tpt_entry tpt;
u32 stag_idx;
u32 wptr;
- int rereg = (*stag != T3_STAG_UNSET);
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
@@ -704,30 +710,8 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
__func__, stag_state, type, pdid, stag_idx);
- if (reset_tpt_entry)
- cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
- else if (!rereg) {
- *pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
- if (!*pbl_addr) {
- return -ENOMEM;
- }
- }
-
mutex_lock(&rdev_p->ctrl_qp.lock);
- /* write PBL first if any - update pbl only if pbl list exist */
- if (pbl) {
-
- PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
- *pbl_size);
- err = cxio_hal_ctrl_qp_write_mem(rdev_p,
- (*pbl_addr >> 5),
- (*pbl_size << 3), pbl, 0);
- if (err)
- goto ret;
- }
-
/* write TPT entry */
if (reset_tpt_entry)
memset(&tpt, 0, sizeof(tpt));
@@ -742,23 +726,23 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
V_TPT_PAGE_SIZE(page_size));
tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
- cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr)>>3));
+ cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
tpt.len = cpu_to_be32(len);
tpt.va_hi = cpu_to_be32((u32) (to >> 32));
tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
tpt.rsvd_bind_cnt_or_pstag = 0;
tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
- cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
+ cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
}
err = cxio_hal_ctrl_qp_write_mem(rdev_p,
stag_idx +
(rdev_p->rnic_info.tpt_base >> 5),
- sizeof(tpt), &tpt, 1);
+ sizeof(tpt), &tpt);
/* release the stag index to free pool */
if (reset_tpt_entry)
cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-ret:
+
wptr = rdev_p->ctrl_qp.wptr;
mutex_unlock(&rdev_p->ctrl_qp.lock);
if (!err)
@@ -769,44 +753,67 @@ ret:
return err;
}
+int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
+ u32 pbl_addr, u32 pbl_size)
+{
+ u32 wptr;
+ int err;
+
+ PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
+ pbl_size);
+
+ mutex_lock(&rdev_p->ctrl_qp.lock);
+ err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
+ pbl);
+ wptr = rdev_p->ctrl_qp.wptr;
+ mutex_unlock(&rdev_p->ctrl_qp.lock);
+ if (err)
+ return err;
+
+ if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
+ SEQ32_GE(rdev_p->ctrl_qp.rptr,
+ wptr)))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, __be64 *pbl, u32 *pbl_size,
- u32 *pbl_addr)
+ u8 page_size, u32 pbl_size, u32 pbl_addr)
{
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
+ zbva, to, len, page_size, pbl_size, pbl_addr);
}
int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, __be64 *pbl, u32 *pbl_size,
- u32 *pbl_addr)
+ u8 page_size, u32 pbl_size, u32 pbl_addr)
{
return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
+ zbva, to, len, page_size, pbl_size, pbl_addr);
}
int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
u32 pbl_addr)
{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
- &pbl_size, &pbl_addr);
+ return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
+ pbl_size, pbl_addr);
}
int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
{
- u32 pbl_size = 0;
*stag = T3_STAG_UNSET;
return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
- NULL, &pbl_size, NULL);
+ 0, 0);
}
int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
- NULL, NULL);
+ return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
+ 0, 0);
}
int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
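The reworked cxio interfaces above split page-list programming out of the TPT write: callers now allocate PBL space, push the page list with cxio_write_pbl(), and only then write the TPT entry, which records just pbl_size and pbl_addr. A minimal sketch of that sequence using the signatures from this patch; error handling is trimmed and the real callers are the iwch_* helpers changed later in this diff.

#include "cxio_hal.h"

static int example_register_mem(struct cxio_rdev *rdev, u32 *stag, u32 pdid,
				enum tpt_mem_perm perm, u64 va, u32 len,
				int shift, __be64 *pages, u32 npages)
{
	u32 pbl_addr;
	int err;

	/* 1. Reserve adapter PBL space: 8 bytes per page entry. */
	pbl_addr = cxio_hal_pblpool_alloc(rdev, npages << 3);
	if (!pbl_addr)
		return -ENOMEM;

	/* 2. Write the page list (may be repeated in chunks for large regions). */
	err = cxio_write_pbl(rdev, pages, pbl_addr, npages);
	if (err)
		goto free_pbl;

	/* 3. Write the TPT entry; it only references pbl_size/pbl_addr now. */
	err = cxio_register_phys_mem(rdev, stag, pdid, perm, 0, va, len,
				     shift - 12, npages, pbl_addr);
	if (err)
		goto free_pbl;
	return 0;

free_pbl:
	cxio_hal_pblpool_free(rdev, pbl_addr, npages << 3);
	return err;
}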
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 2bcff7f5046..6e128f6bab0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -154,14 +154,14 @@ int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
struct cxio_ucontext *uctx);
int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
+int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
+ u32 pbl_addr, u32 pbl_size);
int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, __be64 *pbl, u32 *pbl_size,
- u32 *pbl_addr);
+ u8 page_size, u32 pbl_size, u32 pbl_addr);
int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, __be64 *pbl, u32 *pbl_size,
- u32 *pbl_addr);
+ u8 page_size, u32 pbl_size, u32 pbl_addr);
int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
u32 pbl_addr);
int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
@@ -173,8 +173,8 @@ u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
int __init cxio_hal_init(void);
void __exit cxio_hal_exit(void);
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
void cxio_flush_hw_cq(struct t3_cq *cq);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 45ed4f25ef7..bd233c08765 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -250,7 +250,6 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
*/
#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
-#define PBL_CHUNK 2*1024*1024
u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
{
@@ -267,14 +266,35 @@ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
{
- unsigned long i;
+ unsigned pbl_start, pbl_chunk;
+
rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
- if (rdev_p->pbl_pool)
- for (i = rdev_p->rnic_info.pbl_base;
- i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
- i += PBL_CHUNK)
- gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
- return rdev_p->pbl_pool ? 0 : -ENOMEM;
+ if (!rdev_p->pbl_pool)
+ return -ENOMEM;
+
+ pbl_start = rdev_p->rnic_info.pbl_base;
+ pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
+
+ while (pbl_start < rdev_p->rnic_info.pbl_top) {
+ pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
+ pbl_chunk);
+ if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
+ PDBG("%s failed to add PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
+ printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n",
+ __func__, pbl_start, rdev_p->rnic_info.pbl_top - pbl_start);
+ return 0;
+ }
+ pbl_chunk >>= 1;
+ } else {
+ PDBG("%s added PBL chunk (%x/%x)\n",
+ __func__, pbl_start, pbl_chunk);
+ pbl_start += pbl_chunk;
+ }
+ }
+
+ return 0;
}
void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
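The pool setup above no longer assumes a fixed 2 MB chunk: it first tries to add the whole PBL range as a single gen_pool chunk and halves the chunk size whenever gen_pool_add() fails, giving up once chunks fall below a floor. A tiny stand-alone model of that back-off; try_add() is a made-up stand-in for gen_pool_add() and the range/limit values are hypothetical.

#include <stdio.h>

/* Pretend additions larger than 4 MB fail, roughly as gen_pool_add()
 * might when its internal allocation for a huge chunk cannot be met. */
static int try_add(unsigned start, unsigned size)
{
	return size > (4u << 20) ? -1 : 0;
}

int main(void)
{
	unsigned base = 0, top = (24u << 20) - 1;	/* hypothetical PBL range */
	unsigned start = base, chunk = top - start + 1;
	unsigned floor = 1024u << 8;			/* 1024 << MIN_PBL_SHIFT  */

	while (start < top) {
		if (chunk > top - start + 1)
			chunk = top - start + 1;
		if (try_add(start, chunk)) {
			if (chunk <= floor) {
				printf("gave up at 0x%x\n", start);
				return 0;
			}
			chunk >>= 1;			/* back off and retry */
		} else {
			printf("added chunk 0x%x+0x%x\n", start, chunk);
			start += chunk;
		}
	}
	return 0;
}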
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d44a6df9ad8..c325c44807e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -67,10 +67,10 @@ int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
-static int ep_timeout_secs = 10;
+static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
- "in seconds (default=10)");
+ "in seconds (default=60)");
static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
@@ -1650,8 +1650,8 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
release = 1;
break;
case ABORTING:
- break;
case DEAD:
+ break;
default:
BUG_ON(1);
break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index 58c3d61bcd1..ec49a5cbdeb 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -35,17 +35,26 @@
#include <rdma/ib_verbs.h>
#include "cxio_hal.h"
+#include "cxio_resource.h"
#include "iwch.h"
#include "iwch_provider.h"
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp,
- int shift,
- __be64 *page_list)
+static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
{
- u32 stag;
u32 mmid;
+ mhp->attr.state = 1;
+ mhp->attr.stag = stag;
+ mmid = stag >> 8;
+ mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+ insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+ PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+}
+
+int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
+ struct iwch_mr *mhp, int shift)
+{
+ u32 stag;
if (cxio_register_phys_mem(&rhp->rdev,
&stag, mhp->attr.pdid,
@@ -53,28 +62,21 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mhp->attr.zbva,
mhp->attr.va_fbo,
mhp->attr.len,
- shift-12,
- page_list,
- &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
+ shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr))
return -ENOMEM;
- mhp->attr.state = 1;
- mhp->attr.stag = stag;
- mmid = stag >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- insert_handle(rhp, &rhp->mmidr, mhp, mmid);
- PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+
+ iwch_finish_mem_reg(mhp, stag);
+
return 0;
}
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp,
int shift,
- __be64 *page_list,
int npages)
{
u32 stag;
- u32 mmid;
-
/* We could support this... */
if (npages > mhp->attr.pbl_size)
@@ -87,19 +89,40 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mhp->attr.zbva,
mhp->attr.va_fbo,
mhp->attr.len,
- shift-12,
- page_list,
- &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
+ shift - 12,
+ mhp->attr.pbl_size, mhp->attr.pbl_addr))
return -ENOMEM;
- mhp->attr.state = 1;
- mhp->attr.stag = stag;
- mmid = stag >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- insert_handle(rhp, &rhp->mmidr, mhp, mmid);
- PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+
+ iwch_finish_mem_reg(mhp, stag);
+
+ return 0;
+}
+
+int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
+{
+ mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
+ npages << 3);
+
+ if (!mhp->attr.pbl_addr)
+ return -ENOMEM;
+
+ mhp->attr.pbl_size = npages;
+
return 0;
}
+void iwch_free_pbl(struct iwch_mr *mhp)
+{
+ cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+ mhp->attr.pbl_size << 3);
+}
+
+int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
+{
+ return cxio_write_pbl(&mhp->rhp->rdev, pages,
+ mhp->attr.pbl_addr + (offset << 3), npages);
+}
+
int build_phys_page_list(struct ib_phys_buf *buffer_list,
int num_phys_buf,
u64 *iova_start,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index d07d3a377b5..8934178a23e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -442,6 +442,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
mmid = mhp->attr.stag >> 8;
cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
+ iwch_free_pbl(mhp);
remove_handle(rhp, &rhp->mmidr, mmid);
if (mhp->kva)
kfree((void *) (unsigned long) mhp->kva);
@@ -475,6 +476,8 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->rhp = rhp;
+
/* First check that we have enough alignment */
if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
ret = -EINVAL;
@@ -492,7 +495,17 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
if (ret)
goto err;
- mhp->rhp = rhp;
+ ret = iwch_alloc_pbl(mhp, npages);
+ if (ret) {
+ kfree(page_list);
+ goto err_pbl;
+ }
+
+ ret = iwch_write_pbl(mhp, page_list, npages, 0);
+ kfree(page_list);
+ if (ret)
+ goto err_pbl;
+
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
@@ -502,12 +515,15 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
mhp->attr.len = (u32) total_size;
mhp->attr.pbl_size = npages;
- ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
- kfree(page_list);
- if (ret) {
- goto err;
- }
+ ret = iwch_register_mem(rhp, php, mhp, shift);
+ if (ret)
+ goto err_pbl;
+
return &mhp->ibmr;
+
+err_pbl:
+ iwch_free_pbl(mhp);
+
err:
kfree(mhp);
return ERR_PTR(ret);
@@ -560,7 +576,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
return ret;
}
- ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
+ ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
kfree(page_list);
if (ret) {
return ret;
@@ -602,6 +618,8 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->rhp = rhp;
+
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
@@ -615,10 +633,14 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
n += chunk->nents;
- pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
+ err = iwch_alloc_pbl(mhp, n);
+ if (err)
+ goto err;
+
+ pages = (__be64 *) __get_free_page(GFP_KERNEL);
if (!pages) {
err = -ENOMEM;
- goto err;
+ goto err_pbl;
}
i = n = 0;
@@ -630,25 +652,38 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
pages[i++] = cpu_to_be64(sg_dma_address(
&chunk->page_list[j]) +
mhp->umem->page_size * k);
+ if (i == PAGE_SIZE / sizeof *pages) {
+ err = iwch_write_pbl(mhp, pages, i, n);
+ if (err)
+ goto pbl_done;
+ n += i;
+ i = 0;
+ }
}
}
- mhp->rhp = rhp;
+ if (i)
+ err = iwch_write_pbl(mhp, pages, i, n);
+
+pbl_done:
+ free_page((unsigned long) pages);
+ if (err)
+ goto err_pbl;
+
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
mhp->attr.perms = iwch_ib_to_tpt_access(acc);
mhp->attr.va_fbo = virt;
mhp->attr.page_size = shift - 12;
mhp->attr.len = (u32) length;
- mhp->attr.pbl_size = i;
- err = iwch_register_mem(rhp, php, mhp, shift, pages);
- kfree(pages);
+
+ err = iwch_register_mem(rhp, php, mhp, shift);
if (err)
- goto err;
+ goto err_pbl;
if (udata && !t3a_device(rhp)) {
uresp.pbl_addr = (mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3;
+ rhp->rdev.rnic_info.pbl_base) >> 3;
PDBG("%s user resp pbl_addr 0x%x\n", __func__,
uresp.pbl_addr);
@@ -661,6 +696,9 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mhp->ibmr;
+err_pbl:
+ iwch_free_pbl(mhp);
+
err:
ib_umem_release(mhp->umem);
kfree(mhp);
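A minimal sketch (not the literal driver code) of the chunked PBL write pattern iwch_reg_user_mr() now uses above: translation entries are staged in one page-sized scratch buffer and flushed to adapter memory with iwch_write_pbl() whenever it fills, so the driver no longer allocates a single array for the whole region and the temporary allocation stays bounded regardless of region size. next_dma_addr() is a hypothetical iterator standing in for the ib_umem chunk walk; error paths are simplified.

#include "iwch_provider.h"

static u64 next_dma_addr(void);	/* hypothetical page-address iterator */

static int example_write_region_pbl(struct iwch_mr *mhp, int total)
{
	__be64 *pages;
	int i = 0, n = 0, err = 0;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	while (n + i < total) {
		pages[i++] = cpu_to_be64(next_dma_addr());
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = iwch_write_pbl(mhp, pages, i, n);	/* flush a full page */
			if (err)
				break;
			n += i;
			i = 0;
		}
	}
	if (!err && i)
		err = iwch_write_pbl(mhp, pages, i, n);		/* flush the tail */

	free_page((unsigned long) pages);
	return err;
}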
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index db5100d27ca..836163fc542 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -340,14 +340,14 @@ int iwch_quiesce_qps(struct iwch_cq *chp);
int iwch_resume_qps(struct iwch_cq *chp);
void stop_read_rep_timer(struct iwch_qp *qhp);
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp,
- int shift,
- __be64 *page_list);
+ struct iwch_mr *mhp, int shift);
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp,
int shift,
- __be64 *page_list,
int npages);
+int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
+void iwch_free_pbl(struct iwch_mr *mhp);
+int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
int build_phys_page_list(struct ib_phys_buf *buffer_list,
int num_phys_buf,
u64 *iova_start,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9b4be889c58..79dbe5beae5 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -655,6 +655,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
struct iwch_cq *rchp, *schp;
int count;
+ int flushed;
rchp = get_chp(qhp->rhp, qhp->attr.rcq);
schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -669,20 +670,22 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
spin_lock(&qhp->lock);
cxio_flush_hw_cq(&rchp->cq);
cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
- cxio_flush_rq(&qhp->wq, &rchp->cq, count);
+ flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, *flag);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ if (flushed)
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, *flag);
spin_lock(&qhp->lock);
cxio_flush_hw_cq(&schp->cq);
cxio_count_scqes(&schp->cq, &qhp->wq, &count);
- cxio_flush_sq(&qhp->wq, &schp->cq, count);
+ flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, *flag);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+ if (flushed)
+ (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
/* deref */
if (atomic_dec_and_test(&qhp->refcnt))
@@ -880,7 +883,6 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
ep = qhp->ep;
get_ep(&ep->com);
}
- flush_qp(qhp, &flag);
break;
case IWCH_QP_STATE_TERMINATE:
qhp->attr.state = IWCH_QP_STATE_TERMINATE;
@@ -911,6 +913,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
}
switch (attrs->next_state) {
case IWCH_QP_STATE_IDLE:
+ flush_qp(qhp, &flag);
qhp->attr.state = IWCH_QP_STATE_IDLE;
qhp->attr.llp_stream_handle = NULL;
put_ep(&qhp->ep->com);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 00bab60f6de..1e9e99a1393 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -192,6 +192,8 @@ struct ehca_qp {
int mtu_shift;
u32 message_count;
u32 packet_count;
+ atomic_t nr_events; /* events seen */
+ wait_queue_head_t wait_completion;
};
#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 2515cbde7e6..bc3b37d2070 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -101,7 +101,6 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
props->max_ee = limit_uint(rblock->max_rd_ee_context);
props->max_rdd = limit_uint(rblock->max_rd_domain);
props->max_fmr = limit_uint(rblock->max_mr);
- props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
@@ -115,7 +114,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
}
props->max_pkeys = 16;
- props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
+ props->local_ca_ack_delay = min_t(u8, rblock->local_ca_ack_delay, 255);
props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
@@ -136,7 +135,7 @@ query_device1:
return ret;
}
-static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
+static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
{
switch (fw_mtu) {
case 0x1:
@@ -156,7 +155,7 @@ static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
}
}
-static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
+static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
{
switch (vl_cap) {
case 0x1:
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ca5eb0cb628..ce1ab0571be 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -204,6 +204,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, token);
+ if (qp)
+ atomic_inc(&qp->nr_events);
read_unlock(&ehca_qp_idr_lock);
if (!qp)
@@ -223,6 +225,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
if (fatal && qp->ext_type == EQPT_SRQBASE)
dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
+ if (atomic_dec_and_test(&qp->nr_events))
+ wake_up(&qp->wait_completion);
return;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 18fba92fa7a..3f59587338e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -566,6 +566,8 @@ static struct ehca_qp *internal_create_qp(
return ERR_PTR(-ENOMEM);
}
+ atomic_set(&my_qp->nr_events, 0);
+ init_waitqueue_head(&my_qp->wait_completion);
spin_lock_init(&my_qp->spinlock_s);
spin_lock_init(&my_qp->spinlock_r);
my_qp->qp_type = qp_type;
@@ -1934,6 +1936,9 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
idr_remove(&ehca_qp_idr, my_qp->token);
write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ /* now wait until all pending events have completed */
+ wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
+
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
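Taken together, the three ehca hunks above add a simple in-flight event count so a QP cannot be destroyed while its async events are still being handled. A condensed sketch of the pattern, using the names introduced in this patch; the idr locking around the lookup is elided.

#include "ehca_classes.h"

/* event side: runs from the EQ callback */
static void example_qp_event(struct ehca_qp *qp)
{
	atomic_inc(&qp->nr_events);		/* taken under ehca_qp_idr_lock */

	/* ... dispatch the QP event ... */

	if (atomic_dec_and_test(&qp->nr_events))
		wake_up(&qp->wait_completion);	/* last in-flight event finished */
}

/* destroy side */
static void example_qp_destroy(struct ehca_qp *qp)
{
	/* remove the QP from the idr first, so no new event can find it,
	 * then wait for events that already hold a count to drain */
	wait_event(qp->wait_completion, !atomic_read(&qp->nr_events));

	/* now hipz_h_destroy_qp() can run safely */
}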
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index acf30c06a0c..daad09a4591 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1197,7 +1197,7 @@ void ipath_kreceive(struct ipath_portdata *pd)
}
reloop:
- for (last = 0, i = 1; !last; i++) {
+ for (last = 0, i = 1; !last; i += !last) {
hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
eflags = ipath_hdrget_err_flags(rhf_addr);
etype = ipath_hdrget_rcv_type(rhf_addr);
@@ -1428,6 +1428,40 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}
+/*
+ * used to force update of pioavailshadow if we can't get a pio buffer.
+ * Needed primarily due to exiting freeze mode after recovering
+ * from errors. Done lazily, because it's safer (known to not
+ * be writing pio buffers).
+ */
+static void ipath_reset_availshadow(struct ipath_devdata *dd)
+{
+ int i, im;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipath_pioavail_lock, flags);
+ for (i = 0; i < dd->ipath_pioavregs; i++) {
+ u64 val, oldval;
+ /* deal with 6110 chip bug on high register #s */
+ im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
+ i ^ 1 : i;
+ val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
+ /*
+ * busy out the buffers not in the kernel avail list,
+ * without changing the generation bits.
+ */
+ oldval = dd->ipath_pioavailshadow[i];
+ dd->ipath_pioavailshadow[i] = val |
+ ((~dd->ipath_pioavailkernel[i] <<
+ INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
+ 0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
+ if (oldval != dd->ipath_pioavailshadow[i])
+ ipath_dbg("shadow[%d] was %Lx, now %lx\n",
+ i, oldval, dd->ipath_pioavailshadow[i]);
+ }
+ spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+}
+
/**
* ipath_setrcvhdrsize - set the receive header size
* @dd: the infinipath device
@@ -1482,9 +1516,12 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd)
*/
ipath_stats.sps_nopiobufs++;
if (!(++dd->ipath_consec_nopiobuf % 100000)) {
- ipath_dbg("%u pio sends with no bufavail; dmacopy: "
- "%llx %llx %llx %llx; shadow: %lx %lx %lx %lx\n",
+ ipath_force_pio_avail_update(dd); /* at start */
+ ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
+ "%llx %llx %llx %llx\n"
+ "ipath shadow: %lx %lx %lx %lx\n",
dd->ipath_consec_nopiobuf,
+ (unsigned long)get_cycles(),
(unsigned long long) le64_to_cpu(dma[0]),
(unsigned long long) le64_to_cpu(dma[1]),
(unsigned long long) le64_to_cpu(dma[2]),
@@ -1496,14 +1533,17 @@ static noinline void no_pio_bufs(struct ipath_devdata *dd)
*/
if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
(sizeof(shadow[0]) * 4 * 4))
- ipath_dbg("2nd group: dmacopy: %llx %llx "
- "%llx %llx; shadow: %lx %lx %lx %lx\n",
+ ipath_dbg("2nd group: dmacopy: "
+ "%llx %llx %llx %llx\n"
+ "ipath shadow: %lx %lx %lx %lx\n",
(unsigned long long)le64_to_cpu(dma[4]),
(unsigned long long)le64_to_cpu(dma[5]),
(unsigned long long)le64_to_cpu(dma[6]),
(unsigned long long)le64_to_cpu(dma[7]),
- shadow[4], shadow[5], shadow[6],
- shadow[7]);
+ shadow[4], shadow[5], shadow[6], shadow[7]);
+
+ /* at end, so update likely happened */
+ ipath_reset_availshadow(dd);
}
}
@@ -1652,19 +1692,46 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail)
{
unsigned long flags;
- unsigned end;
+ unsigned end, cnt = 0, next;
/* There are two bits per send buffer (busy and generation) */
start *= 2;
- len *= 2;
- end = start + len;
+ end = start + len * 2;
- /* Set or clear the generation bits. */
spin_lock_irqsave(&ipath_pioavail_lock, flags);
+ /* Set or clear the busy bit in the shadow. */
while (start < end) {
if (avail) {
- __clear_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
- dd->ipath_pioavailshadow);
+ unsigned long dma;
+ int i, im;
+ /*
+ * the BUSY bit will never be set, because we disarm
+ * the user buffers before we hand them back to the
+ * kernel. We do have to make sure the generation
+ * bit is set correctly in shadow, since it could
+ * have changed many times while allocated to user.
+ * We can't use the bitmap functions on the full
+ * dma array because it is always little-endian, so
+ * we have to flip to host-order first.
+ * BITS_PER_LONG is slightly wrong, since it's
+ * always 64 bits per register in chip...
+ * We only work on 64 bit kernels, so that's OK.
+ */
+ /* deal with 6110 chip bug on high register #s */
+ i = start / BITS_PER_LONG;
+ im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
+ i ^ 1 : i;
+ __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
+ + start, dd->ipath_pioavailshadow);
+ dma = (unsigned long) le64_to_cpu(
+ dd->ipath_pioavailregs_dma[im]);
+ if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
+ + start) % BITS_PER_LONG, &dma))
+ __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
+ + start, dd->ipath_pioavailshadow);
+ else
+ __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
+ + start, dd->ipath_pioavailshadow);
__set_bit(start, dd->ipath_pioavailkernel);
} else {
__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
@@ -1673,7 +1740,44 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
}
start += 2;
}
+
+ if (dd->ipath_pioupd_thresh) {
+ end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
+ next = find_first_bit(dd->ipath_pioavailkernel, end);
+ while (next < end) {
+ cnt++;
+ next = find_next_bit(dd->ipath_pioavailkernel, end,
+ next + 1);
+ }
+ }
spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
+
+ /*
+ * When moving buffers from kernel to user, if number assigned to
+ * the user is less than the pio update threshold, and threshold
+ * is supported (cnt was computed > 0), drop the update threshold
+ * so we update at least once per allocated number of buffers.
+ * In any case, if the kernel buffers are less than the threshold,
+ * drop the threshold. We don't bother increasing it, having once
+ * decreased it, since it would typically just cycle back and forth.
+ * If we don't decrease below buffers in use, we can wait a long
+ * time for an update, until some other context uses PIO buffers.
+ */
+ if (!avail && len < cnt)
+ cnt = len;
+ if (cnt < dd->ipath_pioupd_thresh) {
+ dd->ipath_pioupd_thresh = cnt;
+ ipath_dbg("Decreased pio update threshold to %u\n",
+ dd->ipath_pioupd_thresh);
+ spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+ dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
+ << INFINIPATH_S_UPDTHRESH_SHIFT);
+ dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
+ << INFINIPATH_S_UPDTHRESH_SHIFT;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+ spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+ }
}
/**
@@ -1790,12 +1894,12 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
*/
if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
int skip_cancel;
- u64 *statp = &dd->ipath_sdma_status;
+ unsigned long *statp = &dd->ipath_sdma_status;
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
skip_cancel =
- !test_bit(IPATH_SDMA_DISABLED, statp) &&
- test_and_set_bit(IPATH_SDMA_ABORTING, statp);
+ test_and_set_bit(IPATH_SDMA_ABORTING, statp)
+ && !test_bit(IPATH_SDMA_DISABLED, statp);
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
if (skip_cancel)
goto bail;
@@ -1826,6 +1930,9 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
ipath_disarm_piobufs(dd, 0,
dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
+ if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
+ set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
+
if (restore_sendctrl) {
/* else done by caller later if needed */
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
@@ -1845,7 +1952,6 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
/* only wait so long for intr */
dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
dd->ipath_sdma_reset_wait = 200;
- __set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
@@ -2510,7 +2616,7 @@ int ipath_reset_device(int unit)
ipath_dbg("unit %u port %d is in use "
"(PID %u cmd %s), can't reset\n",
unit, i,
- dd->ipath_pd[i]->port_pid,
+ pid_nr(dd->ipath_pd[i]->port_pid),
dd->ipath_pd[i]->port_comm);
ret = -EBUSY;
goto bail;
@@ -2548,19 +2654,21 @@ bail:
static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
{
int i, sub, any = 0;
- pid_t pid;
+ struct pid *pid;
if (!dd->ipath_pd)
return 0;
for (i = 1; i < dd->ipath_cfgports; i++) {
- if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt ||
- !dd->ipath_pd[i]->port_pid)
+ if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
continue;
pid = dd->ipath_pd[i]->port_pid;
+ if (!pid)
+ continue;
+
dev_info(&dd->pcidev->dev, "context %d in use "
"(PID %u), sending signal %d\n",
- i, pid, sig);
- kill_proc(pid, sig, 1);
+ i, pid_nr(pid), sig);
+ kill_pid(pid, sig, 1);
any++;
for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
pid = dd->ipath_pd[i]->port_subpid[sub];
@@ -2568,8 +2676,8 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
continue;
dev_info(&dd->pcidev->dev, "sub-context "
"%d:%d in use (PID %u), sending "
- "signal %d\n", i, sub, pid, sig);
- kill_proc(pid, sig, 1);
+ "signal %d\n", i, sub, pid_nr(pid), sig);
+ kill_pid(pid, sig, 1);
any++;
}
}
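The pid handling above (and in ipath_file_ops.c below) moves from raw pid_t values plus kill_proc() to counted struct pid references, so a recycled PID number cannot be signalled by mistake. A minimal sketch of the lifecycle, built only from calls visible in this patch; example_port is a hypothetical container for the port_pid field.

#include <linux/kernel.h>
#include <linux/pid.h>
#include <linux/sched.h>

struct example_port {
	struct pid *port_pid;
};

static void example_open(struct example_port *pd)
{
	pd->port_pid = get_pid(task_pid(current));	/* take a reference at open */
}

static void example_signal(struct example_port *pd, int sig)
{
	if (pd->port_pid) {
		pr_info("context owned by PID %u\n", pid_nr(pd->port_pid));
		kill_pid(pd->port_pid, sig, 1);		/* replaces kill_proc() */
	}
}

static void example_close(struct example_port *pd)
{
	put_pid(pd->port_pid);				/* drop the reference */
	pd->port_pid = NULL;
}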
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 8b1752202e7..b472b15637f 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -173,47 +173,25 @@ static int ipath_get_base_info(struct file *fp,
(void *) dd->ipath_statusp -
(void *) dd->ipath_pioavailregs_dma;
if (!shared) {
- kinfo->spi_piocnt = dd->ipath_pbufsport;
+ kinfo->spi_piocnt = pd->port_piocnt;
kinfo->spi_piobufbase = (u64) pd->port_piobufs;
kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
dd->ipath_ureg_align * pd->port_port;
} else if (master) {
- kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
- (dd->ipath_pbufsport % subport_cnt);
+ kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
+ (pd->port_piocnt % subport_cnt);
/* Master's PIO buffers are after all the slave's */
kinfo->spi_piobufbase = (u64) pd->port_piobufs +
dd->ipath_palign *
- (dd->ipath_pbufsport - kinfo->spi_piocnt);
+ (pd->port_piocnt - kinfo->spi_piocnt);
} else {
unsigned slave = subport_fp(fp) - 1;
- kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
+ kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
kinfo->spi_piobufbase = (u64) pd->port_piobufs +
dd->ipath_palign * kinfo->spi_piocnt * slave;
}
- /*
- * Set the PIO avail update threshold to no larger
- * than the number of buffers per process. Note that
- * we decrease it here, but won't ever increase it.
- */
- if (dd->ipath_pioupd_thresh &&
- kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
- unsigned long flags;
-
- dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
- ipath_dbg("Decreased pio update threshold to %u\n",
- dd->ipath_pioupd_thresh);
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
- << INFINIPATH_S_UPDTHRESH_SHIFT);
- dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
- << INFINIPATH_S_UPDTHRESH_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- }
-
if (shared) {
kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
dd->ipath_ureg_align * pd->port_port;
@@ -577,7 +555,7 @@ static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
p = dd->ipath_pageshadow[porttid + tid];
dd->ipath_pageshadow[porttid + tid] = NULL;
ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
- pd->port_pid, tid);
+ pid_nr(pd->port_pid), tid);
dd->ipath_f_put_tid(dd, &tidbase[tid],
RCVHQ_RCV_TYPE_EXPECTED,
dd->ipath_tidinvalid);
@@ -1309,19 +1287,19 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
if (!pd->port_subport_cnt) {
/* port is not shared */
- piocnt = dd->ipath_pbufsport;
+ piocnt = pd->port_piocnt;
piobufs = pd->port_piobufs;
} else if (!subport_fp(fp)) {
/* caller is the master */
- piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
- (dd->ipath_pbufsport % pd->port_subport_cnt);
+ piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
+ (pd->port_piocnt % pd->port_subport_cnt);
piobufs = pd->port_piobufs +
- dd->ipath_palign * (dd->ipath_pbufsport - piocnt);
+ dd->ipath_palign * (pd->port_piocnt - piocnt);
} else {
unsigned slave = subport_fp(fp) - 1;
/* caller is a slave */
- piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
+ piocnt = pd->port_piocnt / pd->port_subport_cnt;
piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
}
@@ -1631,11 +1609,8 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
port);
pd->port_cnt = 1;
port_fp(fp) = pd;
- pd->port_pid = current->pid;
+ pd->port_pid = get_pid(task_pid(current));
strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
- ipath_chg_pioavailkernel(dd,
- dd->ipath_pbufsport * (pd->port_port - 1),
- dd->ipath_pbufsport, 0);
ipath_stats.sps_ports++;
ret = 0;
} else
@@ -1818,14 +1793,15 @@ static int find_shared_port(struct file *fp,
}
port_fp(fp) = pd;
subport_fp(fp) = pd->port_cnt++;
- pd->port_subpid[subport_fp(fp)] = current->pid;
+ pd->port_subpid[subport_fp(fp)] =
+ get_pid(task_pid(current));
tidcursor_fp(fp) = 0;
pd->active_slaves |= 1 << subport_fp(fp);
ipath_cdbg(PROC,
"%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
current->comm, current->pid,
subport_fp(fp),
- pd->port_comm, pd->port_pid,
+ pd->port_comm, pid_nr(pd->port_pid),
dd->ipath_unit, pd->port_port);
ret = 1;
goto done;
@@ -1938,11 +1914,25 @@ static int ipath_do_user_init(struct file *fp,
/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
+ /* some ports may get extra buffers, calculate that here */
+ if (pd->port_port <= dd->ipath_ports_extrabuf)
+ pd->port_piocnt = dd->ipath_pbufsport + 1;
+ else
+ pd->port_piocnt = dd->ipath_pbufsport;
+
/* for right now, kernel piobufs are at end, so port 1 is at 0 */
+ if (pd->port_port <= dd->ipath_ports_extrabuf)
+ pd->port_pio_base = (dd->ipath_pbufsport + 1)
+ * (pd->port_port - 1);
+ else
+ pd->port_pio_base = dd->ipath_ports_extrabuf +
+ dd->ipath_pbufsport * (pd->port_port - 1);
pd->port_piobufs = dd->ipath_piobufbase +
- dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign;
- ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
- pd->port_port, pd->port_piobufs);
+ pd->port_pio_base * dd->ipath_palign;
+ ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
+ " first pio %u\n", pd->port_port, pd->port_piobufs,
+ pd->port_piocnt, pd->port_pio_base);
+ ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
/*
* Now allocate the rcvhdr Q and eager TIDs; skip the TID
@@ -2077,7 +2067,8 @@ static int ipath_close(struct inode *in, struct file *fp)
* the slave(s) don't wait for receive data forever.
*/
pd->active_slaves &= ~(1 << fd->subport);
- pd->port_subpid[fd->subport] = 0;
+ put_pid(pd->port_subpid[fd->subport]);
+ pd->port_subpid[fd->subport] = NULL;
mutex_unlock(&ipath_mutex);
goto bail;
}
@@ -2085,7 +2076,7 @@ static int ipath_close(struct inode *in, struct file *fp)
if (pd->port_hdrqfull) {
ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
- "during run\n", pd->port_comm, pd->port_pid,
+ "during run\n", pd->port_comm, pid_nr(pd->port_pid),
pd->port_hdrqfull);
pd->port_hdrqfull = 0;
}
@@ -2107,7 +2098,6 @@ static int ipath_close(struct inode *in, struct file *fp)
}
if (dd->ipath_kregbase) {
- int i;
/* atomically clear receive enable port and intr avail. */
clear_bit(dd->ipath_r_portenable_shift + port,
&dd->ipath_rcvctrl);
@@ -2136,9 +2126,9 @@ static int ipath_close(struct inode *in, struct file *fp)
ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
pd->port_port, dd->ipath_dummy_hdrq_phys);
- i = dd->ipath_pbufsport * (port - 1);
- ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
- ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);
+ ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
+ ipath_chg_pioavailkernel(dd, pd->port_pio_base,
+ pd->port_piocnt, 1);
dd->ipath_f_clear_tids(dd, pd->port_port);
@@ -2146,11 +2136,12 @@ static int ipath_close(struct inode *in, struct file *fp)
unlock_expected_tids(pd);
ipath_stats.sps_ports--;
ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
- pd->port_comm, pd->port_pid,
+ pd->port_comm, pid_nr(pd->port_pid),
dd->ipath_unit, port);
}
- pd->port_pid = 0;
+ put_pid(pd->port_pid);
+ pd->port_pid = NULL;
dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
mutex_unlock(&ipath_mutex);
ipath_free_pddata(dd, pd); /* after releasing the mutex */
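To make the new per-port buffer accounting in ipath_do_user_init() above concrete, here is a stand-alone worked example of port_piocnt / port_pio_base using the formulas from that hunk. The inputs (29 buffers per port, one port receiving an extra buffer, 4 user ports) are hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned pbufsport = 29;	/* dd->ipath_pbufsport (hypothetical)      */
	unsigned extrabuf  = 1;		/* dd->ipath_ports_extrabuf (hypothetical) */
	unsigned uports    = 4;

	for (unsigned port = 1; port <= uports; port++) {
		unsigned piocnt, pio_base;

		if (port <= extrabuf) {			/* low ports get one extra */
			piocnt   = pbufsport + 1;
			pio_base = (pbufsport + 1) * (port - 1);
		} else {
			piocnt   = pbufsport;
			pio_base = extrabuf + pbufsport * (port - 1);
		}
		printf("port %u: piocnt %u, first pio %u\n", port, piocnt, pio_base);
	}
	return 0;
}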
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index e3ec0d1bdf5..8eee7830f04 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -595,7 +595,7 @@ static void ipath_7220_txe_recover(struct ipath_devdata *dd)
dev_info(&dd->pcidev->dev,
"Recovering from TXE PIO parity error\n");
- ipath_disarm_senderrbufs(dd, 1);
+ ipath_disarm_senderrbufs(dd);
}
@@ -675,10 +675,8 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
/*
- * Parity errors in send memory are recoverable,
- * just cancel the send (if indicated in * sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue.
+ * Parity errors in send memory are recoverable by h/w;
+ * just do housekeeping, exit freeze mode and continue.
*/
if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
@@ -687,13 +685,6 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
- if (!hwerrs) {
- /* else leave in freeze mode */
- ipath_write_kreg(dd,
- dd->ipath_kregs->kr_control,
- dd->ipath_control);
- goto bail;
- }
}
if (hwerrs) {
/*
@@ -723,8 +714,8 @@ static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
dd->ipath_flags &= ~IPATH_INITTED;
} else {
- ipath_dbg("Clearing freezemode on ignored hardware "
- "error\n");
+ ipath_dbg("Clearing freezemode on ignored or "
+ "recovered hardware error\n");
ipath_clear_freeze(dd);
}
}
@@ -870,8 +861,9 @@ static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
"revision %u.%u!\n",
dd->ipath_majrev, dd->ipath_minrev);
ret = 1;
- } else if (dd->ipath_minrev == 1) {
- /* Rev1 chips are prototype. Complain, but allow use */
+ } else if (dd->ipath_minrev == 1 &&
+ !(dd->ipath_flags & IPATH_INITTED)) {
+ /* Rev1 chips are prototype. Complain at init, but allow use */
ipath_dev_err(dd, "Unsupported hardware "
"revision %u.%u, Contact support@qlogic.com\n",
dd->ipath_majrev, dd->ipath_minrev);
@@ -1966,7 +1958,7 @@ static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
dd->ipath_rcvctrl);
dd->ipath_p0_rcvegrcnt = 2048; /* always */
if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- dd->ipath_pioreserved = 1; /* reserve a buffer */
+ dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 27dd8947666..3e5baa43fc8 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -41,7 +41,7 @@
/*
* min buffers we want to have per port, after driver
*/
-#define IPATH_MIN_USER_PORT_BUFCNT 8
+#define IPATH_MIN_USER_PORT_BUFCNT 7
/*
* Number of ports we are configured to use (to allow for more pio
@@ -54,13 +54,9 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
/*
* Number of buffers reserved for driver (verbs and layered drivers.)
- * Reserved at end of buffer list. Initialized based on
- * number of PIO buffers if not set via module interface.
+ * Initialized based on number of PIO buffers if not set via module interface.
* The problem with this is that it's global, but we'll use different
- * numbers for different chip types. So the default value is not
- * very useful. I've redefined it for the 1.3 release so that it's
- * zero unless set by the user to something else, in which case we
- * try to respect it.
+ * numbers for different chip types.
*/
static ushort ipath_kpiobufs;
@@ -546,9 +542,12 @@ static void enable_chip(struct ipath_devdata *dd, int reinit)
pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
else
pioavail = dd->ipath_pioavailregs_dma[i];
- dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) |
- (~dd->ipath_pioavailkernel[i] <<
- INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
+ /*
+ * don't need to worry about ipath_pioavailkernel here
+ * because we will call ipath_chg_pioavailkernel() later
+ * in initialization, to busy out buffers as needed
+ */
+ dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
}
/* can get counters, stats, etc. */
dd->ipath_flags |= IPATH_PRESENT;
@@ -708,12 +707,11 @@ static void verify_interrupt(unsigned long opaque)
int ipath_init_chip(struct ipath_devdata *dd, int reinit)
{
int ret = 0;
- u32 val32, kpiobufs;
+ u32 kpiobufs, defkbufs;
u32 piobufs, uports;
u64 val;
struct ipath_portdata *pd;
gfp_t gfp_flags = GFP_USER | __GFP_COMP;
- unsigned long flags;
ret = init_housekeeping(dd, reinit);
if (ret)
@@ -753,56 +751,46 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
/ (sizeof(u64) * BITS_PER_BYTE / 2);
uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
- if (ipath_kpiobufs == 0) {
- /* not set by user (this is default) */
- if (piobufs > 144)
- kpiobufs = 32;
- else
- kpiobufs = 16;
- }
+ if (piobufs > 144)
+ defkbufs = 32 + dd->ipath_pioreserved;
else
- kpiobufs = ipath_kpiobufs;
+ defkbufs = 16 + dd->ipath_pioreserved;
- if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
+ if (ipath_kpiobufs && (ipath_kpiobufs +
+ (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
int i = (int) piobufs -
(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
if (i < 1)
i = 1;
dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
"%d for kernel leaves too few for %d user ports "
- "(%d each); using %u\n", kpiobufs,
+ "(%d each); using %u\n", ipath_kpiobufs,
piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
/*
* shouldn't change ipath_kpiobufs, because could be
* different for different devices...
*/
kpiobufs = i;
- }
+ } else if (ipath_kpiobufs)
+ kpiobufs = ipath_kpiobufs;
+ else
+ kpiobufs = defkbufs;
dd->ipath_lastport_piobuf = piobufs - kpiobufs;
dd->ipath_pbufsport =
uports ? dd->ipath_lastport_piobuf / uports : 0;
- val32 = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports);
- if (val32 > 0) {
- ipath_dbg("allocating %u pbufs/port leaves %u unused, "
- "add to kernel\n", dd->ipath_pbufsport, val32);
- dd->ipath_lastport_piobuf -= val32;
- kpiobufs += val32;
- ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
- dd->ipath_pbufsport, val32);
- }
+ /* if not an even divisor, some user ports get extra buffers */
+ dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
+ (dd->ipath_pbufsport * uports);
+ if (dd->ipath_ports_extrabuf)
+ ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
+ "ports <= %u\n", dd->ipath_pbufsport,
+ dd->ipath_ports_extrabuf);
dd->ipath_lastpioindex = 0;
dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
- ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
+ /* ipath_pioavailshadow initialized earlier */
ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
"each for %u user ports\n", kpiobufs,
piobufs, dd->ipath_pbufsport, uports);
- if (dd->ipath_pioupd_thresh) {
- if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
- dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
- if (kpiobufs < dd->ipath_pioupd_thresh)
- dd->ipath_pioupd_thresh = kpiobufs;
- }
-
ret = dd->ipath_f_early_init(dd);
if (ret) {
ipath_dev_err(dd, "Early initialization failure\n");
@@ -810,13 +798,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
}
/*
- * Cancel any possible active sends from early driver load.
- * Follows early_init because some chips have to initialize
- * PIO buffers in early_init to avoid false parity errors.
- */
- ipath_cancel_sends(dd, 0);
-
- /*
* Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
* done after early_init.
*/
@@ -836,6 +817,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
dd->ipath_pioavailregs_phys);
+
/*
* this is to detect s/w errors, which the h/w works around by
* ignoring the low 6 bits of address, if it wasn't aligned.
@@ -862,12 +844,6 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
- spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
- dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
- ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
/*
* before error clears, since we expect serdes pll errors during
* this, the first time after reset
@@ -940,6 +916,19 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
else
enable_chip(dd, reinit);
+ /* after enable_chip, so pioavailshadow setup */
+ ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
+
+ /*
+ * Cancel any possible active sends from early driver load.
+ * Follows early_init because some chips have to initialize
+ * PIO buffers in early_init to avoid false parity errors.
+ * After enable and ipath_chg_pioavailkernel so we can safely
+ * enable pioavail updates and PIOENABLE; packets are now
+ * ready to go out.
+ */
+ ipath_cancel_sends(dd, 1);
+
if (!reinit) {
/*
* Used when we close a port, for DMA already in flight
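And for the kernel-vs-user split computed in ipath_init_chip() above, a similarly hypothetical stand-alone calculation shows how defkbufs, ipath_lastport_piobuf, ipath_pbufsport and ipath_ports_extrabuf fall out. The chip numbers are made up and ipath_kpiobufs is assumed unset, so the default is used.

#include <stdio.h>

int main(void)
{
	unsigned piobufs     = 152;	/* piobcnt2k + piobcnt4k (hypothetical) */
	unsigned pioreserved = 3;	/* dd->ipath_pioreserved (hypothetical) */
	unsigned uports      = 4;	/* cfgports - 1                         */

	/* module parameter ipath_kpiobufs not set -> take the default */
	unsigned kpiobufs = (piobufs > 144 ? 32 : 16) + pioreserved;

	unsigned lastport_piobuf = piobufs - kpiobufs;
	unsigned pbufsport       = uports ? lastport_piobuf / uports : 0;
	unsigned extrabuf        = lastport_piobuf - pbufsport * uports;

	printf("kernel bufs %u, user bufs %u, %u per port, %u port(s) get +1\n",
	       kpiobufs, lastport_piobuf, pbufsport, extrabuf);
	return 0;
}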
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 1b58f4737c7..26900b3b7a4 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -38,42 +38,12 @@
#include "ipath_verbs.h"
#include "ipath_common.h"
-/*
- * clear (write) a pio buffer, to clear a parity error. This routine
- * should only be called when in freeze mode, and the buffer should be
- * canceled afterwards.
- */
-static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
-{
- u32 __iomem *pbuf;
- u32 dwcnt; /* dword count to write */
- if (pnum < dd->ipath_piobcnt2k) {
- pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum *
- dd->ipath_palign);
- dwcnt = dd->ipath_piosize2k >> 2;
- }
- else {
- pbuf = (u32 __iomem *) (dd->ipath_pio4kbase +
- (pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
- dwcnt = dd->ipath_piosize4k >> 2;
- }
- dev_info(&dd->pcidev->dev,
- "Rewrite PIO buffer %u, to recover from parity error\n",
- pnum);
-
- /* no flush required, since already in freeze */
- writel(dwcnt + 1, pbuf);
- while (--dwcnt)
- writel(0, pbuf++);
-}
/*
* Called when we might have an error that is specific to a particular
* PIO buffer, and may need to cancel that buffer, so it can be re-used.
- * If rewrite is true, and bits are set in the sendbufferror registers,
- * we'll write to the buffer, for error recovery on parity errors.
*/
-void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
+void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
{
u32 piobcnt;
unsigned long sbuf[4];
@@ -109,11 +79,8 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
}
for (i = 0; i < piobcnt; i++)
- if (test_bit(i, sbuf)) {
- if (rewrite)
- ipath_clrpiobuf(dd, i);
+ if (test_bit(i, sbuf))
ipath_disarm_piobufs(dd, i, 1);
- }
/* ignore armlaunch errs for a bit */
dd->ipath_lastcancel = jiffies+3;
}
@@ -164,7 +131,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
{
u64 ignore_this_time = 0;
- ipath_disarm_senderrbufs(dd, 0);
+ ipath_disarm_senderrbufs(dd);
if ((errs & E_SUM_LINK_PKTERRS) &&
!(dd->ipath_flags & IPATH_LINKACTIVE)) {
/*
@@ -909,8 +876,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* processes (causing armlaunch), send errors due to going into freeze mode,
* etc., and try to avoid causing extra interrupts while doing so.
* Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it for anything changing while in freeze mode
- * (we don't want to wait for the next pio buffer state change).
+ * because the chip won't do it while in freeze mode (the register values
+ * themselves are kept correct).
* Make sure that we don't lose any important interrupts by using the chip
* feature that says that writing 0 to a bit in *clear that is set in
* *status will cause an interrupt to be generated again (if allowed by
@@ -918,44 +885,23 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
*/
void ipath_clear_freeze(struct ipath_devdata *dd)
{
- int i, im;
- u64 val;
-
/* disable error interrupts, to avoid confusion */
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
/* also disable interrupts; errormask is sometimes overwritten */
ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
- /*
- * clear all sends, because they have may been
- * completed by usercode while in freeze mode, and
- * therefore would not be sent, and eventually
- * might cause the process to run out of bufs
- */
- ipath_cancel_sends(dd, 0);
+ ipath_cancel_sends(dd, 1);
+
+ /* clear the freeze, and be sure chip saw it */
ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
dd->ipath_control);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
- /* ensure pio avail updates continue */
+ /* force in-memory update now we are out of freeze */
ipath_force_pio_avail_update(dd);
/*
- * We just enabled pioavailupdate, so dma copy is almost certainly
- * not yet right, so read the registers directly. Similar to init
- */
- for (i = 0; i < dd->ipath_pioavregs; i++) {
- /* deal with 6110 chip bug */
- im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
- i ^ 1 : i;
- val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
- dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
- dd->ipath_pioavailshadow[i] = val |
- (~dd->ipath_pioavailkernel[i] <<
- INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
- }
-
- /*
* force new interrupt if any hwerr, error or interrupt bits are
* still set, and clear "safe" send packet errors related to freeze
* and cancelling sends. Re-enable error interrupts before possible
@@ -1312,10 +1258,8 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
- if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
- handle_layer_pioavail(dd);
- else
- ipath_dbg("unexpected BUFAVAIL intr\n");
+ /* always process; sdma verbs uses PIO for acks and VL15 */
+ handle_layer_pioavail(dd);
}
ret = IRQ_HANDLED;
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 202337ae90d..59a8b254b97 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -117,6 +117,10 @@ struct ipath_portdata {
u16 port_subport_cnt;
/* non-zero if port is being shared. */
u16 port_subport_id;
+ /* number of pio bufs for this port (all procs, if shared) */
+ u32 port_piocnt;
+ /* first pio buffer for this port */
+ u32 port_pio_base;
/* chip offset of PIO buffers for this port */
u32 port_piobufs;
/* how many alloc_pages() chunks in port_rcvegrbuf_pages */
@@ -155,8 +159,8 @@ struct ipath_portdata {
/* saved total number of polled urgent packets for poll edge trigger */
u32 port_urgent_poll;
/* pid of process using this port */
- pid_t port_pid;
- pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
+ struct pid *port_pid;
+ struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
/* same size as task_struct .comm[] */
char port_comm[16];
/* pkeys set by this use of this port */
@@ -384,6 +388,8 @@ struct ipath_devdata {
u32 ipath_lastrpkts;
/* pio bufs allocated per port */
u32 ipath_pbufsport;
+ /* if remainder on bufs/port, ports < extrabuf get 1 extra */
+ u32 ipath_ports_extrabuf;
u32 ipath_pioupd_thresh; /* update threshold, some chips */
/*
* number of ports configured as max; zero is set to number chip
@@ -477,7 +483,7 @@ struct ipath_devdata {
/* SendDMA related entries */
spinlock_t ipath_sdma_lock;
- u64 ipath_sdma_status;
+ unsigned long ipath_sdma_status;
unsigned long ipath_sdma_abort_jiffies;
unsigned long ipath_sdma_abort_intr_timeout;
unsigned long ipath_sdma_buf_jiffies;
@@ -816,8 +822,8 @@ struct ipath_devdata {
#define IPATH_SDMA_DISARMED 1
#define IPATH_SDMA_DISABLED 2
#define IPATH_SDMA_LAYERBUF 3
-#define IPATH_SDMA_RUNNING 62
-#define IPATH_SDMA_SHUTDOWN 63
+#define IPATH_SDMA_RUNNING 30
+#define IPATH_SDMA_SHUTDOWN 31
/* bit combinations that correspond to abort states */
#define IPATH_SDMA_ABORT_NONE 0
@@ -1011,7 +1017,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *);
int ipath_update_eeprom_log(struct ipath_devdata *dd);
void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
-void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
+void ipath_disarm_senderrbufs(struct ipath_devdata *);
void ipath_force_pio_avail_update(struct ipath_devdata *);
void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
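
Lowering IPATH_SDMA_RUNNING/SHUTDOWN to bits 30/31 goes with the switch of ipath_sdma_status to unsigned long: the generic set_bit()/test_bit() helpers operate on unsigned long words, which hold only bits 0..31 on 32-bit kernels. A standalone (userspace) illustration of why the bit numbers must fit within a long:

#include <limits.h>
#include <stdio.h>

#define SDMA_RUNNING  30        /* must be < CHAR_BIT * sizeof(unsigned long) */

int main(void)
{
        unsigned long status = 0;

        status |= 1UL << SDMA_RUNNING;                  /* set_bit() analogue */
        printf("bits per long: %zu, running: %d\n",
               CHAR_BIT * sizeof(status),
               !!(status & (1UL << SDMA_RUNNING)));     /* test_bit() analogue */
        return 0;
}
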
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index dd5b6e9d57c..4715911101e 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -242,7 +242,6 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
struct ipath_qp *q, **qpp;
unsigned long flags;
- int fnd = 0;
spin_lock_irqsave(&qpt->lock, flags);
@@ -253,51 +252,40 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
*qpp = qp->next;
qp->next = NULL;
atomic_dec(&qp->refcount);
- fnd = 1;
break;
}
}
spin_unlock_irqrestore(&qpt->lock, flags);
-
- if (!fnd)
- return;
-
- free_qpn(qpt, qp->ibqp.qp_num);
-
- wait_event(qp->wait, !atomic_read(&qp->refcount));
}
/**
- * ipath_free_all_qps - remove all QPs from the table
+ * ipath_free_all_qps - check for QPs still in use
* @qpt: the QP table to empty
+ *
+ * There should not be any QPs still in use.
+ * Free memory for table.
*/
-void ipath_free_all_qps(struct ipath_qp_table *qpt)
+unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
unsigned long flags;
- struct ipath_qp *qp, *nqp;
- u32 n;
+ struct ipath_qp *qp;
+ u32 n, qp_inuse = 0;
+ spin_lock_irqsave(&qpt->lock, flags);
for (n = 0; n < qpt->max; n++) {
- spin_lock_irqsave(&qpt->lock, flags);
qp = qpt->table[n];
qpt->table[n] = NULL;
- spin_unlock_irqrestore(&qpt->lock, flags);
-
- while (qp) {
- nqp = qp->next;
- free_qpn(qpt, qp->ibqp.qp_num);
- if (!atomic_dec_and_test(&qp->refcount) ||
- !ipath_destroy_qp(&qp->ibqp))
- ipath_dbg("QP memory leak!\n");
- qp = nqp;
- }
+
+ for (; qp; qp = qp->next)
+ qp_inuse++;
}
+ spin_unlock_irqrestore(&qpt->lock, flags);
- for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
+ for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
if (qpt->map[n].page)
- free_page((unsigned long)qpt->map[n].page);
- }
+ free_page((unsigned long) qpt->map[n].page);
+ return qp_inuse;
}
/**
@@ -336,11 +324,12 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
- qp->s_busy = 0;
+ atomic_set(&qp->s_dma_busy, 0);
qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0;
qp->s_wqe = NULL;
qp->s_pkt_delay = 0;
+ qp->s_draining = 0;
qp->s_psn = 0;
qp->r_psn = 0;
qp->r_msn = 0;
@@ -353,7 +342,8 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
}
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_nak_state = 0;
- qp->r_wrid_valid = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
qp->s_rnr_timeout = 0;
qp->s_head = 0;
qp->s_tail = 0;
@@ -361,7 +351,6 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
qp->s_last = 0;
qp->s_ssn = 1;
qp->s_lsn = 0;
- qp->s_wait_credit = 0;
memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
@@ -370,17 +359,17 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
qp->r_rq.wq->head = 0;
qp->r_rq.wq->tail = 0;
}
- qp->r_reuse_sge = 0;
}
/**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
+ * ipath_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
* @err: the receive completion error to signal if a RWQE is active
*
* Flushes both send and receive work queues.
* Returns true if last WQE event should be generated.
* The QP s_lock should be held and interrupts disabled.
+ * If we are already in the error state, just return.
*/
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@@ -389,8 +378,10 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
struct ib_wc wc;
int ret = 0;
- ipath_dbg("QP%d/%d in error state (%d)\n",
- qp->ibqp.qp_num, qp->remote_qpn, err);
+ if (qp->state == IB_QPS_ERR)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->timerwait))
@@ -399,39 +390,21 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);
- wc.vendor_err = 0;
- wc.byte_len = 0;
- wc.imm_data = 0;
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (qp->s_last != qp->s_head)
+ ipath_schedule_send(qp);
+
+ memset(&wc, 0, sizeof(wc));
wc.qp = &qp->ibqp;
- wc.src_qp = 0;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = 0;
- wc.sl = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- if (qp->r_wrid_valid) {
- qp->r_wrid_valid = 0;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
wc.wr_id = qp->r_wr_id;
- wc.opcode = IB_WC_RECV;
wc.status = err;
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
}
wc.status = IB_WC_WR_FLUSH_ERR;
- while (qp->s_last != qp->s_head) {
- struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
- wc.wr_id = wqe->wr.wr_id;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
- }
- qp->s_cur = qp->s_tail = qp->s_head;
- qp->s_hdrwords = 0;
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
if (qp->r_rq.wq) {
struct ipath_rwq *wq;
u32 head;
@@ -447,7 +420,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
tail = wq->tail;
if (tail >= qp->r_rq.size)
tail = 0;
- wc.opcode = IB_WC_RECV;
while (tail != head) {
wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
if (++tail >= qp->r_rq.size)
@@ -460,6 +432,7 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
} else if (qp->ibqp.event_handler)
ret = 1;
+bail:
return ret;
}
@@ -478,11 +451,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ipath_ibdev *dev = to_idev(ibqp->device);
struct ipath_qp *qp = to_iqp(ibqp);
enum ib_qp_state cur_state, new_state;
- unsigned long flags;
int lastwqe = 0;
int ret;
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irq(&qp->s_lock);
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : qp->state;
@@ -535,16 +507,42 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
switch (new_state) {
case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->timerwait))
+ list_del_init(&qp->timerwait);
+ if (!list_empty(&qp->piowait))
+ list_del_init(&qp->piowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~IPATH_S_ANY_WAIT;
+ spin_unlock_irq(&qp->s_lock);
+ /* Stop the sending tasklet */
+ tasklet_kill(&qp->s_task);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ spin_lock_irq(&qp->s_lock);
+ }
ipath_reset_qp(qp, ibqp->qp_type);
break;
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
case IB_QPS_ERR:
lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
break;
default:
+ qp->state = new_state;
break;
-
}
if (attr_mask & IB_QP_PKEY_INDEX)
@@ -597,8 +595,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
qp->s_max_rd_atomic = attr->max_rd_atomic;
- qp->state = new_state;
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock_irq(&qp->s_lock);
if (lastwqe) {
struct ib_event ev;
@@ -612,7 +609,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto bail;
inval:
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock_irq(&qp->s_lock);
ret = -EINVAL;
bail:
@@ -643,7 +640,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->pkey_index = qp->s_pkey_index;
attr->alt_pkey_index = 0;
attr->en_sqd_async_notify = 0;
- attr->sq_draining = 0;
+ attr->sq_draining = qp->s_draining;
attr->max_rd_atomic = qp->s_max_rd_atomic;
attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
attr->min_rnr_timer = qp->r_min_rnr_timer;
@@ -833,6 +830,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
spin_lock_init(&qp->r_rq.lock);
atomic_set(&qp->refcount, 0);
init_waitqueue_head(&qp->wait);
+ init_waitqueue_head(&qp->wait_dma);
tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
INIT_LIST_HEAD(&qp->piowait);
INIT_LIST_HEAD(&qp->timerwait);
@@ -926,6 +924,7 @@ bail_ip:
else
vfree(qp->r_rq.wq);
ipath_free_qp(&dev->qp_table, qp);
+ free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_qp:
kfree(qp);
bail_swq:
@@ -947,41 +946,44 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
{
struct ipath_qp *qp = to_iqp(ibqp);
struct ipath_ibdev *dev = to_idev(ibqp->device);
- unsigned long flags;
- spin_lock_irqsave(&qp->s_lock, flags);
- qp->state = IB_QPS_ERR;
- spin_unlock_irqrestore(&qp->s_lock, flags);
- spin_lock(&dev->n_qps_lock);
- dev->n_qps_allocated--;
- spin_unlock(&dev->n_qps_lock);
+ /* Make sure HW and driver activity is stopped. */
+ spin_lock_irq(&qp->s_lock);
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+ spin_lock(&dev->pending_lock);
+ if (!list_empty(&qp->timerwait))
+ list_del_init(&qp->timerwait);
+ if (!list_empty(&qp->piowait))
+ list_del_init(&qp->piowait);
+ spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~IPATH_S_ANY_WAIT;
+ spin_unlock_irq(&qp->s_lock);
+ /* Stop the sending tasklet */
+ tasklet_kill(&qp->s_task);
+ wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
+ } else
+ spin_unlock_irq(&qp->s_lock);
- /* Stop the sending tasklet. */
- tasklet_kill(&qp->s_task);
+ ipath_free_qp(&dev->qp_table, qp);
if (qp->s_tx) {
atomic_dec(&qp->refcount);
if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
kfree(qp->s_tx->txreq.map_addr);
+ spin_lock_irq(&dev->pending_lock);
+ list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
+ spin_unlock_irq(&dev->pending_lock);
+ qp->s_tx = NULL;
}
- /* Make sure the QP isn't on the timeout list. */
- spin_lock_irqsave(&dev->pending_lock, flags);
- if (!list_empty(&qp->timerwait))
- list_del_init(&qp->timerwait);
- if (!list_empty(&qp->piowait))
- list_del_init(&qp->piowait);
- if (qp->s_tx)
- list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
- /*
- * Make sure that the QP is not in the QPN table so receive
- * interrupts will discard packets for this QP. XXX Also remove QP
- * from multicast table.
- */
- if (atomic_read(&qp->refcount) != 0)
- ipath_free_qp(&dev->qp_table, qp);
+ /* all user's cleaned up, mark it available */
+ free_qpn(&dev->qp_table, qp->ibqp.qp_num);
+ spin_lock(&dev->n_qps_lock);
+ dev->n_qps_allocated--;
+ spin_unlock(&dev->n_qps_lock);
if (qp->ip)
kref_put(&qp->ip->ref, ipath_release_mmap_info);
@@ -1026,48 +1028,6 @@ bail:
}
/**
- * ipath_sqerror_qp - put a QP's send queue into an error state
- * @qp: QP who's send queue will be put into an error state
- * @wc: the WC responsible for putting the QP in this state
- *
- * Flushes the send work queue.
- * The QP s_lock should be held and interrupts disabled.
- */
-
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
- ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
- qp->ibqp.qp_num, qp->remote_qpn, wc->status);
-
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->timerwait))
- list_del_init(&qp->timerwait);
- if (!list_empty(&qp->piowait))
- list_del_init(&qp->piowait);
- spin_unlock(&dev->pending_lock);
-
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
-
- wc->status = IB_WC_WR_FLUSH_ERR;
-
- while (qp->s_last != qp->s_head) {
- wqe = get_swqe_ptr(qp, qp->s_last);
- wc->wr_id = wqe->wr.wr_id;
- wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- }
- qp->s_cur = qp->s_tail = qp->s_head;
- qp->state = IB_QPS_SQE;
-}
-
-/**
* ipath_get_credit - flush the send work queue of a QP
* @qp: the QP whose send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
@@ -1093,9 +1053,10 @@ void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
}
/* Restart sending if it was blocked due to lack of credits. */
- if (qp->s_cur != qp->s_head &&
+ if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
+ qp->s_cur != qp->s_head &&
(qp->s_lsn == (u32) -1 ||
ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
qp->s_lsn + 1) <= 0))
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
}
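
Both the RESET transition in ipath_modify_qp() and ipath_destroy_qp() above follow the same quiesce order: drop s_lock, kill the send tasklet, then sleep until the in-flight DMA count reaches zero before freeing anything. A kernel-style sketch of that pattern under hypothetical names (not the driver's structures):

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/wait.h>

struct sendq {
        atomic_t dma_busy;              /* in-flight SDMA descriptors */
        wait_queue_head_t wait_dma;     /* init_waitqueue_head() at setup */
        struct tasklet_struct task;
};

/* completion path: one call per finished descriptor */
static void sendq_dma_done(struct sendq *sq)
{
        if (atomic_dec_and_test(&sq->dma_busy))
                wake_up(&sq->wait_dma);
}

/* destroy/reset path: called with no locks held */
static void sendq_quiesce(struct sendq *sq)
{
        tasklet_kill(&sq->task);        /* no further sends get scheduled */
        wait_event(sq->wait_dma, !atomic_read(&sq->dma_busy));
}
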
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index c405dfba553..108df667d2e 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -92,6 +92,10 @@ static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
u32 bth0;
u32 bth2;
+ /* Don't send an ACK if we aren't supposed to. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ goto bail;
+
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
@@ -238,14 +242,25 @@ int ipath_make_rc_req(struct ipath_qp *qp)
ipath_make_rc_ack(dev, qp, ohdr, pmtu))
goto done;
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
- qp->s_rnr_timeout || qp->s_wait_credit)
- goto bail;
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= IPATH_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
- /* Limit the number of packets sent without an ACK. */
- if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
- qp->s_wait_credit = 1;
- dev->n_rc_stalls++;
+ /* Leave BUSY set until RNR timeout. */
+ if (qp->s_rnr_timeout) {
+ qp->s_flags |= IPATH_S_WAITING;
goto bail;
}
@@ -257,6 +272,9 @@ int ipath_make_rc_req(struct ipath_qp *qp)
wqe = get_swqe_ptr(qp, qp->s_cur);
switch (qp->s_state) {
default:
+ if (!(ib_ipath_state_ops[qp->state] &
+ IPATH_PROCESS_NEXT_SEND_OK))
+ goto bail;
/*
* Resend an old request or start a new one.
*
@@ -294,8 +312,10 @@ int ipath_make_rc_req(struct ipath_qp *qp)
case IB_WR_SEND_WITH_IMM:
/* If no credit, return. */
if (qp->s_lsn != (u32) -1 &&
- ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
+ ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
goto bail;
+ }
wqe->lpsn = wqe->psn;
if (len > pmtu) {
wqe->lpsn += (len - 1) / pmtu;
@@ -325,8 +345,10 @@ int ipath_make_rc_req(struct ipath_qp *qp)
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (qp->s_lsn != (u32) -1 &&
- ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
+ ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
+ qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
goto bail;
+ }
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
ohdr->u.rc.reth.rkey =
@@ -570,7 +592,11 @@ int ipath_make_rc_req(struct ipath_qp *qp)
ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
ret = 1;
+ goto unlock;
+
bail:
+ qp->s_flags &= ~IPATH_S_BUSY;
+unlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
@@ -606,7 +632,11 @@ static void send_rc_ack(struct ipath_qp *qp)
spin_unlock_irqrestore(&qp->s_lock, flags);
+ /* Don't try to send ACKs if the link isn't ACTIVE */
dd = dev->dd;
+ if (!(dd->ipath_flags & IPATH_LINKACTIVE))
+ goto done;
+
piobuf = ipath_getpiobuf(dd, 0, NULL);
if (!piobuf) {
/*
@@ -668,15 +698,16 @@ static void send_rc_ack(struct ipath_qp *qp)
goto done;
queue_ack:
- dev->n_rc_qacks++;
- qp->s_flags |= IPATH_S_ACK_PENDING;
- qp->s_nak_state = qp->r_nak_state;
- qp->s_ack_psn = qp->r_ack_psn;
+ if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) {
+ dev->n_rc_qacks++;
+ qp->s_flags |= IPATH_S_ACK_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+
+ /* Schedule the send tasklet. */
+ ipath_schedule_send(qp);
+ }
spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Call ipath_do_rc_send() in another thread. */
- tasklet_hi_schedule(&qp->s_task);
-
done:
return;
}
@@ -735,7 +766,7 @@ static void reset_psn(struct ipath_qp *qp, u32 psn)
/*
* Set the state to restart in the middle of a request.
* Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See ipath_do_rc_send().
+ * See ipath_make_rc_req().
*/
switch (opcode) {
case IB_WR_SEND:
@@ -771,27 +802,14 @@ done:
*
* The QP s_lock should be held and interrupts disabled.
*/
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
{
struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
struct ipath_ibdev *dev;
if (qp->s_retry == 0) {
- wc->wr_id = wqe->wr.wr_id;
- wc->status = IB_WC_RETRY_EXC_ERR;
- wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc->vendor_err = 0;
- wc->byte_len = 0;
- wc->qp = &qp->ibqp;
- wc->imm_data = 0;
- wc->src_qp = qp->remote_qpn;
- wc->wc_flags = 0;
- wc->pkey_index = 0;
- wc->slid = qp->remote_ah_attr.dlid;
- wc->sl = qp->remote_ah_attr.sl;
- wc->dlid_path_bits = 0;
- wc->port_num = 0;
- ipath_sqerror_qp(qp, wc);
+ ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
goto bail;
}
qp->s_retry--;
@@ -804,6 +822,8 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
spin_lock(&dev->pending_lock);
if (!list_empty(&qp->timerwait))
list_del_init(&qp->timerwait);
+ if (!list_empty(&qp->piowait))
+ list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);
if (wqe->wr.opcode == IB_WR_RDMA_READ)
@@ -812,7 +832,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
reset_psn(qp, psn);
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
bail:
return;
@@ -820,13 +840,7 @@ bail:
static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
{
- if (qp->s_last_psn != psn) {
- qp->s_last_psn = psn;
- if (qp->s_wait_credit) {
- qp->s_wait_credit = 0;
- tasklet_hi_schedule(&qp->s_task);
- }
- }
+ qp->s_last_psn = psn;
}
/**
@@ -845,6 +859,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
+ enum ib_wc_status status;
struct ipath_swqe *wqe;
int ret = 0;
u32 ack_psn;
@@ -909,7 +924,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
*/
update_last_psn(qp, wqe->psn - 1);
/* Retry this request. */
- ipath_restart_rc(qp, wqe->psn, &wc);
+ ipath_restart_rc(qp, wqe->psn);
/*
* No need to process the ACK/NAK since we are
* restarting an earlier request.
@@ -925,32 +940,23 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
qp->s_num_rd_atomic--;
/* Restart sending task if fence is complete */
- if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
- !qp->s_num_rd_atomic) {
- qp->s_flags &= ~IPATH_S_FENCE_PENDING;
- tasklet_hi_schedule(&qp->s_task);
- } else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
- qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
- tasklet_hi_schedule(&qp->s_task);
- }
+ if (((qp->s_flags & IPATH_S_FENCE_PENDING) &&
+ !qp->s_num_rd_atomic) ||
+ qp->s_flags & IPATH_S_RDMAR_PENDING)
+ ipath_schedule_send(qp);
}
/* Post a send completion queue entry if requested. */
if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
+ memset(&wc, 0, sizeof wc);
wc.wr_id = wqe->wr.wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc.vendor_err = 0;
wc.byte_len = wqe->length;
- wc.imm_data = 0;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
}
qp->s_retry = qp->s_retry_cnt;
@@ -971,6 +977,8 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
} else {
if (++qp->s_last >= qp->s_size)
qp->s_last = 0;
+ if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur)
+ qp->s_draining = 0;
if (qp->s_last == qp->s_tail)
break;
wqe = get_swqe_ptr(qp, qp->s_last);
@@ -994,7 +1002,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
*/
if (ipath_cmp24(qp->s_psn, psn) <= 0) {
reset_psn(qp, psn + 1);
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
}
} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
qp->s_state = OP(SEND_LAST);
@@ -1012,7 +1020,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
if (qp->s_last == qp->s_tail)
goto bail;
if (qp->s_rnr_retry == 0) {
- wc.status = IB_WC_RNR_RETRY_EXC_ERR;
+ status = IB_WC_RNR_RETRY_EXC_ERR;
goto class_b;
}
if (qp->s_rnr_retry_cnt < 7)
@@ -1033,6 +1041,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
IPATH_AETH_CREDIT_MASK];
ipath_insert_rnr_queue(qp);
+ ipath_schedule_send(qp);
goto bail;
case 3: /* NAK */
@@ -1050,37 +1059,25 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
* RDMA READ response which terminates the RDMA
* READ.
*/
- ipath_restart_rc(qp, psn, &wc);
+ ipath_restart_rc(qp, psn);
break;
case 1: /* Invalid Request */
- wc.status = IB_WC_REM_INV_REQ_ERR;
+ status = IB_WC_REM_INV_REQ_ERR;
dev->n_other_naks++;
goto class_b;
case 2: /* Remote Access Error */
- wc.status = IB_WC_REM_ACCESS_ERR;
+ status = IB_WC_REM_ACCESS_ERR;
dev->n_other_naks++;
goto class_b;
case 3: /* Remote Operation Error */
- wc.status = IB_WC_REM_OP_ERR;
+ status = IB_WC_REM_OP_ERR;
dev->n_other_naks++;
class_b:
- wc.wr_id = wqe->wr.wr_id;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc.vendor_err = 0;
- wc.byte_len = 0;
- wc.qp = &qp->ibqp;
- wc.imm_data = 0;
- wc.src_qp = qp->remote_qpn;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- ipath_sqerror_qp(qp, &wc);
+ ipath_send_complete(qp, wqe, status);
+ ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
break;
default:
@@ -1126,8 +1123,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
int header_in_data)
{
struct ipath_swqe *wqe;
+ enum ib_wc_status status;
unsigned long flags;
- struct ib_wc wc;
int diff;
u32 pad;
u32 aeth;
@@ -1135,6 +1132,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ goto ack_done;
+
/* Ignore invalid responses. */
if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
goto ack_done;
@@ -1159,6 +1160,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
if (unlikely(qp->s_last == qp->s_tail))
goto ack_done;
wqe = get_swqe_ptr(qp, qp->s_last);
+ status = IB_WC_SUCCESS;
switch (opcode) {
case OP(ACKNOWLEDGE):
@@ -1187,6 +1189,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
wqe = get_swqe_ptr(qp, qp->s_last);
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
goto ack_op_err;
+ qp->r_flags &= ~IPATH_R_RDMAR_SEQ;
/*
* If this is a response to a resent RDMA read, we
* have to be careful to copy the data to the right
@@ -1200,7 +1203,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
/* no AETH, no ACK */
if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
dev->n_rdma_seq++;
- ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+ if (qp->r_flags & IPATH_R_RDMAR_SEQ)
+ goto ack_done;
+ qp->r_flags |= IPATH_R_RDMAR_SEQ;
+ ipath_restart_rc(qp, qp->s_last_psn + 1);
goto ack_done;
}
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
@@ -1261,7 +1267,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
/* ACKs READ req. */
if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
dev->n_rdma_seq++;
- ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+ if (qp->r_flags & IPATH_R_RDMAR_SEQ)
+ goto ack_done;
+ qp->r_flags |= IPATH_R_RDMAR_SEQ;
+ ipath_restart_rc(qp, qp->s_last_psn + 1);
goto ack_done;
}
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
@@ -1291,31 +1300,16 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
goto ack_done;
}
-ack_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- goto bail;
-
ack_op_err:
- wc.status = IB_WC_LOC_QP_OP_ERR;
+ status = IB_WC_LOC_QP_OP_ERR;
goto ack_err;
ack_len_err:
- wc.status = IB_WC_LOC_LEN_ERR;
+ status = IB_WC_LOC_LEN_ERR;
ack_err:
- wc.wr_id = wqe->wr.wr_id;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc.vendor_err = 0;
- wc.byte_len = 0;
- wc.imm_data = 0;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- ipath_sqerror_qp(qp, &wc);
+ ipath_send_complete(qp, wqe, status);
+ ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ack_done:
spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
return;
@@ -1384,7 +1378,12 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
psn &= IPATH_PSN_MASK;
e = NULL;
old_req = 1;
+
spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this now that we hold the s_lock. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ goto unlock_done;
+
for (i = qp->r_head_ack_queue; ; i = prev) {
if (i == qp->s_tail_ack_queue)
old_req = 0;
@@ -1512,7 +1511,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
break;
}
qp->r_nak_state = 0;
- tasklet_hi_schedule(&qp->s_task);
+ ipath_schedule_send(qp);
unlock_done:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1523,13 +1522,12 @@ send_ack:
return 0;
}
-static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
+void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
{
unsigned long flags;
int lastwqe;
spin_lock_irqsave(&qp->s_lock, flags);
- qp->state = IB_QPS_ERR;
lastwqe = ipath_error_qp(qp, err);
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1545,18 +1543,15 @@ static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
{
- unsigned long flags;
unsigned next;
next = n + 1;
if (next > IPATH_MAX_RDMA_ATOMIC)
next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
if (n == qp->s_tail_ack_queue) {
qp->s_tail_ack_queue = next;
qp->s_ack_state = OP(ACKNOWLEDGE);
}
- spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
@@ -1585,6 +1580,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
int diff;
struct ib_reth *reth;
int header_in_data;
+ unsigned long flags;
/* Validate the SLID. See Ch. 9.6.1.5 */
if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
@@ -1643,11 +1639,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
opcode == OP(SEND_LAST) ||
opcode == OP(SEND_LAST_WITH_IMMEDIATE))
break;
- nack_inv:
- ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
- qp->r_nak_state = IB_NAK_INVALID_REQUEST;
- qp->r_ack_psn = qp->r_psn;
- goto send_ack;
+ goto nack_inv;
case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_MIDDLE):
@@ -1673,18 +1665,13 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
break;
}
- wc.imm_data = 0;
- wc.wc_flags = 0;
+ memset(&wc, 0, sizeof wc);
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- if (!ipath_get_rwqe(qp, 0)) {
- rnr_nak:
- qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
- qp->r_ack_psn = qp->r_psn;
- goto send_ack;
- }
+ if (!ipath_get_rwqe(qp, 0))
+ goto rnr_nak;
qp->r_rcv_len = 0;
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
@@ -1741,20 +1728,19 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
goto nack_inv;
ipath_copy_sge(&qp->r_sge, data, tlen);
qp->r_msn++;
- if (!qp->r_wrid_valid)
+ if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
break;
- qp->r_wrid_valid = 0;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
+ if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
@@ -1815,9 +1801,13 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
next = qp->r_head_ack_queue + 1;
if (next > IPATH_MAX_RDMA_ATOMIC)
next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ goto unlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
- goto nack_inv;
+ goto nack_inv_unlck;
ipath_update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
@@ -1838,7 +1828,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
rkey, IB_ACCESS_REMOTE_READ);
if (unlikely(!ok))
- goto nack_acc;
+ goto nack_acc_unlck;
/*
* Update the next expected PSN. We add 1 later
* below, so only add the remainder here.
@@ -1865,13 +1855,12 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
qp->r_psn++;
qp->r_state = opcode;
qp->r_nak_state = 0;
- barrier();
qp->r_head_ack_queue = next;
- /* Call ipath_do_rc_send() in another thread. */
- tasklet_hi_schedule(&qp->s_task);
+ /* Schedule the send tasklet. */
+ ipath_schedule_send(qp);
- goto done;
+ goto unlock;
}
case OP(COMPARE_SWAP):
@@ -1890,9 +1879,13 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
next = qp->r_head_ack_queue + 1;
if (next > IPATH_MAX_RDMA_ATOMIC)
next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ /* Double check we can process this while holding the s_lock. */
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
+ goto unlock;
if (unlikely(next == qp->s_tail_ack_queue)) {
if (!qp->s_ack_queue[next].sent)
- goto nack_inv;
+ goto nack_inv_unlck;
ipath_update_ack_queue(qp, next);
}
if (!header_in_data)
@@ -1902,13 +1895,13 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
be32_to_cpu(ateth->vaddr[1]);
if (unlikely(vaddr & (sizeof(u64) - 1)))
- goto nack_inv;
+ goto nack_inv_unlck;
rkey = be32_to_cpu(ateth->rkey);
/* Check rkey & NAK */
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
sizeof(u64), vaddr, rkey,
IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc;
+ goto nack_acc_unlck;
/* Perform atomic OP and save result. */
maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
sdata = be64_to_cpu(ateth->swap_data);
@@ -1925,13 +1918,12 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
qp->r_psn++;
qp->r_state = opcode;
qp->r_nak_state = 0;
- barrier();
qp->r_head_ack_queue = next;
- /* Call ipath_do_rc_send() in another thread. */
- tasklet_hi_schedule(&qp->s_task);
+ /* Schedule the send tasklet. */
+ ipath_schedule_send(qp);
- goto done;
+ goto unlock;
}
default:
@@ -1947,14 +1939,31 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
goto send_ack;
goto done;
+rnr_nak:
+ qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ qp->r_ack_psn = qp->r_psn;
+ goto send_ack;
+
+nack_inv_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+nack_inv:
+ ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+ qp->r_nak_state = IB_NAK_INVALID_REQUEST;
+ qp->r_ack_psn = qp->r_psn;
+ goto send_ack;
+
+nack_acc_unlck:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
- ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
+ ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn;
-
send_ack:
send_rc_ack(qp);
+ goto done;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
done:
return;
}
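
Several receive paths above now re-test ib_ipath_state_ops[qp->state] after taking s_lock, because the QP can move to the error or reset state between the unlocked fast-path check and lock acquisition. A generic sketch of that recheck-under-lock shape, with a toy state table standing in for the real one:

#include <linux/spinlock.h>

struct qp_like {
        spinlock_t s_lock;
        unsigned state;
};

static const int recv_ok[] = { 0, 1, 1, 0 };    /* toy table: 1 = RECV OK */

static void queue_response(struct qp_like *qp)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (!recv_ok[qp->state])        /* re-check now that s_lock is held */
                goto unlock;
        /* ... safe to update the ack queue and schedule the send here ... */
unlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
}
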
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 8ac5c1d82cc..a4b5521567f 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -78,6 +78,7 @@ const u32 ib_ipath_rnr_table[32] = {
* ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
* @qp: the QP
*
+ * Called with the QP s_lock held and interrupts disabled.
* XXX Use a simple list for now. We might need a priority
* queue if we have lots of QPs waiting for RNR timeouts
* but that should be rare.
@@ -85,9 +86,9 @@ const u32 ib_ipath_rnr_table[32] = {
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- unsigned long flags;
- spin_lock_irqsave(&dev->pending_lock, flags);
+ /* We already did a spin_lock_irqsave(), so just use spin_lock */
+ spin_lock(&dev->pending_lock);
if (list_empty(&dev->rnrwait))
list_add(&qp->timerwait, &dev->rnrwait);
else {
@@ -109,7 +110,7 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
nqp->s_rnr_timeout -= qp->s_rnr_timeout;
list_add(&qp->timerwait, l);
}
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock(&dev->pending_lock);
}
/**
@@ -140,20 +141,11 @@ int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
goto bail;
bad_lkey:
+ memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr_id;
wc.status = IB_WC_LOC_PROT_ERR;
wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
- wc.byte_len = 0;
- wc.imm_data = 0;
wc.qp = &qp->ibqp;
- wc.src_qp = 0;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = 0;
- wc.sl = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
/* Signal solicited completion event. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
ret = 0;
@@ -194,6 +186,11 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
}
spin_lock_irqsave(&rq->lock, flags);
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
+ ret = 0;
+ goto unlock;
+ }
+
wq = rq->wq;
tail = wq->tail;
/* Validate tail before using it since it is user writable. */
@@ -201,9 +198,8 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
tail = 0;
do {
if (unlikely(tail == wq->head)) {
- spin_unlock_irqrestore(&rq->lock, flags);
ret = 0;
- goto bail;
+ goto unlock;
}
/* Make sure entry is read after head index is read. */
smp_rmb();
@@ -216,7 +212,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
wq->tail = tail;
ret = 1;
- qp->r_wrid_valid = 1;
+ set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
if (handler) {
u32 n;
@@ -243,8 +239,8 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
goto bail;
}
}
+unlock:
spin_unlock_irqrestore(&rq->lock, flags);
-
bail:
return ret;
}
@@ -270,38 +266,63 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
struct ib_wc wc;
u64 sdata;
atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
- if (!qp) {
- dev->n_pkt_drops++;
- return;
- }
-again:
spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
- sqp->s_rnr_timeout) {
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- goto done;
- }
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
+ !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
- /* Get the next send request. */
- if (sqp->s_last == sqp->s_head) {
- /* Send work queue is empty. */
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- goto done;
+ sqp->s_flags |= IPATH_S_BUSY;
+
+again:
+ if (sqp->s_last == sqp->s_head)
+ goto clr_busy;
+ wqe = get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
}
/*
* We can rely on the entry not changing without the s_lock
* being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
*/
- wqe = get_swqe_ptr(sqp, sqp->s_last);
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
spin_unlock_irqrestore(&sqp->s_lock, flags);
- wc.wc_flags = 0;
- wc.imm_data = 0;
+ if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
+ dev->n_pkt_drops++;
+ /*
+ * For RC, the requester would time out and retry, so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof wc);
+ send_status = IB_WC_SUCCESS;
sqp->s_sge.sge = wqe->sg_list[0];
sqp->s_sge.sg_list = wqe->sg_list + 1;
@@ -313,75 +334,33 @@ again:
wc.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
- if (!ipath_get_rwqe(qp, 0)) {
- rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- if (sqp->s_rnr_retry == 0) {
- wc.status = IB_WC_RNR_RETRY_EXC_ERR;
- goto err;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- dev->n_rnr_naks++;
- sqp->s_rnr_timeout =
- ib_ipath_rnr_table[qp->r_min_rnr_timer];
- ipath_insert_rnr_queue(sqp);
- goto done;
- }
+ if (!ipath_get_rwqe(qp, 0))
+ goto rnr_nak;
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE))) {
- wc.status = IB_WC_REM_INV_REQ_ERR;
- goto err;
- }
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.imm_data = wqe->wr.ex.imm_data;
if (!ipath_get_rwqe(qp, 1))
goto rnr_nak;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE:
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE))) {
- wc.status = IB_WC_REM_INV_REQ_ERR;
- goto err;
- }
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
if (wqe->length == 0)
break;
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
- IB_ACCESS_REMOTE_WRITE))) {
- acc_err:
- wc.status = IB_WC_REM_ACCESS_ERR;
- err:
- wc.wr_id = wqe->wr.wr_id;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc.vendor_err = 0;
- wc.byte_len = 0;
- wc.qp = &sqp->ibqp;
- wc.src_qp = sqp->remote_qpn;
- wc.pkey_index = 0;
- wc.slid = sqp->remote_ah_attr.dlid;
- wc.sl = sqp->remote_ah_attr.sl;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- spin_lock_irqsave(&sqp->s_lock, flags);
- ipath_sqerror_qp(sqp, &wc);
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- goto done;
- }
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
break;
case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_READ))) {
- wc.status = IB_WC_REM_INV_REQ_ERR;
- goto err;
- }
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
@@ -394,11 +373,8 @@ again:
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_ATOMIC))) {
- wc.status = IB_WC_REM_INV_REQ_ERR;
- goto err;
- }
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
wqe->wr.wr.atomic.remote_addr,
wqe->wr.wr.atomic.rkey,
@@ -415,7 +391,8 @@ again:
goto send_comp;
default:
- goto done;
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
}
sge = &sqp->s_sge.sge;
@@ -448,8 +425,7 @@ again:
sqp->s_len -= len;
}
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
- wqe->wr.opcode == IB_WR_RDMA_READ)
+ if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
goto send_comp;
if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
@@ -458,32 +434,89 @@ again:
wc.opcode = IB_WC_RECV;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
- wc.vendor_err = 0;
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
- wc.dlid_path_bits = 0;
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
wqe->wr.send_flags & IB_SEND_SOLICITED);
send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- ipath_send_complete(sqp, wqe, IB_WC_SUCCESS);
+ ipath_send_complete(sqp, wqe, send_status);
goto again;
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single-threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
+ goto clr_busy;
+ sqp->s_flags |= IPATH_S_WAITING;
+ dev->n_rnr_naks++;
+ sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
+ ipath_insert_rnr_queue(sqp);
+ goto clr_busy;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ ipath_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ ipath_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~IPATH_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~IPATH_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
- if (atomic_dec_and_test(&qp->refcount))
+ if (qp && atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
-static void want_buffer(struct ipath_devdata *dd)
+static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
{
- if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) {
+ if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
+ qp->ibqp.qp_type == IB_QPT_SMI) {
unsigned long flags;
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
@@ -501,26 +534,36 @@ static void want_buffer(struct ipath_devdata *dd)
* @dev: the device we ran out of buffers on
*
* Called when we run out of PIO buffers.
+ * If we are now in the error state, return zero to flush the
+ * send work request.
*/
-static void ipath_no_bufs_available(struct ipath_qp *qp,
+static int ipath_no_bufs_available(struct ipath_qp *qp,
struct ipath_ibdev *dev)
{
unsigned long flags;
+ int ret = 1;
/*
* Note that as soon as want_buffer() is called and
* possibly before it returns, ipath_ib_piobufavail()
- * could be called. If we are still in the tasklet function,
- * tasklet_hi_schedule() will not call us until the next time
- * tasklet_hi_schedule() is called.
- * We leave the busy flag set so that another post send doesn't
- * try to put the same QP on the piowait list again.
+ * could be called. Therefore, put QP on the piowait list before
+ * enabling the PIO avail interrupt.
*/
- spin_lock_irqsave(&dev->pending_lock, flags);
- list_add_tail(&qp->piowait, &dev->piowait);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
- want_buffer(dev->dd);
- dev->n_piowait++;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
+ dev->n_piowait++;
+ qp->s_flags |= IPATH_S_WAITING;
+ qp->s_flags &= ~IPATH_S_BUSY;
+ spin_lock(&dev->pending_lock);
+ if (list_empty(&qp->piowait))
+ list_add_tail(&qp->piowait, &dev->piowait);
+ spin_unlock(&dev->pending_lock);
+ } else
+ ret = 0;
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (ret)
+ want_buffer(dev->dd, qp);
+ return ret;
}
/**
@@ -596,15 +639,13 @@ void ipath_do_send(unsigned long data)
struct ipath_qp *qp = (struct ipath_qp *)data;
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
int (*make_req)(struct ipath_qp *qp);
-
- if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
- goto bail;
+ unsigned long flags;
if ((qp->ibqp.qp_type == IB_QPT_RC ||
qp->ibqp.qp_type == IB_QPT_UC) &&
qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
ipath_ruc_loopback(qp);
- goto clear;
+ goto bail;
}
if (qp->ibqp.qp_type == IB_QPT_RC)
@@ -614,6 +655,19 @@ void ipath_do_send(unsigned long data)
else
make_req = ipath_make_ud_req;
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
+ !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ goto bail;
+ }
+
+ qp->s_flags |= IPATH_S_BUSY;
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
again:
/* Check for a constructed packet to be sent. */
if (qp->s_hdrwords != 0) {
@@ -623,8 +677,8 @@ again:
*/
if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
qp->s_cur_sge, qp->s_cur_size)) {
- ipath_no_bufs_available(qp, dev);
- goto bail;
+ if (ipath_no_bufs_available(qp, dev))
+ goto bail;
}
dev->n_unicast_xmit++;
/* Record that we sent the packet and s_hdr is empty. */
@@ -633,16 +687,20 @@ again:
if (make_req(qp))
goto again;
-clear:
- clear_bit(IPATH_S_BUSY, &qp->s_busy);
+
bail:;
}
+/*
+ * This should be called with s_lock held.
+ */
void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
enum ib_wc_status status)
{
- unsigned long flags;
- u32 last;
+ u32 old_last, last;
+
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
+ return;
/* See ch. 11.2.4.1 and 10.7.3.1 */
if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
@@ -650,27 +708,25 @@ void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
status != IB_WC_SUCCESS) {
struct ib_wc wc;
+ memset(&wc, 0, sizeof wc);
wc.wr_id = wqe->wr.wr_id;
wc.status = status;
wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- wc.vendor_err = 0;
- wc.byte_len = wqe->length;
- wc.imm_data = 0;
wc.qp = &qp->ibqp;
- wc.src_qp = 0;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = 0;
- wc.sl = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ if (status == IB_WC_SUCCESS)
+ wc.byte_len = wqe->length;
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ status != IB_WC_SUCCESS);
}
- spin_lock_irqsave(&qp->s_lock, flags);
- last = qp->s_last;
+ old_last = last = qp->s_last;
if (++last >= qp->s_size)
last = 0;
qp->s_last = last;
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
}
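
The tail of ipath_send_complete() above is circular-index bookkeeping: advance s_last with wraparound, and drag s_cur/s_tail forward if they were still parked on the entry just completed. A tiny self-contained version of that index math (struct and names are illustrative):

#include <stdio.h>

struct ring {
        unsigned size, last, cur, tail;
};

static void complete_one(struct ring *r)
{
        unsigned old_last = r->last;
        unsigned last = (old_last + 1 >= r->size) ? 0 : old_last + 1;

        r->last = last;
        if (r->cur == old_last)         /* nothing newer was in progress */
                r->cur = last;
        if (r->tail == old_last)
                r->tail = last;
}

int main(void)
{
        struct ring r = { .size = 4, .last = 3, .cur = 3, .tail = 3 };

        complete_one(&r);               /* wraps: all three indices go 3 -> 0 */
        printf("last=%u cur=%u tail=%u\n", r.last, r.cur, r.tail);
        return 0;
}
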
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 1974df7a9f7..3697449c1ba 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -308,13 +308,15 @@ static void sdma_abort_task(unsigned long opaque)
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
/*
- * Don't restart sdma here. Wait until link is up to ACTIVE.
- * VL15 MADs used to bring the link up use PIO, and multiple
- * link transitions otherwise cause the sdma engine to be
+ * Don't restart sdma here (with the exception
+ * below). Wait until link is up to ACTIVE. VL15 MADs
+ * used to bring the link up use PIO, and multiple link
+ * transitions otherwise cause the sdma engine to be
* stopped and started multiple times.
- * The disable is done here, including the shadow, so the
- * state is kept consistent.
- * See ipath_restart_sdma() for the actual starting of sdma.
+ * The disable is done here, including the shadow,
+ * so the state is kept consistent.
+ * See ipath_restart_sdma() for the actual starting
+ * of sdma.
*/
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
@@ -326,6 +328,13 @@ static void sdma_abort_task(unsigned long opaque)
/* make sure I see next message */
dd->ipath_sdma_abort_jiffies = 0;
+ /*
+ * Not everything that takes SDMA offline is a link
+ * status change. If the link was up, restart SDMA.
+ */
+ if (dd->ipath_flags & IPATH_LINKACTIVE)
+ ipath_restart_sdma(dd);
+
goto done;
}
@@ -427,7 +436,12 @@ int setup_sdma(struct ipath_devdata *dd)
goto done;
}
- dd->ipath_sdma_status = 0;
+ /*
+ * Set initial status as if we had been up, then gone down.
+ * This lets initial start on transition to ACTIVE be the
+ * same as restart after link flap.
+ */
+ dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
dd->ipath_sdma_abort_jiffies = 0;
dd->ipath_sdma_generation = 0;
dd->ipath_sdma_descq_tail = 0;
@@ -449,16 +463,19 @@ int setup_sdma(struct ipath_devdata *dd)
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
dd->ipath_sdma_head_phys);
- /* Reserve all the former "kernel" piobufs */
- n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
- for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
+ /*
+ * Reserve all the former "kernel" piobufs, using the high number
+ * range so we get as many 4K buffers as possible
+ */
+ n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
+ i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
+ ipath_chg_pioavailkernel(dd, i, n - i , 0);
+ for (; i < n; ++i) {
unsigned word = i / 64;
unsigned bit = i & 63;
BUG_ON(word >= 3);
senddmabufmask[word] |= 1ULL << bit;
}
- ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
- n - dd->ipath_lastport_piobuf, 0);
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
senddmabufmask[0]);
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
@@ -615,6 +632,9 @@ void ipath_restart_sdma(struct ipath_devdata *dd)
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+ /* notify upper layers */
+ ipath_ib_piobufavail(dd->verbs_dev);
+
bail:
return;
}
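
The setup_sdma() hunk above reserves the former "kernel" PIO buffers from the high end of the range and records them in a three-word, 64-bit-per-word mask using word = i / 64 and bit = i & 63. A small sketch of just that mask-building arithmetic; the buffer counts are made up for the demo:

#include <stdint.h>
#include <stdio.h>

#define MASK_WORDS 3

int main(void)
{
	uint64_t bufmask[MASK_WORDS] = { 0, 0, 0 };
	unsigned first = 40;   /* hypothetical first buffer to reserve */
	unsigned count = 128;  /* hypothetical total buffer count */
	unsigned i;

	for (i = first; i < count; ++i) {
		unsigned word = i / 64;   /* which 64-bit word */
		unsigned bit  = i & 63;   /* bit within that word */

		if (word >= MASK_WORDS)   /* mirrors the BUG_ON() in the patch */
			return 1;
		bufmask[word] |= 1ULL << bit;
	}

	for (i = 0; i < MASK_WORDS; ++i)
		printf("mask[%u] = 0x%016llx\n", i,
		       (unsigned long long)bufmask[i]);
	return 0;
}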
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index bfe8926b551..7fd18e83390 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -47,14 +47,30 @@ int ipath_make_uc_req(struct ipath_qp *qp)
{
struct ipath_other_headers *ohdr;
struct ipath_swqe *wqe;
+ unsigned long flags;
u32 hwords;
u32 bth0;
u32 len;
u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
int ret = 0;
- if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= IPATH_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
+ }
ohdr = &qp->s_hdr.u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
@@ -69,9 +85,12 @@ int ipath_make_uc_req(struct ipath_qp *qp)
qp->s_wqe = NULL;
switch (qp->s_state) {
default:
+ if (!(ib_ipath_state_ops[qp->state] &
+ IPATH_PROCESS_NEXT_SEND_OK))
+ goto bail;
/* Check if send work queue is empty. */
if (qp->s_cur == qp->s_head)
- goto done;
+ goto bail;
/*
* Start a new request.
*/
@@ -134,7 +153,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
break;
default:
- goto done;
+ goto bail;
}
break;
@@ -194,9 +213,14 @@ int ipath_make_uc_req(struct ipath_qp *qp)
ipath_make_ruc_header(to_idev(qp->ibqp.device),
qp, ohdr, bth0 | (qp->s_state << 24),
qp->s_next_psn++ & IPATH_PSN_MASK);
+done:
ret = 1;
+ goto unlock;
-done:
+bail:
+ qp->s_flags &= ~IPATH_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
@@ -258,8 +282,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
*/
opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- wc.imm_data = 0;
- wc.wc_flags = 0;
+ memset(&wc, 0, sizeof wc);
	/* Compare the PSN versus the expected PSN. */
if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
@@ -322,8 +345,8 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
- if (qp->r_reuse_sge) {
- qp->r_reuse_sge = 0;
+ if (qp->r_flags & IPATH_R_REUSE_SGE) {
+ qp->r_flags &= ~IPATH_R_REUSE_SGE;
qp->r_sge = qp->s_rdma_read_sge;
} else if (!ipath_get_rwqe(qp, 0)) {
dev->n_pkt_drops++;
@@ -340,13 +363,13 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
case OP(SEND_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4))) {
- qp->r_reuse_sge = 1;
+ qp->r_flags |= IPATH_R_REUSE_SGE;
dev->n_pkt_drops++;
goto done;
}
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len)) {
- qp->r_reuse_sge = 1;
+ qp->r_flags |= IPATH_R_REUSE_SGE;
dev->n_pkt_drops++;
goto done;
}
@@ -372,7 +395,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
/* Check for invalid length. */
/* XXX LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4))) {
- qp->r_reuse_sge = 1;
+ qp->r_flags |= IPATH_R_REUSE_SGE;
dev->n_pkt_drops++;
goto done;
}
@@ -380,7 +403,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
tlen -= (hdrsize + pad + 4);
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_reuse_sge = 1;
+ qp->r_flags |= IPATH_R_REUSE_SGE;
dev->n_pkt_drops++;
goto done;
}
@@ -390,14 +413,10 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.pkey_index = 0;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
@@ -488,8 +507,8 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
dev->n_pkt_drops++;
goto done;
}
- if (qp->r_reuse_sge)
- qp->r_reuse_sge = 0;
+ if (qp->r_flags & IPATH_R_REUSE_SGE)
+ qp->r_flags &= ~IPATH_R_REUSE_SGE;
else if (!ipath_get_rwqe(qp, 1)) {
dev->n_pkt_drops++;
goto done;
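
Throughout ipath_uc.c (and ipath_ud.c below), the per-QP u8 r_reuse_sge boolean is folded into one bit (IPATH_R_REUSE_SGE) of a new r_flags byte, tested and cleared with ordinary bit operations. A stand-alone sketch of that substitution; the flag values and struct are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical receive-side flags, analogous to IPATH_R_REUSE_SGE etc. */
#define R_REUSE_SGE 0x01
#define R_RDMAR_SEQ 0x02

struct fake_qp {
	uint8_t r_flags;
};

int main(void)
{
	struct fake_qp qp = { .r_flags = 0 };

	qp.r_flags |= R_REUSE_SGE;            /* was: qp.r_reuse_sge = 1 */

	if (qp.r_flags & R_REUSE_SGE) {       /* was: if (qp.r_reuse_sge) */
		qp.r_flags &= ~R_REUSE_SGE;   /* was: qp.r_reuse_sge = 0 */
		printf("reusing previously validated SGE\n");
	}

	printf("r_flags now 0x%02x\n", qp.r_flags);
	return 0;
}

Packing related booleans into one flags byte keeps the QP structure smaller and lets several conditions be tested with a single mask.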
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 8b6a261c89e..77ca8ca74e7 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -65,9 +65,9 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
u32 length;
qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
- if (!qp) {
+ if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
dev->n_pkt_drops++;
- goto send_comp;
+ goto done;
}
rsge.sg_list = NULL;
@@ -91,14 +91,12 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
* present on the wire.
*/
length = swqe->length;
+ memset(&wc, 0, sizeof wc);
wc.byte_len = length + sizeof(struct ib_grh);
if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
wc.wc_flags = IB_WC_WITH_IMM;
wc.imm_data = swqe->wr.ex.imm_data;
- } else {
- wc.wc_flags = 0;
- wc.imm_data = 0;
}
/*
@@ -229,7 +227,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
}
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
wc.qp = &qp->ibqp;
wc.src_qp = sqp->ibqp.qp_num;
/* XXX do we know which pkey matched? Only needed for GSI. */
@@ -248,8 +245,7 @@ drop:
kfree(rsge.sg_list);
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
-send_comp:
- ipath_send_complete(sqp, swqe, IB_WC_SUCCESS);
+done:;
}
/**
@@ -264,6 +260,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
struct ipath_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct ipath_swqe *wqe;
+ unsigned long flags;
u32 nwords;
u32 extra_bytes;
u32 bth0;
@@ -271,13 +268,30 @@ int ipath_make_ud_req(struct ipath_qp *qp)
u16 lid;
int ret = 0;
- if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)))
- goto bail;
+ spin_lock_irqsave(&qp->s_lock, flags);
+
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
+ goto bail;
+ /* We are in the error state, flush the work request. */
+ if (qp->s_last == qp->s_head)
+ goto bail;
+ /* If DMAs are in progress, we can't flush immediately. */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= IPATH_S_WAIT_DMA;
+ goto bail;
+ }
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ goto done;
+ }
if (qp->s_cur == qp->s_head)
goto bail;
wqe = get_swqe_ptr(qp, qp->s_cur);
+ if (++qp->s_cur >= qp->s_size)
+ qp->s_cur = 0;
/* Construct the header. */
ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
@@ -288,10 +302,23 @@ int ipath_make_ud_req(struct ipath_qp *qp)
dev->n_unicast_xmit++;
} else {
dev->n_unicast_xmit++;
- lid = ah_attr->dlid &
- ~((1 << dev->dd->ipath_lmc) - 1);
+ lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
if (unlikely(lid == dev->dd->ipath_lid)) {
+ /*
+ * If DMAs are in progress, we can't generate
+ * a completion for the loopback packet since
+ * it would be out of order.
+ * XXX Instead of waiting, we could queue a
+ * zero length descriptor so we get a callback.
+ */
+ if (atomic_read(&qp->s_dma_busy)) {
+ qp->s_flags |= IPATH_S_WAIT_DMA;
+ goto bail;
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
ipath_ud_loopback(qp, wqe);
+ spin_lock_irqsave(&qp->s_lock, flags);
+ ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done;
}
}
@@ -368,11 +395,13 @@ int ipath_make_ud_req(struct ipath_qp *qp)
ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
done:
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
ret = 1;
+ goto unlock;
bail:
+ qp->s_flags &= ~IPATH_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
@@ -506,8 +535,8 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_reuse_sge)
- qp->r_reuse_sge = 0;
+ if (qp->r_flags & IPATH_R_REUSE_SGE)
+ qp->r_flags &= ~IPATH_R_REUSE_SGE;
else if (!ipath_get_rwqe(qp, 0)) {
/*
* Count VL15 packets dropped due to no receive buffer.
@@ -523,7 +552,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
}
/* Silently drop packets which are too big. */
if (wc.byte_len > qp->r_len) {
- qp->r_reuse_sge = 1;
+ qp->r_flags |= IPATH_R_REUSE_SGE;
dev->n_pkt_drops++;
goto bail;
}
@@ -535,7 +564,8 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
ipath_copy_sge(&qp->r_sge, data,
wc.byte_len - sizeof(struct ib_grh));
- qp->r_wrid_valid = 0;
+ if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
+ goto bail;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
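
ipath_ud_rcv() above now consumes the "receive WR ID valid" state with test_and_clear_bit() on an atomic flags word, so a completion is generated at most once even if two paths race to report it. A rough userspace approximation of that consume-once idiom using C11 atomics; unlike the kernel's test_and_clear_bit(), which takes a bit number in an unsigned long, this sketch uses a plain mask:

#include <stdatomic.h>
#include <stdio.h>

#define WRID_VALID 0x1u

/* Atomically clear the bit and report whether it was previously set. */
static int test_and_clear_flag(atomic_uint *flags, unsigned int bit)
{
	unsigned int old = atomic_fetch_and(flags, ~bit);
	return (old & bit) != 0;
}

int main(void)
{
	atomic_uint r_aflags = WRID_VALID;

	if (test_and_clear_flag(&r_aflags, WRID_VALID))
		printf("first caller generates the completion\n");

	if (!test_and_clear_flag(&r_aflags, WRID_VALID))
		printf("second caller bails out, flag already consumed\n");

	return 0;
}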
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.h b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
index e70946c1428..fc76316c4a5 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.h
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.h
@@ -45,8 +45,6 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd,
int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
struct ipath_user_sdma_queue *pq);
-int ipath_user_sdma_pkt_sent(const struct ipath_user_sdma_queue *pq,
- u32 counter);
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
struct ipath_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index e63927cce5b..e0ec540042b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -111,16 +111,24 @@ static unsigned int ib_ipath_disable_sma;
module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; ipath_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = 0,
[IB_QPS_INIT] = IPATH_POST_RECV_OK,
[IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
[IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
- IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
+ IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
+ IPATH_PROCESS_NEXT_SEND_OK,
[IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
- IPATH_POST_SEND_OK,
- [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
- [IB_QPS_ERR] = 0,
+ IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
+ IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
+ [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
+ IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
};
struct ipath_ucontext {
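
The enlarged ib_ipath_state_ops[] table above maps each QP state to a bitmask of permitted operations, so callers decide what to do with a single indexed AND (for example PROCESS_SEND_OK versus FLUSH_SEND). A compact sketch of the table-of-bitmasks lookup; the states and flag names here are invented, not the real IPATH_* constants:

#include <stdio.h>

enum state { ST_RESET, ST_READY, ST_ERROR, ST_MAX };

#define POST_SEND_OK    0x1
#define PROCESS_SEND_OK 0x2
#define FLUSH_SEND      0x4

/* One bitmask of allowed actions per state, indexed directly by state. */
static const int state_ops[ST_MAX] = {
	[ST_RESET] = 0,
	[ST_READY] = POST_SEND_OK | PROCESS_SEND_OK,
	[ST_ERROR] = POST_SEND_OK | FLUSH_SEND,
};

static void handle_send(enum state s)
{
	if (state_ops[s] & PROCESS_SEND_OK)
		printf("state %d: send the work request\n", s);
	else if (state_ops[s] & FLUSH_SEND)
		printf("state %d: flush it with an error completion\n", s);
	else
		printf("state %d: nothing to do\n", s);
}

int main(void)
{
	handle_send(ST_RESET);
	handle_send(ST_READY);
	handle_send(ST_ERROR);
	return 0;
}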
@@ -230,18 +238,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
}
}
-static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
-{
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wr->wr_id;
- wc.status = IB_WC_WR_FLUSH_ERR;
- wc.opcode = ib_ipath_wc_opcode[wr->opcode];
- wc.qp = &qp->ibqp;
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-}
-
/*
* Count the number of DMA descriptors needed to send length bytes of data.
* Don't modify the ipath_sge_state to get the count.
@@ -347,14 +343,8 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
spin_lock_irqsave(&qp->s_lock, flags);
/* Check that state is OK to post send. */
- if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) {
- if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR)
- goto bail_inval;
- /* C10-96 says generate a flushed completion entry. */
- ipath_flush_wqe(qp, wr);
- ret = 0;
- goto bail;
- }
+ if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
+ goto bail_inval;
/* IB spec says that num_sge == 0 is OK. */
if (wr->num_sge > qp->s_max_sge)
@@ -396,7 +386,6 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
wqe = get_swqe_ptr(qp, qp->s_head);
wqe->wr = *wr;
- wqe->ssn = qp->s_ssn++;
wqe->length = 0;
if (wr->num_sge) {
acc = wr->opcode >= IB_WR_RDMA_READ ?
@@ -422,6 +411,7 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
goto bail_inval;
} else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
goto bail_inval;
+ wqe->ssn = qp->s_ssn++;
qp->s_head = next;
ret = 0;
@@ -677,6 +667,7 @@ bail:;
static void ipath_ib_timer(struct ipath_ibdev *dev)
{
struct ipath_qp *resend = NULL;
+ struct ipath_qp *rnr = NULL;
struct list_head *last;
struct ipath_qp *qp;
unsigned long flags;
@@ -703,7 +694,9 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
if (--qp->s_rnr_timeout == 0) {
do {
list_del_init(&qp->timerwait);
- tasklet_hi_schedule(&qp->s_task);
+ qp->timer_next = rnr;
+ rnr = qp;
+ atomic_inc(&qp->refcount);
if (list_empty(last))
break;
qp = list_entry(last->next, struct ipath_qp,
@@ -743,13 +736,15 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
spin_unlock_irqrestore(&dev->pending_lock, flags);
/* XXX What if timer fires again while this is running? */
- for (qp = resend; qp != NULL; qp = qp->timer_next) {
- struct ib_wc wc;
+ while (resend != NULL) {
+ qp = resend;
+ resend = qp->timer_next;
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) {
+ if (qp->s_last != qp->s_tail &&
+ ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
dev->n_timeouts++;
- ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+ ipath_restart_rc(qp, qp->s_last_psn + 1);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -757,6 +752,19 @@ static void ipath_ib_timer(struct ipath_ibdev *dev)
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
+ while (rnr != NULL) {
+ qp = rnr;
+ rnr = qp->timer_next;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
+ ipath_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Notify ipath_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
}
static void update_sge(struct ipath_sge_state *ss, u32 length)
@@ -1012,13 +1020,24 @@ static void sdma_complete(void *cookie, int status)
struct ipath_verbs_txreq *tx = cookie;
struct ipath_qp *qp = tx->qp;
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ unsigned int flags;
+ enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
+ IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
- /* Generate a completion queue entry if needed */
- if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
- enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
-
+ if (atomic_dec_and_test(&qp->s_dma_busy)) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (tx->wqe)
+ ipath_send_complete(qp, tx->wqe, ibs);
+ if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
+ qp->s_last != qp->s_head) ||
+ (qp->s_flags & IPATH_S_WAIT_DMA))
+ ipath_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ wake_up(&qp->wait_dma);
+ } else if (tx->wqe) {
+ spin_lock_irqsave(&qp->s_lock, flags);
ipath_send_complete(qp, tx->wqe, ibs);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
}
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
@@ -1029,6 +1048,21 @@ static void sdma_complete(void *cookie, int status)
wake_up(&qp->wait);
}
+static void decrement_dma_busy(struct ipath_qp *qp)
+{
+ unsigned int flags;
+
+ if (atomic_dec_and_test(&qp->s_dma_busy)) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
+ qp->s_last != qp->s_head) ||
+ (qp->s_flags & IPATH_S_WAIT_DMA))
+ ipath_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ wake_up(&qp->wait_dma);
+ }
+}
+
/*
* Compute the number of clock cycles of delay before sending the next packet.
* The multipliers reflect the number of clocks for the fastest rate so
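
Both sdma_complete() and the new decrement_dma_busy() helper above drop s_dma_busy with atomic_dec_and_test() and, once the count reaches zero, reschedule the QP and wake anything sleeping on wait_dma. A userspace sketch of the decrement-and-wake-on-last pattern using C11 atomics; the real code additionally takes s_lock and calls ipath_schedule_send(), which is omitted here:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dma_busy;    /* number of in-flight DMA descriptors */

static void wake_waiters(void)
{
	/* Stand-in for wake_up(&qp->wait_dma) / rescheduling the QP. */
	printf("last DMA done: waking waiters\n");
}

static void dma_complete(void)
{
	/* atomic_fetch_sub returns the old value; old == 1 means we were last. */
	if (atomic_fetch_sub(&dma_busy, 1) == 1)
		wake_waiters();
}

int main(void)
{
	atomic_fetch_add(&dma_busy, 1);   /* submit #1 */
	atomic_fetch_add(&dma_busy, 1);   /* submit #2 */

	dma_complete();                   /* one still outstanding: no wake */
	dma_complete();                   /* last one: wake */
	return 0;
}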
@@ -1067,9 +1101,12 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
if (tx) {
qp->s_tx = NULL;
/* resend previously constructed packet */
+ atomic_inc(&qp->s_dma_busy);
ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
- if (ret)
+ if (ret) {
qp->s_tx = tx;
+ decrement_dma_busy(qp);
+ }
goto bail;
}
@@ -1120,12 +1157,14 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
tx->txreq.sg_count = ndesc;
tx->map_len = (hdrwords + 2) << 2;
tx->txreq.map_addr = &tx->hdr;
+ atomic_inc(&qp->s_dma_busy);
ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
if (ret) {
/* save ss and length in dwords */
tx->ss = ss;
tx->len = dwords;
qp->s_tx = tx;
+ decrement_dma_busy(qp);
}
goto bail;
}
@@ -1146,6 +1185,7 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
memcpy(piobuf, hdr, hdrwords << 2);
ipath_copy_from_sge(piobuf + hdrwords, ss, len);
+ atomic_inc(&qp->s_dma_busy);
ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
/*
* If we couldn't queue the DMA request, save the info
@@ -1156,6 +1196,7 @@ static int ipath_verbs_send_dma(struct ipath_qp *qp,
tx->ss = NULL;
tx->len = 0;
qp->s_tx = tx;
+ decrement_dma_busy(qp);
}
dev->n_unaligned++;
goto bail;
@@ -1179,6 +1220,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
unsigned flush_wc;
u32 control;
int ret;
+ unsigned int flags;
piobuf = ipath_getpiobuf(dd, plen, NULL);
if (unlikely(piobuf == NULL)) {
@@ -1249,8 +1291,11 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
}
copy_io(piobuf, ss, len, flush_wc);
done:
- if (qp->s_wqe)
+ if (qp->s_wqe) {
+ spin_lock_irqsave(&qp->s_lock, flags);
ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ }
ret = 0;
bail:
return ret;
@@ -1283,19 +1328,12 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
* can defer SDMA restart until link goes ACTIVE without
* worrying about just how we got there.
*/
- if (qp->ibqp.qp_type == IB_QPT_SMI)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
plen, dwords);
- /* All non-VL15 packets are dropped if link is not ACTIVE */
- else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
- if (qp->s_wqe)
- ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
- ret = 0;
- } else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
- ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
- plen, dwords);
else
- ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
+ ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
plen, dwords);
return ret;
@@ -1403,27 +1441,46 @@ bail:
* This is called from ipath_intr() at interrupt level when a PIO buffer is
* available after ipath_verbs_send() returned an error that no buffers were
* available. Return 1 if we consumed all the PIO buffers and we still have
- * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
+ * QPs waiting for buffers (for now, just restart the send tasklet and
* return zero).
*/
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
{
+ struct list_head *list;
+ struct ipath_qp *qplist;
struct ipath_qp *qp;
unsigned long flags;
if (dev == NULL)
goto bail;
+ list = &dev->piowait;
+ qplist = NULL;
+
spin_lock_irqsave(&dev->pending_lock, flags);
- while (!list_empty(&dev->piowait)) {
- qp = list_entry(dev->piowait.next, struct ipath_qp,
- piowait);
+ while (!list_empty(list)) {
+ qp = list_entry(list->next, struct ipath_qp, piowait);
list_del_init(&qp->piowait);
- clear_bit(IPATH_S_BUSY, &qp->s_busy);
- tasklet_hi_schedule(&qp->s_task);
+ qp->pio_next = qplist;
+ qplist = qp;
+ atomic_inc(&qp->refcount);
}
spin_unlock_irqrestore(&dev->pending_lock, flags);
+ while (qplist != NULL) {
+ qp = qplist;
+ qplist = qp->pio_next;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
+ ipath_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+
+ /* Notify ipath_destroy_qp() if it is waiting. */
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+
bail:
return 0;
}
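
The rewritten ipath_ib_piobufavail() above empties the shared piowait list under pending_lock onto a private singly linked list (taking a reference on each QP), and only then walks that private list taking each QP's own s_lock. A minimal sketch of the drain-then-process structure, with a trivial node type standing in for the QP, a pthread mutex standing in for the kernel spinlock, and the per-QP refcounting left out:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct waiter {
	int id;
	struct waiter *next;       /* link on the shared wait list */
	struct waiter *pio_next;   /* link on the caller's private list */
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *piowait;     /* shared list of blocked senders */

static void buffers_available(void)
{
	struct waiter *mine = NULL, *w;

	/* Phase 1: detach every waiter onto a private list under the lock. */
	pthread_mutex_lock(&pending_lock);
	while (piowait) {
		w = piowait;
		piowait = w->next;
		w->pio_next = mine;
		mine = w;
	}
	pthread_mutex_unlock(&pending_lock);

	/* Phase 2: process waiters without holding the shared lock. */
	while (mine) {
		w = mine;
		mine = w->pio_next;
		printf("rescheduling waiter %d\n", w->id);
		free(w);
	}
}

int main(void)
{
	for (int i = 0; i < 3; ++i) {
		struct waiter *w = calloc(1, sizeof *w);
		w->id = i;
		w->next = piowait;
		piowait = w;
	}
	buffers_available();
	return 0;
}

Splitting the work this way keeps the time spent under the contended shared lock short and avoids taking two locks at once.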
@@ -2145,11 +2202,12 @@ bail:
void ipath_unregister_ib_device(struct ipath_ibdev *dev)
{
struct ib_device *ibdev = &dev->ibdev;
-
- disable_timer(dev->dd);
+ u32 qps_inuse;
ib_unregister_device(ibdev);
+ disable_timer(dev->dd);
+
if (!list_empty(&dev->pending[0]) ||
!list_empty(&dev->pending[1]) ||
!list_empty(&dev->pending[2]))
@@ -2164,7 +2222,10 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
* Note that ipath_unregister_ib_device() can be called before all
* the QPs are destroyed!
*/
- ipath_free_all_qps(&dev->qp_table);
+ qps_inuse = ipath_free_all_qps(&dev->qp_table);
+ if (qps_inuse)
+ ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
+ qps_inuse);
kfree(dev->qp_table.table);
kfree(dev->lk_table.table);
kfree(dev->txreq_bufs);
@@ -2215,17 +2276,14 @@ static ssize_t show_stats(struct device *device, struct device_attribute *attr,
"RC OTH NAKs %d\n"
"RC timeouts %d\n"
"RC RDMA dup %d\n"
- "RC stalls %d\n"
"piobuf wait %d\n"
- "no piobuf %d\n"
"unaligned %d\n"
"PKT drops %d\n"
"WQE errs %d\n",
dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
dev->n_other_naks, dev->n_timeouts,
- dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
- dev->n_no_piobuf, dev->n_unaligned,
+ dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
dev->n_pkt_drops, dev->n_wqe_errs);
for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
const struct ipath_opcode_stats *si = &dev->opstats[i];
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 6514aa8306c..9d12ae8a778 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -74,6 +74,11 @@
#define IPATH_POST_RECV_OK 0x02
#define IPATH_PROCESS_RECV_OK 0x04
#define IPATH_PROCESS_SEND_OK 0x08
+#define IPATH_PROCESS_NEXT_SEND_OK 0x10
+#define IPATH_FLUSH_SEND 0x20
+#define IPATH_FLUSH_RECV 0x40
+#define IPATH_PROCESS_OR_FLUSH_SEND \
+ (IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
@@ -353,12 +358,14 @@ struct ipath_qp {
struct ib_qp ibqp;
struct ipath_qp *next; /* link list for QPN hash table */
struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */
+ struct ipath_qp *pio_next; /* link for ipath_ib_piobufavail() */
struct list_head piowait; /* link for wait PIO buf */
struct list_head timerwait; /* link for waiting for timeouts */
struct ib_ah_attr remote_ah_attr;
struct ipath_ib_header s_hdr; /* next packet header to send */
atomic_t refcount;
wait_queue_head_t wait;
+ wait_queue_head_t wait_dma;
struct tasklet_struct s_task;
struct ipath_mmap_info *ip;
struct ipath_sge_state *s_cur_sge;
@@ -369,7 +376,7 @@ struct ipath_qp {
struct ipath_sge_state s_rdma_read_sge;
struct ipath_sge_state r_sge; /* current receive data */
spinlock_t s_lock;
- unsigned long s_busy;
+ atomic_t s_dma_busy;
u16 s_pkt_delay;
u16 s_hdrwords; /* size of s_hdr in 32 bit words */
u32 s_cur_size; /* size of send packet in bytes */
@@ -383,6 +390,7 @@ struct ipath_qp {
u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
u64 r_wr_id; /* ID for current receive WQE */
+ unsigned long r_aflags;
u32 r_len; /* total length of r_sge */
u32 r_rcv_len; /* receive data len processed */
u32 r_psn; /* expected rcv packet sequence number */
@@ -394,8 +402,7 @@ struct ipath_qp {
u8 r_state; /* opcode of last packet received */
u8 r_nak_state; /* non-zero if NAK is pending */
u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
- u8 r_reuse_sge; /* for UC receive errors */
- u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */
+ u8 r_flags;
u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
u8 r_head_ack_queue; /* index into s_ack_queue[] */
u8 qp_access_flags;
@@ -404,13 +411,13 @@ struct ipath_qp {
u8 s_rnr_retry_cnt;
u8 s_retry; /* requester retry counter */
u8 s_rnr_retry; /* requester RNR retry counter */
- u8 s_wait_credit; /* limit number of unacked packets sent */
u8 s_pkey_index; /* PKEY index to use */
u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
u8 s_tail_ack_queue; /* index into s_ack_queue[] */
u8 s_flags;
u8 s_dmult;
+ u8 s_draining;
u8 timeout; /* Timeout for this QP */
enum ib_mtu path_mtu;
u32 remote_qpn;
@@ -428,16 +435,40 @@ struct ipath_qp {
struct ipath_sge r_sg_list[0]; /* verified SGEs */
};
-/* Bit definition for s_busy. */
-#define IPATH_S_BUSY 0
+/*
+ * Atomic bit definitions for r_aflags.
+ */
+#define IPATH_R_WRID_VALID 0
+
+/*
+ * Bit definitions for r_flags.
+ */
+#define IPATH_R_REUSE_SGE 0x01
+#define IPATH_R_RDMAR_SEQ 0x02
/*
* Bit definitions for s_flags.
+ *
+ * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
+ * before processing the next SWQE
+ * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
+ * before processing the next SWQE
+ * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
+ * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
+ * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ * next send completion entry not via send DMA.
*/
#define IPATH_S_SIGNAL_REQ_WR 0x01
#define IPATH_S_FENCE_PENDING 0x02
#define IPATH_S_RDMAR_PENDING 0x04
#define IPATH_S_ACK_PENDING 0x08
+#define IPATH_S_BUSY 0x10
+#define IPATH_S_WAITING 0x20
+#define IPATH_S_WAIT_SSN_CREDIT 0x40
+#define IPATH_S_WAIT_DMA 0x80
+
+#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
+ IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
#define IPATH_PSN_CREDIT 512
@@ -573,13 +604,11 @@ struct ipath_ibdev {
u32 n_rnr_naks;
u32 n_other_naks;
u32 n_timeouts;
- u32 n_rc_stalls;
u32 n_pkt_drops;
u32 n_vl15_dropped;
u32 n_wqe_errs;
u32 n_rdma_dup_busy;
u32 n_piowait;
- u32 n_no_piobuf;
u32 n_unaligned;
u32 port_cap_flags;
u32 pma_sample_start;
@@ -657,6 +686,17 @@ static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
return container_of(ibdev, struct ipath_ibdev, ibdev);
}
+/*
+ * This must be called with s_lock held.
+ */
+static inline void ipath_schedule_send(struct ipath_qp *qp)
+{
+ if (qp->s_flags & IPATH_S_ANY_WAIT)
+ qp->s_flags &= ~IPATH_S_ANY_WAIT;
+ if (!(qp->s_flags & IPATH_S_BUSY))
+ tasklet_hi_schedule(&qp->s_task);
+}
+
int ipath_process_mad(struct ib_device *ibdev,
int mad_flags,
u8 port_num,
@@ -706,12 +746,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr);
-void ipath_free_all_qps(struct ipath_qp_table *qpt);
+unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
-void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
-
void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
@@ -729,7 +767,9 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc);
+void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
+
+void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 2f199c5c4a7..4521319b140 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -246,7 +246,7 @@ err_mtt:
if (context)
ib_umem_release(cq->umem);
else
- mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
+ mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
err_db:
if (!context)
@@ -434,7 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
ib_umem_release(mcq->umem);
} else {
- mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
+ mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
mlx4_db_free(dev->dev, &mcq->db);
}
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 9f7364a9096..a4e9269a29b 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -91,10 +91,6 @@ unsigned int nes_debug_level = 0;
module_param_named(debug_level, nes_debug_level, uint, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug output level");
-unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
-module_param(nes_lro_max_aggr, int, NES_LRO_MAX_AGGR);
-MODULE_PARM_DESC(nes_mro_max_aggr, " nic LRO MAX packet aggregation");
-
LIST_HEAD(nes_adapter_list);
static LIST_HEAD(nes_dev_list);
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 1f9f7bf7386..61b46e9c7d2 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -173,7 +173,6 @@ extern int disable_mpa_crc;
extern unsigned int send_first;
extern unsigned int nes_drv_opt;
extern unsigned int nes_debug_level;
-extern unsigned int nes_lro_max_aggr;
extern struct list_head nes_adapter_list;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8dc70f9bad2..d3278f111ca 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -42,6 +42,10 @@
#include "nes.h"
+static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
+module_param(nes_lro_max_aggr, uint, 0444);
+MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
+
static u32 crit_err_count;
u32 int_mod_timer_init;
u32 int_mod_cq_depth_256;
@@ -1738,7 +1742,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
jumbomode = 1;
nes_nic_init_timer_defaults(nesdev, jumbomode);
}
- nesvnic->lro_mgr.max_aggr = NES_LRO_MAX_AGGR;
+ nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr;
nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS;
nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc;
nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 9044f880353..ca126fc2b85 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -334,6 +334,7 @@ struct ipoib_dev_priv {
#endif
int hca_caps;
struct ipoib_ethtool_st ethtool;
+ struct timer_list poll_timer;
};
struct ipoib_ah {
@@ -404,6 +405,7 @@ extern struct workqueue_struct *ipoib_workqueue;
int ipoib_poll(struct napi_struct *napi, int budget);
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
+void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 97b815c1a3f..f429bce24c2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -461,6 +461,26 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
netif_rx_schedule(dev, &priv->napi);
}
+static void drain_tx_cq(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ while (poll_tx(priv))
+ ; /* nothing */
+
+ if (netif_queue_stopped(dev))
+ mod_timer(&priv->poll_timer, jiffies + 1);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
+
+void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
+{
+ drain_tx_cq((struct net_device *)dev_ptr);
+}
+
static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ib_ah *address, u32 qpn,
@@ -555,12 +575,22 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
else
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+ if (++priv->tx_outstanding == ipoib_sendq_size) {
+ ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+ netif_stop_queue(dev);
+ }
+
if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, tx_req, phead, hlen))) {
ipoib_warn(priv, "post_send failed\n");
++dev->stats.tx_errors;
+ --priv->tx_outstanding;
ipoib_dma_unmap_tx(priv->ca, tx_req);
dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
} else {
dev->trans_start = jiffies;
@@ -568,14 +598,11 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
++priv->tx_head;
skb_orphan(skb);
- if (++priv->tx_outstanding == ipoib_sendq_size) {
- ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
- netif_stop_queue(dev);
- }
}
if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
- poll_tx(priv);
+ while (poll_tx(priv))
+ ; /* nothing */
}
static void __ipoib_reap_ah(struct net_device *dev)
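
ipoib_send() above now stops the netdev queue and arms a send-CQ notification before posting the descriptor that fills the ring, rather than after the post, and undoes the accounting (waking the queue) if the post fails. A toy model of that stop-before-post bookkeeping, with an invented ring size and a fake post that sometimes fails; completions and the timer-driven drain are not modelled:

#include <stdbool.h>
#include <stdio.h>

#define SENDQ_SIZE 4            /* hypothetical ring size */

static int  tx_outstanding;
static bool queue_stopped;

static bool post_send(int wr_id)
{
	/* pretend the hardware rejects every third request */
	return (wr_id % 3) != 2;
}

static void send_one(int wr_id)
{
	/* Stop the queue *before* posting the request that fills the ring. */
	if (++tx_outstanding == SENDQ_SIZE) {
		queue_stopped = true;
		printf("ring full after wr %d: queue stopped, CQ event armed\n", wr_id);
	}

	if (!post_send(wr_id)) {
		/* Post failed: undo the accounting and restart the queue. */
		--tx_outstanding;
		if (queue_stopped) {
			queue_stopped = false;
			printf("wr %d failed: queue restarted\n", wr_id);
		}
		return;
	}
	printf("wr %d posted (outstanding=%d)\n", wr_id, tx_outstanding);
}

int main(void)
{
	for (int i = 0; i < 8 && !queue_stopped; ++i)
		send_one(i);
	return 0;
}

Stopping before the post closes the window in which a completion could race with the decision to stop the queue.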
@@ -609,6 +636,11 @@ void ipoib_reap_ah(struct work_struct *work)
round_jiffies_relative(HZ));
}
+static void ipoib_ib_tx_timer_func(unsigned long ctx)
+{
+ drain_tx_cq((struct net_device *)ctx);
+}
+
int ipoib_ib_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -645,6 +677,10 @@ int ipoib_ib_dev_open(struct net_device *dev)
queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
round_jiffies_relative(HZ));
+ init_timer(&priv->poll_timer);
+ priv->poll_timer.function = ipoib_ib_tx_timer_func;
+ priv->poll_timer.data = (unsigned long)dev;
+
set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
return 0;
@@ -810,6 +846,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
ipoib_dbg(priv, "All sends and receives done.\n");
timeout:
+ del_timer_sync(&priv->poll_timer);
qp_attr.qp_state = IB_QPS_RESET;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c1e7ece1fd4..8766d29ce3b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -187,7 +187,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
goto out_free_mr;
}
- priv->send_cq = ib_create_cq(priv->ca, NULL, NULL, dev, ipoib_sendq_size, 0);
+ priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
+ dev, ipoib_sendq_size, 0);
if (IS_ERR(priv->send_cq)) {
printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 92b683411d5..3ad8bd9f754 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -14,7 +14,7 @@ if INPUT_MISC
config INPUT_PCSPKR
tristate "PC Speaker support"
- depends on ALPHA || X86 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES
+ depends on PCSPKR_PLATFORM
depends on SND_PCSP=n
help
Say Y here if you want the standard PC Speaker to be used for
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 02b3ad8c082..edfedd9a166 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -69,6 +69,7 @@
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/hil.h>
+#include <linux/semaphore.h>
#include <asm/io.h>
#include <asm/system.h>
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 3b4e13b9ce1..f451c7351a9 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -25,7 +25,7 @@
#elif defined(__arm__)
/* defined in include/asm-arm/arch-xxx/irqs.h */
#include <asm/irq.h>
-#elif defined(CONFIG_SUPERH64)
+#elif defined(CONFIG_SH_CAYMAN)
#include <asm/irq.h>
#else
# define I8042_KBD_IRQ 1
diff --git a/drivers/isdn/capi/capiutil.c b/drivers/isdn/capi/capiutil.c
index ebef4ce1b00..29419a8d31d 100644
--- a/drivers/isdn/capi/capiutil.c
+++ b/drivers/isdn/capi/capiutil.c
@@ -948,17 +948,17 @@ int __init cdebug_init(void)
{
g_cmsg= kmalloc(sizeof(_cmsg), GFP_KERNEL);
if (!g_cmsg)
- return ENOMEM;
+ return -ENOMEM;
g_debbuf = kmalloc(sizeof(_cdebbuf), GFP_KERNEL);
if (!g_debbuf) {
kfree(g_cmsg);
- return ENOMEM;
+ return -ENOMEM;
}
g_debbuf->buf = kmalloc(CDEBUG_GSIZE, GFP_KERNEL);
if (!g_debbuf->buf) {
kfree(g_cmsg);
kfree(g_debbuf);
- return ENOMEM;;
+ return -ENOMEM;;
}
g_debbuf->size = CDEBUG_GSIZE;
g_debbuf->buf[0] = 0;
diff --git a/drivers/isdn/hysdn/Kconfig b/drivers/isdn/hysdn/Kconfig
index c6d8a704298..c9e4231968e 100644
--- a/drivers/isdn/hysdn/Kconfig
+++ b/drivers/isdn/hysdn/Kconfig
@@ -3,7 +3,7 @@
#
config HYSDN
tristate "Hypercope HYSDN cards (Champ, Ergo, Metro) support (module only)"
- depends on m && PROC_FS && PCI && BROKEN_ON_SMP
+ depends on m && PROC_FS && PCI
help
Say Y here if you have one of Hypercope's active PCI ISDN cards
Champ, Ergo and Metro. You will then get a module called hysdn.
diff --git a/drivers/isdn/hysdn/boardergo.c b/drivers/isdn/hysdn/boardergo.c
index 6cdbad3a992..3eb096f0ae1 100644
--- a/drivers/isdn/hysdn/boardergo.c
+++ b/drivers/isdn/hysdn/boardergo.c
@@ -64,10 +64,11 @@ ergo_interrupt(int intno, void *dev_id)
} /* ergo_interrupt */
/******************************************************************************/
-/* ergo_irq_bh is the function called by the immediate kernel task list after */
-/* being activated with queue_task and no interrupts active. This task is the */
-/* only one handling data transfer from or to the card after booting. The task */
-/* may be queued from everywhere (interrupts included). */
+/* ergo_irq_bh will be called as part of the kernel clearing its shared work */
+/* queue sometime after a call to schedule_work has been made passing our */
+/* work_struct. This task is the only one handling data transfer from or to */
+/* the card after booting. The task may be queued from everywhere */
+/* (interrupts included). */
/******************************************************************************/
static void
ergo_irq_bh(struct work_struct *ugli_api)
@@ -90,7 +91,6 @@ ergo_irq_bh(struct work_struct *ugli_api)
card->hw_lock = 1; /* we now lock the hardware */
do {
- sti(); /* reenable other ints */
again = 0; /* assume loop not to be repeated */
if (!dpr->ToHyFlag) {
@@ -110,7 +110,6 @@ ergo_irq_bh(struct work_struct *ugli_api)
again = 1; /* restart loop */
}
} /* a message has arrived for us */
- cli(); /* no further ints */
if (again) {
dpr->ToHyInt = 1;
dpr->ToPcInt = 1; /* interrupt to E1 for all cards */
@@ -242,7 +241,6 @@ ergo_writebootimg(struct HYSDN_CARD *card, unsigned char *buf,
byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RUN); /* start E1 processor */
/* the interrupts are still masked */
- sti();
msleep_interruptible(20); /* Timeout 20ms */
if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) {
@@ -276,7 +274,6 @@ ergo_writebootseq(struct HYSDN_CARD *card, unsigned char *buf, int len)
dst = sp->Data; /* point to data in spool structure */
buflen = sp->Len; /* maximum len of spooled data */
wr_mirror = sp->WrPtr; /* only once read */
- sti();
/* try until all bytes written or error */
i = 0x1000; /* timeout value */
@@ -380,7 +377,6 @@ ergo_waitpofready(struct HYSDN_CARD *card)
#endif /* CONFIG_HYSDN_CAPI */
return (0); /* success */
} /* data has arrived */
- sti();
msleep_interruptible(50); /* Timeout 50ms */
} /* wait until timeout */
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 20978205cd0..b8b9e44f7f4 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -37,7 +37,7 @@
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#ifdef CONFIG_PPC
@@ -102,7 +102,7 @@ static struct adb_handler {
} adb_handler[16];
/*
- * The adb_handler_sem mutex protects all accesses to the original_address
+ * The adb_handler_mutex mutex protects all accesses to the original_address
* and handler_id fields of adb_handler[i] for all i, and changes to the
* handler field.
* Accesses to the handler field are protected by the adb_handler_lock
@@ -110,7 +110,7 @@ static struct adb_handler {
* time adb_unregister returns, we know that the old handler isn't being
* called.
*/
-static DECLARE_MUTEX(adb_handler_sem);
+static DEFINE_MUTEX(adb_handler_mutex);
static DEFINE_RWLOCK(adb_handler_lock);
#if 0
@@ -355,7 +355,7 @@ do_adb_reset_bus(void)
msleep(500);
}
- down(&adb_handler_sem);
+ mutex_lock(&adb_handler_mutex);
write_lock_irq(&adb_handler_lock);
memset(adb_handler, 0, sizeof(adb_handler));
write_unlock_irq(&adb_handler_lock);
@@ -376,7 +376,7 @@ do_adb_reset_bus(void)
if (adb_controller->autopoll)
adb_controller->autopoll(autopoll_devs);
}
- up(&adb_handler_sem);
+ mutex_unlock(&adb_handler_mutex);
blocking_notifier_call_chain(&adb_client_list,
ADB_MSG_POST_RESET, NULL);
@@ -454,7 +454,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids,
{
int i;
- down(&adb_handler_sem);
+ mutex_lock(&adb_handler_mutex);
ids->nids = 0;
for (i = 1; i < 16; i++) {
if ((adb_handler[i].original_address == default_id) &&
@@ -472,7 +472,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids,
ids->id[ids->nids++] = i;
}
}
- up(&adb_handler_sem);
+ mutex_unlock(&adb_handler_mutex);
return ids->nids;
}
@@ -481,7 +481,7 @@ adb_unregister(int index)
{
int ret = -ENODEV;
- down(&adb_handler_sem);
+ mutex_lock(&adb_handler_mutex);
write_lock_irq(&adb_handler_lock);
if (adb_handler[index].handler) {
while(adb_handler[index].busy) {
@@ -493,7 +493,7 @@ adb_unregister(int index)
adb_handler[index].handler = NULL;
}
write_unlock_irq(&adb_handler_lock);
- up(&adb_handler_sem);
+ mutex_unlock(&adb_handler_mutex);
return ret;
}
@@ -557,19 +557,19 @@ adb_try_handler_change(int address, int new_id)
{
int ret;
- down(&adb_handler_sem);
+ mutex_lock(&adb_handler_mutex);
ret = try_handler_change(address, new_id);
- up(&adb_handler_sem);
+ mutex_unlock(&adb_handler_mutex);
return ret;
}
int
adb_get_infos(int address, int *original_address, int *handler_id)
{
- down(&adb_handler_sem);
+ mutex_lock(&adb_handler_mutex);
*original_address = adb_handler[address].original_address;
*handler_id = adb_handler[address].handler_id;
- up(&adb_handler_sem);
+ mutex_unlock(&adb_handler_mutex);
return (*original_address != 0);
}
@@ -628,10 +628,10 @@ do_adb_query(struct adb_request *req)
case ADB_QUERY_GETDEVINFO:
if (req->nbytes < 3)
break;
- down(&adb_handler_sem);
+ mutex_lock(&adb_handler_mutex);
req->reply[0] = adb_handler[req->data[2]].original_address;
req->reply[1] = adb_handler[req->data[2]].handler_id;
- up(&adb_handler_sem);
+ mutex_unlock(&adb_handler_mutex);
req->complete = 1;
req->reply_len = 2;
adb_write_done(req);
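
The adb.c changes above (and the therm_pm72.c and windfarm hunks that follow) are a mechanical conversion from the old DECLARE_MUTEX/down/up semaphore API to DEFINE_MUTEX/mutex_lock/mutex_unlock. Outside the kernel, the same statically initialized, serialize-a-critical-section pattern looks roughly like a pthread mutex; a minimal analogy, with the table contents invented:

#include <pthread.h>
#include <stdio.h>

/* Userspace analogue of DEFINE_MUTEX(adb_handler_mutex). */
static pthread_mutex_t handler_mutex = PTHREAD_MUTEX_INITIALIZER;

static int handler_table[16];

static void register_handler(int idx, int id)
{
	pthread_mutex_lock(&handler_mutex);    /* was: down(&sem) */
	handler_table[idx] = id;
	pthread_mutex_unlock(&handler_mutex);  /* was: up(&sem) */
}

int main(void)
{
	register_handler(2, 0x42);
	printf("handler[2] = 0x%x\n", handler_table[2]);
	return 0;
}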
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 1e0a69a5e81..ddfb426a9ab 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -122,6 +122,7 @@
#include <linux/kmod.h>
#include <linux/i2c.h>
#include <linux/kthread.h>
+#include <linux/mutex.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
@@ -169,7 +170,7 @@ static int rackmac;
static s32 dimm_output_clamp;
static int fcu_rpm_shift;
static int fcu_tickle_ticks;
-static DECLARE_MUTEX(driver_lock);
+static DEFINE_MUTEX(driver_lock);
/*
* We have 3 types of CPU PID control. One is "split" old style control
@@ -729,9 +730,9 @@ static void fetch_cpu_pumps_minmax(void)
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
ssize_t r; \
- down(&driver_lock); \
+ mutex_lock(&driver_lock); \
r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data)); \
- up(&driver_lock); \
+ mutex_unlock(&driver_lock); \
return r; \
}
#define BUILD_SHOW_FUNC_INT(name, data) \
@@ -1803,11 +1804,11 @@ static int main_control_loop(void *x)
{
DBG("main_control_loop started\n");
- down(&driver_lock);
+ mutex_lock(&driver_lock);
if (start_fcu() < 0) {
printk(KERN_ERR "kfand: failed to start FCU\n");
- up(&driver_lock);
+ mutex_unlock(&driver_lock);
goto out;
}
@@ -1822,14 +1823,14 @@ static int main_control_loop(void *x)
fcu_tickle_ticks = FCU_TICKLE_TICKS;
- up(&driver_lock);
+ mutex_unlock(&driver_lock);
while (state == state_attached) {
unsigned long elapsed, start;
start = jiffies;
- down(&driver_lock);
+ mutex_lock(&driver_lock);
/* Tickle the FCU just in case */
if (--fcu_tickle_ticks < 0) {
@@ -1861,7 +1862,7 @@ static int main_control_loop(void *x)
do_monitor_slots(&slots_state);
else
do_monitor_drives(&drives_state);
- up(&driver_lock);
+ mutex_unlock(&driver_lock);
if (critical_state == 1) {
printk(KERN_WARNING "Temperature control detected a critical condition\n");
@@ -2019,13 +2020,13 @@ static void detach_fcu(void)
*/
static int therm_pm72_attach(struct i2c_adapter *adapter)
{
- down(&driver_lock);
+ mutex_lock(&driver_lock);
/* Check state */
if (state == state_detached)
state = state_attaching;
if (state != state_attaching) {
- up(&driver_lock);
+ mutex_unlock(&driver_lock);
return 0;
}
@@ -2054,7 +2055,7 @@ static int therm_pm72_attach(struct i2c_adapter *adapter)
state = state_attached;
start_control_loops();
}
- up(&driver_lock);
+ mutex_unlock(&driver_lock);
return 0;
}
@@ -2065,16 +2066,16 @@ static int therm_pm72_attach(struct i2c_adapter *adapter)
*/
static int therm_pm72_detach(struct i2c_adapter *adapter)
{
- down(&driver_lock);
+ mutex_lock(&driver_lock);
if (state != state_detached)
state = state_detaching;
/* Stop control loops if any */
DBG("stopping control loops\n");
- up(&driver_lock);
+ mutex_unlock(&driver_lock);
stop_control_loops();
- down(&driver_lock);
+ mutex_lock(&driver_lock);
if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) {
DBG("lost U3-0, disposing control loops\n");
@@ -2090,7 +2091,7 @@ static int therm_pm72_detach(struct i2c_adapter *adapter)
if (u3_0 == NULL && u3_1 == NULL)
state = state_detached;
- up(&driver_lock);
+ mutex_unlock(&driver_lock);
return 0;
}
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 797918d0e59..7f2be4baaed 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
#include <asm/prom.h>
#include <asm/smu.h>
#include <asm/pmac_low_i2c.h>
@@ -36,7 +36,7 @@
struct wf_sat {
int nr;
atomic_t refcnt;
- struct semaphore mutex;
+ struct mutex mutex;
unsigned long last_read; /* jiffies when cache last updated */
u8 cache[16];
struct i2c_client i2c;
@@ -163,7 +163,7 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value)
if (sat->i2c.adapter == NULL)
return -ENODEV;
- down(&sat->mutex);
+ mutex_lock(&sat->mutex);
if (time_after(jiffies, (sat->last_read + MAX_AGE))) {
err = wf_sat_read_cache(sat);
if (err)
@@ -182,7 +182,7 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value)
err = 0;
fail:
- up(&sat->mutex);
+ mutex_unlock(&sat->mutex);
return err;
}
@@ -233,7 +233,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
sat->nr = -1;
sat->node = of_node_get(dev);
atomic_set(&sat->refcnt, 0);
- init_MUTEX(&sat->mutex);
+ mutex_init(&sat->mutex);
sat->i2c.addr = (addr >> 1) & 0x7f;
sat->i2c.adapter = adapter;
sat->i2c.driver = &wf_sat_driver;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 0b8511776b3..10748240cb2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -250,6 +250,7 @@ static int linear_run (mddev_t *mddev)
{
linear_conf_t *conf;
+ mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 42ee1a2dc14..4f4d1f38384 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -417,6 +417,7 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
+ mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 818b4828409..914c04ddec7 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -280,6 +280,7 @@ static int raid0_run (mddev_t *mddev)
(mddev->chunk_size>>1)-1);
blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
+ mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
if (!conf)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6778b7cb39b..ac409b7d83f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1935,6 +1935,9 @@ static int run(mddev_t *mddev)
if (!conf->r1bio_pool)
goto out_no_mem;
+ spin_lock_init(&conf->device_lock);
+ mddev->queue->queue_lock = &conf->device_lock;
+
rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
@@ -1958,7 +1961,6 @@ static int run(mddev_t *mddev)
}
conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
- spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
spin_lock_init(&conf->resync_lock);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5938fa96292..8536ede1e71 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -886,7 +886,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
raid10_find_phys(conf, r10_bio);
retry_write:
- blocked_rdev = 0;
+ blocked_rdev = NULL;
rcu_read_lock();
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
@@ -2082,6 +2082,9 @@ static int run(mddev_t *mddev)
goto out_free_conf;
}
+ spin_lock_init(&conf->device_lock);
+ mddev->queue->queue_lock = &conf->device_lock;
+
rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
@@ -2103,7 +2106,6 @@ static int run(mddev_t *mddev)
disk->head_position = 0;
}
- spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
spin_lock_init(&conf->resync_lock);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 087eee0cb80..93fde48c0f4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2369,8 +2369,8 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
/* complete a check operation */
if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
- clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
- clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
+ clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
if (s->failed == 0) {
if (sh->ops.zero_sum_result == 0)
/* parity is correct (on disc,
@@ -2400,16 +2400,6 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
canceled_check = 1; /* STRIPE_INSYNC is not set */
}
- /* check if we can clear a parity disk reconstruct */
- if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
- test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
-
- clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
- clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
- }
-
/* start a new check operation if there are no failures, the stripe is
* not insync, and a repair is not in flight
*/
@@ -2424,6 +2414,17 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
}
}
+ /* check if we can clear a parity disk reconstruct */
+ if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+ test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+
+ clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ }
+
+
/* Wait for check parity and compute block operations to complete
* before write-back. If a failure occurred while the check operation
* was in flight we need to cycle this stripe through handle_stripe
@@ -4256,6 +4257,7 @@ static int run(mddev_t *mddev)
goto abort;
}
spin_lock_init(&conf->device_lock);
+ mddev->queue->queue_lock = &conf->device_lock;
init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index ddf57e135c6..7a7803b5d49 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -89,8 +89,7 @@ config DVB_CORE
config VIDEO_MEDIA
tristate
- default DVB_CORE || VIDEO_DEV
- depends on DVB_CORE || VIDEO_DEV
+ default (DVB_CORE && (VIDEO_DEV = n)) || (VIDEO_DEV && (DVB_CORE = n)) || (DVB_CORE && VIDEO_DEV)
comment "Multimedia drivers"
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 73f742c7e81..cc11c4c0e7e 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,6 +2,8 @@
# Makefile for the kernel multimedia device drivers.
#
+obj-y := common/
+
obj-$(CONFIG_VIDEO_MEDIA) += common/
# Since hybrid devices are here, should be compiled if DVB and/or V4L
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 5be85ff53e1..d6206540476 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -1,6 +1,6 @@
config MEDIA_ATTACH
bool "Load and attach frontend and tuner driver modules as needed"
- depends on DVB_CORE
+ depends on VIDEO_MEDIA
depends on MODULES
help
Remove the static dependency of DVB card drivers on all
@@ -19,10 +19,10 @@ config MEDIA_ATTACH
config MEDIA_TUNER
tristate
- default DVB_CORE || VIDEO_DEV
- depends on DVB_CORE || VIDEO_DEV
- select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE
- select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMIZE
+ default VIDEO_MEDIA && I2C
+ depends on VIDEO_MEDIA && I2C
+ select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE && HOTPLUG
+ select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMIZE && HOTPLUG
select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMIZE
select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMIZE
select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMIZE
@@ -46,7 +46,7 @@ if MEDIA_TUNER_CUSTOMIZE
config MEDIA_TUNER_SIMPLE
tristate "Simple tuner support"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
select MEDIA_TUNER_TDA9887
default m if MEDIA_TUNER_CUSTOMIZE
help
@@ -54,7 +54,7 @@ config MEDIA_TUNER_SIMPLE
config MEDIA_TUNER_TDA8290
tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
select MEDIA_TUNER_TDA827X
select MEDIA_TUNER_TDA18271
default m if MEDIA_TUNER_CUSTOMIZE
@@ -63,21 +63,21 @@ config MEDIA_TUNER_TDA8290
config MEDIA_TUNER_TDA827X
tristate "Philips TDA827X silicon tuner"
- depends on DVB_CORE && I2C
+ depends on VIDEO_MEDIA && I2C
default m if DVB_FE_CUSTOMISE
help
A DVB-T silicon tuner module. Say Y when you want to support this tuner.
config MEDIA_TUNER_TDA18271
tristate "NXP TDA18271 silicon tuner"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
default m if DVB_FE_CUSTOMISE
help
A silicon tuner module. Say Y when you want to support this tuner.
config MEDIA_TUNER_TDA9887
tristate "TDA 9885/6/7 analog IF demodulator"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
default m if MEDIA_TUNER_CUSTOMIZE
help
Say Y here to include support for Philips TDA9885/6/7
@@ -85,67 +85,79 @@ config MEDIA_TUNER_TDA9887
config MEDIA_TUNER_TEA5761
tristate "TEA 5761 radio tuner (EXPERIMENTAL)"
- depends on I2C && EXPERIMENTAL
+ depends on VIDEO_MEDIA && I2C
+ depends on EXPERIMENTAL
default m if MEDIA_TUNER_CUSTOMIZE
help
Say Y here to include support for the Philips TEA5761 radio tuner.
config MEDIA_TUNER_TEA5767
tristate "TEA 5767 radio tuner"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
default m if MEDIA_TUNER_CUSTOMIZE
help
Say Y here to include support for the Philips TEA5767 radio tuner.
config MEDIA_TUNER_MT20XX
tristate "Microtune 2032 / 2050 tuners"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
default m if MEDIA_TUNER_CUSTOMIZE
help
Say Y here to include support for the MT2032 / MT2050 tuner.
config MEDIA_TUNER_MT2060
tristate "Microtune MT2060 silicon IF tuner"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
default m if DVB_FE_CUSTOMISE
help
A driver for the silicon IF tuner MT2060 from Microtune.
config MEDIA_TUNER_MT2266
tristate "Microtune MT2266 silicon tuner"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
default m if DVB_FE_CUSTOMISE
help
A driver for the silicon baseband tuner MT2266 from Microtune.
config MEDIA_TUNER_MT2131
tristate "Microtune MT2131 silicon tuner"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
default m if DVB_FE_CUSTOMISE
help
A driver for the silicon baseband tuner MT2131 from Microtune.
config MEDIA_TUNER_QT1010
tristate "Quantek QT1010 silicon tuner"
- depends on DVB_CORE && I2C
+ depends on VIDEO_MEDIA && I2C
default m if DVB_FE_CUSTOMISE
help
A driver for the silicon tuner QT1010 from Quantek.
config MEDIA_TUNER_XC2028
tristate "XCeive xc2028/xc3028 tuners"
- depends on I2C && FW_LOADER
+ depends on VIDEO_MEDIA && I2C
+ depends on HOTPLUG
+ select FW_LOADER
default m if MEDIA_TUNER_CUSTOMIZE
help
Say Y here to include support for the xc2028/xc3028 tuners.
config MEDIA_TUNER_XC5000
tristate "Xceive XC5000 silicon tuner"
- depends on I2C
+ depends on VIDEO_MEDIA && I2C
+ depends on HOTPLUG
+ select FW_LOADER
default m if DVB_FE_CUSTOMISE
help
A driver for the silicon tuner XC5000 from Xceive.
+ This device is only used inside a SiP called together with a
demodulator for now.
+config MEDIA_TUNER_MXL5005S
+ tristate "MaxLinear MSL5005S silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A driver for the silicon tuner MXL5005S from MaxLinear.
+
endif # MEDIA_TUNER_CUSTOMIZE
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index 236d9932fd9..55f7e670629 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
obj-$(CONFIG_MEDIA_TUNER_MT2131) += mt2131.o
+obj-$(CONFIG_MEDIA_TUNER_MXL5005S) += mxl5005s.o
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/common/tuners/mxl5005s.c b/drivers/media/common/tuners/mxl5005s.c
new file mode 100644
index 00000000000..5d05b5390f6
--- /dev/null
+++ b/drivers/media/common/tuners/mxl5005s.c
@@ -0,0 +1,4110 @@
+/*
+ MaxLinear MXL5005S VSB/QAM/DVBT tuner driver
+
+ Copyright (C) 2008 MaxLinear
+ Copyright (C) 2006 Steven Toth <stoth@hauppauge.com>
+ Functions:
+ mxl5005s_reset()
+ mxl5005s_writereg()
+ mxl5005s_writeregs()
+ mxl5005s_init()
+ mxl5005s_reconfigure()
+ mxl5005s_AssignTunerMode()
+ mxl5005s_set_params()
+ mxl5005s_get_frequency()
+ mxl5005s_get_bandwidth()
+ mxl5005s_release()
+ mxl5005s_attach()
+
+ Copyright (C) 2008 Realtek
+ Copyright (C) 2008 Jan Hoogenraad
+ Functions:
+ mxl5005s_SetRfFreqHz()
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+/*
+ History of this driver (Steven Toth):
+ I was given a public release of a Linux driver that included
+ support for the MaxLinear MXL5005S silicon tuner. Analysis of
+ the tuner driver clearly showed three things.
+
+ 1. The tuner driver didn't support the LinuxTV tuner API
+ so the code Realtek added had to be removed.
+
+ 2. A significant amount of the driver is reference driver code
+ from MaxLinear; I felt it was important to identify and
+ preserve this.
+
+ 3. New code has to be added to interface correctly with the
+ LinuxTV API, as a regular kernel module.
+
+ Other than the reference driver enums, I've clearly marked
+ sections of the code and retained the copyright of the
+ respective owners.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "dvb_frontend.h"
+#include "mxl5005s.h"
+
+static int debug;
+
+#define dprintk(level, arg...) do { \
+ if (level <= debug) \
+ printk(arg); \
+ } while (0)
+
+#define TUNER_REGS_NUM 104
+#define INITCTRL_NUM 40
+
+#ifdef _MXL_PRODUCTION
+#define CHCTRL_NUM 39
+#else
+#define CHCTRL_NUM 36
+#endif
+
+#define MXLCTRL_NUM 189
+#define MASTER_CONTROL_ADDR 9
+
+/* Enumeration of Master Control Register State */
+enum master_control_state {
+ MC_LOAD_START = 1,
+ MC_POWER_DOWN,
+ MC_SYNTH_RESET,
+ MC_SEQ_OFF
+};
+
+/* Enumeration of MXL5005 Tuner Modulation Type */
+enum {
+ MXL_DEFAULT_MODULATION = 0,
+ MXL_DVBT,
+ MXL_ATSC,
+ MXL_QAM,
+ MXL_ANALOG_CABLE,
+ MXL_ANALOG_OTA
+} tuner_modu_type;
+
+/* MXL5005 Tuner Register Struct */
+struct TunerReg {
+ u16 Reg_Num; /* Tuner Register Address */
+ u16 Reg_Val; /* Current sw programmed value waiting to be written */
+};
+
+enum {
+ /* Initialization Control Names */
+ DN_IQTN_AMP_CUT = 1, /* 1 */
+ BB_MODE, /* 2 */
+ BB_BUF, /* 3 */
+ BB_BUF_OA, /* 4 */
+ BB_ALPF_BANDSELECT, /* 5 */
+ BB_IQSWAP, /* 6 */
+ BB_DLPF_BANDSEL, /* 7 */
+ RFSYN_CHP_GAIN, /* 8 */
+ RFSYN_EN_CHP_HIGAIN, /* 9 */
+ AGC_IF, /* 10 */
+ AGC_RF, /* 11 */
+ IF_DIVVAL, /* 12 */
+ IF_VCO_BIAS, /* 13 */
+ CHCAL_INT_MOD_IF, /* 14 */
+ CHCAL_FRAC_MOD_IF, /* 15 */
+ DRV_RES_SEL, /* 16 */
+ I_DRIVER, /* 17 */
+ EN_AAF, /* 18 */
+ EN_3P, /* 19 */
+ EN_AUX_3P, /* 20 */
+ SEL_AAF_BAND, /* 21 */
+ SEQ_ENCLK16_CLK_OUT, /* 22 */
+ SEQ_SEL4_16B, /* 23 */
+ XTAL_CAPSELECT, /* 24 */
+ IF_SEL_DBL, /* 25 */
+ RFSYN_R_DIV, /* 26 */
+ SEQ_EXTSYNTHCALIF, /* 27 */
+ SEQ_EXTDCCAL, /* 28 */
+ AGC_EN_RSSI, /* 29 */
+ RFA_ENCLKRFAGC, /* 30 */
+ RFA_RSSI_REFH, /* 31 */
+ RFA_RSSI_REF, /* 32 */
+ RFA_RSSI_REFL, /* 33 */
+ RFA_FLR, /* 34 */
+ RFA_CEIL, /* 35 */
+ SEQ_EXTIQFSMPULSE, /* 36 */
+ OVERRIDE_1, /* 37 */
+ BB_INITSTATE_DLPF_TUNE, /* 38 */
+ TG_R_DIV, /* 39 */
+ EN_CHP_LIN_B, /* 40 */
+
+ /* Channel Change Control Names */
+ DN_POLY = 51, /* 51 */
+ DN_RFGAIN, /* 52 */
+ DN_CAP_RFLPF, /* 53 */
+ DN_EN_VHFUHFBAR, /* 54 */
+ DN_GAIN_ADJUST, /* 55 */
+ DN_IQTNBUF_AMP, /* 56 */
+ DN_IQTNGNBFBIAS_BST, /* 57 */
+ RFSYN_EN_OUTMUX, /* 58 */
+ RFSYN_SEL_VCO_OUT, /* 59 */
+ RFSYN_SEL_VCO_HI, /* 60 */
+ RFSYN_SEL_DIVM, /* 61 */
+ RFSYN_RF_DIV_BIAS, /* 62 */
+ DN_SEL_FREQ, /* 63 */
+ RFSYN_VCO_BIAS, /* 64 */
+ CHCAL_INT_MOD_RF, /* 65 */
+ CHCAL_FRAC_MOD_RF, /* 66 */
+ RFSYN_LPF_R, /* 67 */
+ CHCAL_EN_INT_RF, /* 68 */
+ TG_LO_DIVVAL, /* 69 */
+ TG_LO_SELVAL, /* 70 */
+ TG_DIV_VAL, /* 71 */
+ TG_VCO_BIAS, /* 72 */
+ SEQ_EXTPOWERUP, /* 73 */
+ OVERRIDE_2, /* 74 */
+ OVERRIDE_3, /* 75 */
+ OVERRIDE_4, /* 76 */
+ SEQ_FSM_PULSE, /* 77 */
+ GPIO_4B, /* 78 */
+ GPIO_3B, /* 79 */
+ GPIO_4, /* 80 */
+ GPIO_3, /* 81 */
+ GPIO_1B, /* 82 */
+ DAC_A_ENABLE, /* 83 */
+ DAC_B_ENABLE, /* 84 */
+ DAC_DIN_A, /* 85 */
+ DAC_DIN_B, /* 86 */
+#ifdef _MXL_PRODUCTION
+ RFSYN_EN_DIV, /* 87 */
+ RFSYN_DIVM, /* 88 */
+ DN_BYPASS_AGC_I2C /* 89 */
+#endif
+} MXL5005_ControlName;
+
+/*
+ * The following context is source code provided by MaxLinear.
+ * MaxLinear source code - Common_MXL.h (?)
+ */
+
+/* Constants */
+#define MXL5005S_REG_WRITING_TABLE_LEN_MAX 104
+#define MXL5005S_LATCH_BYTE 0xfe
+
+/* Register address, MSB, and LSB */
+#define MXL5005S_BB_IQSWAP_ADDR 59
+#define MXL5005S_BB_IQSWAP_MSB 0
+#define MXL5005S_BB_IQSWAP_LSB 0
+
+#define MXL5005S_BB_DLPF_BANDSEL_ADDR 53
+#define MXL5005S_BB_DLPF_BANDSEL_MSB 4
+#define MXL5005S_BB_DLPF_BANDSEL_LSB 3
+
+/* Standard modes */
+enum {
+ MXL5005S_STANDARD_DVBT,
+ MXL5005S_STANDARD_ATSC,
+};
+#define MXL5005S_STANDARD_MODE_NUM 2
+
+/* Bandwidth modes */
+enum {
+ MXL5005S_BANDWIDTH_6MHZ = 6000000,
+ MXL5005S_BANDWIDTH_7MHZ = 7000000,
+ MXL5005S_BANDWIDTH_8MHZ = 8000000,
+};
+#define MXL5005S_BANDWIDTH_MODE_NUM 3
+
+/* MXL5005 Tuner Control Struct */
+struct TunerControl {
+ u16 Ctrl_Num; /* Control Number */
+ u16 size; /* Number of bits to represent Value */
+ u16 addr[25]; /* Array of Tuner Register Address for each bit pos */
+ u16 bit[25]; /* Array of bit pos in Reg Addr for each bit pos */
+ u16 val[25]; /* Binary representation of Value */
+};
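+
+/*
+ * Each control spreads its value across individual register bits: bit j
+ * of the programmed value is kept in val[j] and written to tuner
+ * register addr[j] at bit position bit[j].  A control with size = 3
+ * mapped to register 166, bits 0..2, therefore stores values 0..7 in
+ * those three register bits.  The tables below are consumed by the
+ * MXL_Control*() helpers.
+ */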
+
+/* MXL5005 Tuner Struct */
+struct mxl5005s_state {
+ u8 Mode; /* 0: Analog Mode ; 1: Digital Mode */
+ u8 IF_Mode; /* for Analog Mode, 0: zero IF; 1: low IF */
+ u32 Chan_Bandwidth; /* filter channel bandwidth (6, 7, 8) */
+ u32 IF_OUT; /* Desired IF Out Frequency */
+ u16 IF_OUT_LOAD; /* IF Out Load Resistor (200/300 Ohms) */
+ u32 RF_IN; /* RF Input Frequency */
+ u32 Fxtal; /* XTAL Frequency */
+ u8 AGC_Mode; /* AGC Mode 0: Dual AGC; 1: Single AGC */
+ u16 TOP; /* Value: take over point */
+ u8 CLOCK_OUT; /* 0: turn off clk out; 1: turn on clock out */
+ u8 DIV_OUT; /* 4MHz or 16MHz */
+ u8 CAPSELECT; /* 0: disable On-Chip pulling cap; 1: enable */
+ u8 EN_RSSI; /* 0: disable RSSI; 1: enable RSSI */
+
+ /* Modulation Type; */
+ /* 0 - Default; 1 - DVB-T; 2 - ATSC; 3 - QAM; 4 - Analog Cable */
+ u8 Mod_Type;
+
+ /* Tracking Filter Type */
+ /* 0 - Default; 1 - Off; 2 - Type C; 3 - Type C-H */
+ u8 TF_Type;
+
+ /* Calculated Settings */
+ u32 RF_LO; /* Synth RF LO Frequency */
+ u32 IF_LO; /* Synth IF LO Frequency */
+ u32 TG_LO; /* Synth TG_LO Frequency */
+
+ /* Pointers to ControlName Arrays */
+ u16 Init_Ctrl_Num; /* Number of INIT Control Names */
+ struct TunerControl
+ Init_Ctrl[INITCTRL_NUM]; /* INIT Control Names Array Pointer */
+
+ u16 CH_Ctrl_Num; /* Number of CH Control Names */
+ struct TunerControl
+ CH_Ctrl[CHCTRL_NUM]; /* CH Control Name Array Pointer */
+
+ u16 MXL_Ctrl_Num; /* Number of MXL Control Names */
+ struct TunerControl
+ MXL_Ctrl[MXLCTRL_NUM]; /* MXL Control Name Array Pointer */
+
+ /* Pointer to Tuner Register Array */
+ u16 TunerRegs_Num; /* Number of Tuner Registers */
+ struct TunerReg
+ TunerRegs[TUNER_REGS_NUM]; /* Tuner Register Array Pointer */
+
+ /* Linux driver framework specific */
+ struct mxl5005s_config *config;
+ struct dvb_frontend *frontend;
+ struct i2c_adapter *i2c;
+
+ /* Cache values */
+ u32 current_mode;
+
+};
+
+static u16 MXL_GetMasterControl(u8 *MasterReg, int state);
+static u16 MXL_ControlWrite(struct dvb_frontend *fe, u16 ControlNum, u32 value);
+static u16 MXL_ControlRead(struct dvb_frontend *fe, u16 controlNum, u32 *value);
+static void MXL_RegWriteBit(struct dvb_frontend *fe, u8 address, u8 bit,
+ u8 bitVal);
+static u16 MXL_GetCHRegister(struct dvb_frontend *fe, u8 *RegNum,
+ u8 *RegVal, int *count);
+static u32 MXL_Ceiling(u32 value, u32 resolution);
+static u16 MXL_RegRead(struct dvb_frontend *fe, u8 RegNum, u8 *RegVal);
+static u16 MXL_ControlWrite_Group(struct dvb_frontend *fe, u16 controlNum,
+ u32 value, u16 controlGroup);
+static u16 MXL_SetGPIO(struct dvb_frontend *fe, u8 GPIO_Num, u8 GPIO_Val);
+static u16 MXL_GetInitRegister(struct dvb_frontend *fe, u8 *RegNum,
+ u8 *RegVal, int *count);
+static u32 MXL_GetXtalInt(u32 Xtal_Freq);
+static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq);
+static void MXL_SynthIFLO_Calc(struct dvb_frontend *fe);
+static void MXL_SynthRFTGLO_Calc(struct dvb_frontend *fe);
+static u16 MXL_GetCHRegister_ZeroIF(struct dvb_frontend *fe, u8 *RegNum,
+ u8 *RegVal, int *count);
+static int mxl5005s_writeregs(struct dvb_frontend *fe, u8 *addrtable,
+ u8 *datatable, u8 len);
+static u16 MXL_IFSynthInit(struct dvb_frontend *fe);
+static int mxl5005s_AssignTunerMode(struct dvb_frontend *fe, u32 mod_type,
+ u32 bandwidth);
+static int mxl5005s_reconfigure(struct dvb_frontend *fe, u32 mod_type,
+ u32 bandwidth);
+
+/* ----------------------------------------------------------------
+ * Begin: Custom code salvaged from the Realtek driver.
+ * Copyright (C) 2008 Realtek
+ * Copyright (C) 2008 Jan Hoogenraad
+ * This code is placed under the terms of the GNU General Public License
+ *
+ * Released by Realtek under GPLv2.
+ * Thanks to Realtek for a lot of support we received!
+ *
+ * Revision: 080314 - original version
+ */
+
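+/*
+ * Program the RF frequency using the three-stage sequence from the
+ * vendor example code: reset the synthesizer, load the channel
+ * registers with the FSM pulse held off and IF_DIVVAL forced to 8,
+ * then re-enable the FSM pulse and restore the saved IF_DIVVAL.
+ */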
+static int mxl5005s_SetRfFreqHz(struct dvb_frontend *fe, unsigned long RfFreqHz)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ unsigned char AddrTable[MXL5005S_REG_WRITING_TABLE_LEN_MAX];
+ unsigned char ByteTable[MXL5005S_REG_WRITING_TABLE_LEN_MAX];
+ int TableLen;
+
+ u32 IfDivval = 0;
+ unsigned char MasterControlByte;
+
+ dprintk(1, "%s() freq=%ld\n", __func__, RfFreqHz);
+
+ /* Set MxL5005S tuner RF frequency according to example code. */
+
+ /* Tuner RF frequency setting stage 0 */
+ MXL_GetMasterControl(ByteTable, MC_SYNTH_RESET);
+ AddrTable[0] = MASTER_CONTROL_ADDR;
+ ByteTable[0] |= state->config->AgcMasterByte;
+
+ mxl5005s_writeregs(fe, AddrTable, ByteTable, 1);
+
+ /* Tuner RF frequency setting stage 1 */
+ MXL_TuneRF(fe, RfFreqHz);
+
+ MXL_ControlRead(fe, IF_DIVVAL, &IfDivval);
+
+ MXL_ControlWrite(fe, SEQ_FSM_PULSE, 0);
+ MXL_ControlWrite(fe, SEQ_EXTPOWERUP, 1);
+ MXL_ControlWrite(fe, IF_DIVVAL, 8);
+ MXL_GetCHRegister(fe, AddrTable, ByteTable, &TableLen);
+
+ MXL_GetMasterControl(&MasterControlByte, MC_LOAD_START);
+ AddrTable[TableLen] = MASTER_CONTROL_ADDR ;
+ ByteTable[TableLen] = MasterControlByte |
+ state->config->AgcMasterByte;
+ TableLen += 1;
+
+ mxl5005s_writeregs(fe, AddrTable, ByteTable, TableLen);
+
+ /* Wait 150 ms. */
+ msleep(150);
+
+ /* Tuner RF frequency setting stage 2 */
+ MXL_ControlWrite(fe, SEQ_FSM_PULSE, 1);
+ MXL_ControlWrite(fe, IF_DIVVAL, IfDivval);
+ MXL_GetCHRegister_ZeroIF(fe, AddrTable, ByteTable, &TableLen);
+
+ MXL_GetMasterControl(&MasterControlByte, MC_LOAD_START);
+ AddrTable[TableLen] = MASTER_CONTROL_ADDR ;
+ ByteTable[TableLen] = MasterControlByte |
+ state->config->AgcMasterByte ;
+ TableLen += 1;
+
+ mxl5005s_writeregs(fe, AddrTable, ByteTable, TableLen);
+
+ msleep(100);
+
+ return 0;
+}
+/* End: Custom code taken from the Realtek driver */
+
+/* ----------------------------------------------------------------
+ * Begin: Reference driver code found in the Realtek driver.
+ * Copyright (C) 2008 MaxLinear
+ */
+static u16 MXL5005_RegisterInit(struct dvb_frontend *fe)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ state->TunerRegs_Num = TUNER_REGS_NUM ;
+
+ state->TunerRegs[0].Reg_Num = 9 ;
+ state->TunerRegs[0].Reg_Val = 0x40 ;
+
+ state->TunerRegs[1].Reg_Num = 11 ;
+ state->TunerRegs[1].Reg_Val = 0x19 ;
+
+ state->TunerRegs[2].Reg_Num = 12 ;
+ state->TunerRegs[2].Reg_Val = 0x60 ;
+
+ state->TunerRegs[3].Reg_Num = 13 ;
+ state->TunerRegs[3].Reg_Val = 0x00 ;
+
+ state->TunerRegs[4].Reg_Num = 14 ;
+ state->TunerRegs[4].Reg_Val = 0x00 ;
+
+ state->TunerRegs[5].Reg_Num = 15 ;
+ state->TunerRegs[5].Reg_Val = 0xC0 ;
+
+ state->TunerRegs[6].Reg_Num = 16 ;
+ state->TunerRegs[6].Reg_Val = 0x00 ;
+
+ state->TunerRegs[7].Reg_Num = 17 ;
+ state->TunerRegs[7].Reg_Val = 0x00 ;
+
+ state->TunerRegs[8].Reg_Num = 18 ;
+ state->TunerRegs[8].Reg_Val = 0x00 ;
+
+ state->TunerRegs[9].Reg_Num = 19 ;
+ state->TunerRegs[9].Reg_Val = 0x34 ;
+
+ state->TunerRegs[10].Reg_Num = 21 ;
+ state->TunerRegs[10].Reg_Val = 0x00 ;
+
+ state->TunerRegs[11].Reg_Num = 22 ;
+ state->TunerRegs[11].Reg_Val = 0x6B ;
+
+ state->TunerRegs[12].Reg_Num = 23 ;
+ state->TunerRegs[12].Reg_Val = 0x35 ;
+
+ state->TunerRegs[13].Reg_Num = 24 ;
+ state->TunerRegs[13].Reg_Val = 0x70 ;
+
+ state->TunerRegs[14].Reg_Num = 25 ;
+ state->TunerRegs[14].Reg_Val = 0x3E ;
+
+ state->TunerRegs[15].Reg_Num = 26 ;
+ state->TunerRegs[15].Reg_Val = 0x82 ;
+
+ state->TunerRegs[16].Reg_Num = 31 ;
+ state->TunerRegs[16].Reg_Val = 0x00 ;
+
+ state->TunerRegs[17].Reg_Num = 32 ;
+ state->TunerRegs[17].Reg_Val = 0x40 ;
+
+ state->TunerRegs[18].Reg_Num = 33 ;
+ state->TunerRegs[18].Reg_Val = 0x53 ;
+
+ state->TunerRegs[19].Reg_Num = 34 ;
+ state->TunerRegs[19].Reg_Val = 0x81 ;
+
+ state->TunerRegs[20].Reg_Num = 35 ;
+ state->TunerRegs[20].Reg_Val = 0xC9 ;
+
+ state->TunerRegs[21].Reg_Num = 36 ;
+ state->TunerRegs[21].Reg_Val = 0x01 ;
+
+ state->TunerRegs[22].Reg_Num = 37 ;
+ state->TunerRegs[22].Reg_Val = 0x00 ;
+
+ state->TunerRegs[23].Reg_Num = 41 ;
+ state->TunerRegs[23].Reg_Val = 0x00 ;
+
+ state->TunerRegs[24].Reg_Num = 42 ;
+ state->TunerRegs[24].Reg_Val = 0xF8 ;
+
+ state->TunerRegs[25].Reg_Num = 43 ;
+ state->TunerRegs[25].Reg_Val = 0x43 ;
+
+ state->TunerRegs[26].Reg_Num = 44 ;
+ state->TunerRegs[26].Reg_Val = 0x20 ;
+
+ state->TunerRegs[27].Reg_Num = 45 ;
+ state->TunerRegs[27].Reg_Val = 0x80 ;
+
+ state->TunerRegs[28].Reg_Num = 46 ;
+ state->TunerRegs[28].Reg_Val = 0x88 ;
+
+ state->TunerRegs[29].Reg_Num = 47 ;
+ state->TunerRegs[29].Reg_Val = 0x86 ;
+
+ state->TunerRegs[30].Reg_Num = 48 ;
+ state->TunerRegs[30].Reg_Val = 0x00 ;
+
+ state->TunerRegs[31].Reg_Num = 49 ;
+ state->TunerRegs[31].Reg_Val = 0x00 ;
+
+ state->TunerRegs[32].Reg_Num = 53 ;
+ state->TunerRegs[32].Reg_Val = 0x94 ;
+
+ state->TunerRegs[33].Reg_Num = 54 ;
+ state->TunerRegs[33].Reg_Val = 0xFA ;
+
+ state->TunerRegs[34].Reg_Num = 55 ;
+ state->TunerRegs[34].Reg_Val = 0x92 ;
+
+ state->TunerRegs[35].Reg_Num = 56 ;
+ state->TunerRegs[35].Reg_Val = 0x80 ;
+
+ state->TunerRegs[36].Reg_Num = 57 ;
+ state->TunerRegs[36].Reg_Val = 0x41 ;
+
+ state->TunerRegs[37].Reg_Num = 58 ;
+ state->TunerRegs[37].Reg_Val = 0xDB ;
+
+ state->TunerRegs[38].Reg_Num = 59 ;
+ state->TunerRegs[38].Reg_Val = 0x00 ;
+
+ state->TunerRegs[39].Reg_Num = 60 ;
+ state->TunerRegs[39].Reg_Val = 0x00 ;
+
+ state->TunerRegs[40].Reg_Num = 61 ;
+ state->TunerRegs[40].Reg_Val = 0x00 ;
+
+ state->TunerRegs[41].Reg_Num = 62 ;
+ state->TunerRegs[41].Reg_Val = 0x00 ;
+
+ state->TunerRegs[42].Reg_Num = 65 ;
+ state->TunerRegs[42].Reg_Val = 0xF8 ;
+
+ state->TunerRegs[43].Reg_Num = 66 ;
+ state->TunerRegs[43].Reg_Val = 0xE4 ;
+
+ state->TunerRegs[44].Reg_Num = 67 ;
+ state->TunerRegs[44].Reg_Val = 0x90 ;
+
+ state->TunerRegs[45].Reg_Num = 68 ;
+ state->TunerRegs[45].Reg_Val = 0xC0 ;
+
+ state->TunerRegs[46].Reg_Num = 69 ;
+ state->TunerRegs[46].Reg_Val = 0x01 ;
+
+ state->TunerRegs[47].Reg_Num = 70 ;
+ state->TunerRegs[47].Reg_Val = 0x50 ;
+
+ state->TunerRegs[48].Reg_Num = 71 ;
+ state->TunerRegs[48].Reg_Val = 0x06 ;
+
+ state->TunerRegs[49].Reg_Num = 72 ;
+ state->TunerRegs[49].Reg_Val = 0x00 ;
+
+ state->TunerRegs[50].Reg_Num = 73 ;
+ state->TunerRegs[50].Reg_Val = 0x20 ;
+
+ state->TunerRegs[51].Reg_Num = 76 ;
+ state->TunerRegs[51].Reg_Val = 0xBB ;
+
+ state->TunerRegs[52].Reg_Num = 77 ;
+ state->TunerRegs[52].Reg_Val = 0x13 ;
+
+ state->TunerRegs[53].Reg_Num = 81 ;
+ state->TunerRegs[53].Reg_Val = 0x04 ;
+
+ state->TunerRegs[54].Reg_Num = 82 ;
+ state->TunerRegs[54].Reg_Val = 0x75 ;
+
+ state->TunerRegs[55].Reg_Num = 83 ;
+ state->TunerRegs[55].Reg_Val = 0x00 ;
+
+ state->TunerRegs[56].Reg_Num = 84 ;
+ state->TunerRegs[56].Reg_Val = 0x00 ;
+
+ state->TunerRegs[57].Reg_Num = 85 ;
+ state->TunerRegs[57].Reg_Val = 0x00 ;
+
+ state->TunerRegs[58].Reg_Num = 91 ;
+ state->TunerRegs[58].Reg_Val = 0x70 ;
+
+ state->TunerRegs[59].Reg_Num = 92 ;
+ state->TunerRegs[59].Reg_Val = 0x00 ;
+
+ state->TunerRegs[60].Reg_Num = 93 ;
+ state->TunerRegs[60].Reg_Val = 0x00 ;
+
+ state->TunerRegs[61].Reg_Num = 94 ;
+ state->TunerRegs[61].Reg_Val = 0x00 ;
+
+ state->TunerRegs[62].Reg_Num = 95 ;
+ state->TunerRegs[62].Reg_Val = 0x0C ;
+
+ state->TunerRegs[63].Reg_Num = 96 ;
+ state->TunerRegs[63].Reg_Val = 0x00 ;
+
+ state->TunerRegs[64].Reg_Num = 97 ;
+ state->TunerRegs[64].Reg_Val = 0x00 ;
+
+ state->TunerRegs[65].Reg_Num = 98 ;
+ state->TunerRegs[65].Reg_Val = 0xE2 ;
+
+ state->TunerRegs[66].Reg_Num = 99 ;
+ state->TunerRegs[66].Reg_Val = 0x00 ;
+
+ state->TunerRegs[67].Reg_Num = 100 ;
+ state->TunerRegs[67].Reg_Val = 0x00 ;
+
+ state->TunerRegs[68].Reg_Num = 101 ;
+ state->TunerRegs[68].Reg_Val = 0x12 ;
+
+ state->TunerRegs[69].Reg_Num = 102 ;
+ state->TunerRegs[69].Reg_Val = 0x80 ;
+
+ state->TunerRegs[70].Reg_Num = 103 ;
+ state->TunerRegs[70].Reg_Val = 0x32 ;
+
+ state->TunerRegs[71].Reg_Num = 104 ;
+ state->TunerRegs[71].Reg_Val = 0xB4 ;
+
+ state->TunerRegs[72].Reg_Num = 105 ;
+ state->TunerRegs[72].Reg_Val = 0x60 ;
+
+ state->TunerRegs[73].Reg_Num = 106 ;
+ state->TunerRegs[73].Reg_Val = 0x83 ;
+
+ state->TunerRegs[74].Reg_Num = 107 ;
+ state->TunerRegs[74].Reg_Val = 0x84 ;
+
+ state->TunerRegs[75].Reg_Num = 108 ;
+ state->TunerRegs[75].Reg_Val = 0x9C ;
+
+ state->TunerRegs[76].Reg_Num = 109 ;
+ state->TunerRegs[76].Reg_Val = 0x02 ;
+
+ state->TunerRegs[77].Reg_Num = 110 ;
+ state->TunerRegs[77].Reg_Val = 0x81 ;
+
+ state->TunerRegs[78].Reg_Num = 111 ;
+ state->TunerRegs[78].Reg_Val = 0xC0 ;
+
+ state->TunerRegs[79].Reg_Num = 112 ;
+ state->TunerRegs[79].Reg_Val = 0x10 ;
+
+ state->TunerRegs[80].Reg_Num = 131 ;
+ state->TunerRegs[80].Reg_Val = 0x8A ;
+
+ state->TunerRegs[81].Reg_Num = 132 ;
+ state->TunerRegs[81].Reg_Val = 0x10 ;
+
+ state->TunerRegs[82].Reg_Num = 133 ;
+ state->TunerRegs[82].Reg_Val = 0x24 ;
+
+ state->TunerRegs[83].Reg_Num = 134 ;
+ state->TunerRegs[83].Reg_Val = 0x00 ;
+
+ state->TunerRegs[84].Reg_Num = 135 ;
+ state->TunerRegs[84].Reg_Val = 0x00 ;
+
+ state->TunerRegs[85].Reg_Num = 136 ;
+ state->TunerRegs[85].Reg_Val = 0x7E ;
+
+ state->TunerRegs[86].Reg_Num = 137 ;
+ state->TunerRegs[86].Reg_Val = 0x40 ;
+
+ state->TunerRegs[87].Reg_Num = 138 ;
+ state->TunerRegs[87].Reg_Val = 0x38 ;
+
+ state->TunerRegs[88].Reg_Num = 146 ;
+ state->TunerRegs[88].Reg_Val = 0xF6 ;
+
+ state->TunerRegs[89].Reg_Num = 147 ;
+ state->TunerRegs[89].Reg_Val = 0x1A ;
+
+ state->TunerRegs[90].Reg_Num = 148 ;
+ state->TunerRegs[90].Reg_Val = 0x62 ;
+
+ state->TunerRegs[91].Reg_Num = 149 ;
+ state->TunerRegs[91].Reg_Val = 0x33 ;
+
+ state->TunerRegs[92].Reg_Num = 150 ;
+ state->TunerRegs[92].Reg_Val = 0x80 ;
+
+ state->TunerRegs[93].Reg_Num = 156 ;
+ state->TunerRegs[93].Reg_Val = 0x56 ;
+
+ state->TunerRegs[94].Reg_Num = 157 ;
+ state->TunerRegs[94].Reg_Val = 0x17 ;
+
+ state->TunerRegs[95].Reg_Num = 158 ;
+ state->TunerRegs[95].Reg_Val = 0xA9 ;
+
+ state->TunerRegs[96].Reg_Num = 159 ;
+ state->TunerRegs[96].Reg_Val = 0x00 ;
+
+ state->TunerRegs[97].Reg_Num = 160 ;
+ state->TunerRegs[97].Reg_Val = 0x00 ;
+
+ state->TunerRegs[98].Reg_Num = 161 ;
+ state->TunerRegs[98].Reg_Val = 0x00 ;
+
+ state->TunerRegs[99].Reg_Num = 162 ;
+ state->TunerRegs[99].Reg_Val = 0x40 ;
+
+ state->TunerRegs[100].Reg_Num = 166 ;
+ state->TunerRegs[100].Reg_Val = 0xAE ;
+
+ state->TunerRegs[101].Reg_Num = 167 ;
+ state->TunerRegs[101].Reg_Val = 0x1B ;
+
+ state->TunerRegs[102].Reg_Num = 168 ;
+ state->TunerRegs[102].Reg_Val = 0xF2 ;
+
+ state->TunerRegs[103].Reg_Num = 195 ;
+ state->TunerRegs[103].Reg_Val = 0x00 ;
+
+ return 0 ;
+}
+
+static u16 MXL5005_ControlInit(struct dvb_frontend *fe)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ state->Init_Ctrl_Num = INITCTRL_NUM;
+
+ state->Init_Ctrl[0].Ctrl_Num = DN_IQTN_AMP_CUT ;
+ state->Init_Ctrl[0].size = 1 ;
+ state->Init_Ctrl[0].addr[0] = 73;
+ state->Init_Ctrl[0].bit[0] = 7;
+ state->Init_Ctrl[0].val[0] = 0;
+
+ state->Init_Ctrl[1].Ctrl_Num = BB_MODE ;
+ state->Init_Ctrl[1].size = 1 ;
+ state->Init_Ctrl[1].addr[0] = 53;
+ state->Init_Ctrl[1].bit[0] = 2;
+ state->Init_Ctrl[1].val[0] = 1;
+
+ state->Init_Ctrl[2].Ctrl_Num = BB_BUF ;
+ state->Init_Ctrl[2].size = 2 ;
+ state->Init_Ctrl[2].addr[0] = 53;
+ state->Init_Ctrl[2].bit[0] = 1;
+ state->Init_Ctrl[2].val[0] = 0;
+ state->Init_Ctrl[2].addr[1] = 57;
+ state->Init_Ctrl[2].bit[1] = 0;
+ state->Init_Ctrl[2].val[1] = 1;
+
+ state->Init_Ctrl[3].Ctrl_Num = BB_BUF_OA ;
+ state->Init_Ctrl[3].size = 1 ;
+ state->Init_Ctrl[3].addr[0] = 53;
+ state->Init_Ctrl[3].bit[0] = 0;
+ state->Init_Ctrl[3].val[0] = 0;
+
+ state->Init_Ctrl[4].Ctrl_Num = BB_ALPF_BANDSELECT ;
+ state->Init_Ctrl[4].size = 3 ;
+ state->Init_Ctrl[4].addr[0] = 53;
+ state->Init_Ctrl[4].bit[0] = 5;
+ state->Init_Ctrl[4].val[0] = 0;
+ state->Init_Ctrl[4].addr[1] = 53;
+ state->Init_Ctrl[4].bit[1] = 6;
+ state->Init_Ctrl[4].val[1] = 0;
+ state->Init_Ctrl[4].addr[2] = 53;
+ state->Init_Ctrl[4].bit[2] = 7;
+ state->Init_Ctrl[4].val[2] = 1;
+
+ state->Init_Ctrl[5].Ctrl_Num = BB_IQSWAP ;
+ state->Init_Ctrl[5].size = 1 ;
+ state->Init_Ctrl[5].addr[0] = 59;
+ state->Init_Ctrl[5].bit[0] = 0;
+ state->Init_Ctrl[5].val[0] = 0;
+
+ state->Init_Ctrl[6].Ctrl_Num = BB_DLPF_BANDSEL ;
+ state->Init_Ctrl[6].size = 2 ;
+ state->Init_Ctrl[6].addr[0] = 53;
+ state->Init_Ctrl[6].bit[0] = 3;
+ state->Init_Ctrl[6].val[0] = 0;
+ state->Init_Ctrl[6].addr[1] = 53;
+ state->Init_Ctrl[6].bit[1] = 4;
+ state->Init_Ctrl[6].val[1] = 1;
+
+ state->Init_Ctrl[7].Ctrl_Num = RFSYN_CHP_GAIN ;
+ state->Init_Ctrl[7].size = 4 ;
+ state->Init_Ctrl[7].addr[0] = 22;
+ state->Init_Ctrl[7].bit[0] = 4;
+ state->Init_Ctrl[7].val[0] = 0;
+ state->Init_Ctrl[7].addr[1] = 22;
+ state->Init_Ctrl[7].bit[1] = 5;
+ state->Init_Ctrl[7].val[1] = 1;
+ state->Init_Ctrl[7].addr[2] = 22;
+ state->Init_Ctrl[7].bit[2] = 6;
+ state->Init_Ctrl[7].val[2] = 1;
+ state->Init_Ctrl[7].addr[3] = 22;
+ state->Init_Ctrl[7].bit[3] = 7;
+ state->Init_Ctrl[7].val[3] = 0;
+
+ state->Init_Ctrl[8].Ctrl_Num = RFSYN_EN_CHP_HIGAIN ;
+ state->Init_Ctrl[8].size = 1 ;
+ state->Init_Ctrl[8].addr[0] = 22;
+ state->Init_Ctrl[8].bit[0] = 2;
+ state->Init_Ctrl[8].val[0] = 0;
+
+ state->Init_Ctrl[9].Ctrl_Num = AGC_IF ;
+ state->Init_Ctrl[9].size = 4 ;
+ state->Init_Ctrl[9].addr[0] = 76;
+ state->Init_Ctrl[9].bit[0] = 0;
+ state->Init_Ctrl[9].val[0] = 1;
+ state->Init_Ctrl[9].addr[1] = 76;
+ state->Init_Ctrl[9].bit[1] = 1;
+ state->Init_Ctrl[9].val[1] = 1;
+ state->Init_Ctrl[9].addr[2] = 76;
+ state->Init_Ctrl[9].bit[2] = 2;
+ state->Init_Ctrl[9].val[2] = 0;
+ state->Init_Ctrl[9].addr[3] = 76;
+ state->Init_Ctrl[9].bit[3] = 3;
+ state->Init_Ctrl[9].val[3] = 1;
+
+ state->Init_Ctrl[10].Ctrl_Num = AGC_RF ;
+ state->Init_Ctrl[10].size = 4 ;
+ state->Init_Ctrl[10].addr[0] = 76;
+ state->Init_Ctrl[10].bit[0] = 4;
+ state->Init_Ctrl[10].val[0] = 1;
+ state->Init_Ctrl[10].addr[1] = 76;
+ state->Init_Ctrl[10].bit[1] = 5;
+ state->Init_Ctrl[10].val[1] = 1;
+ state->Init_Ctrl[10].addr[2] = 76;
+ state->Init_Ctrl[10].bit[2] = 6;
+ state->Init_Ctrl[10].val[2] = 0;
+ state->Init_Ctrl[10].addr[3] = 76;
+ state->Init_Ctrl[10].bit[3] = 7;
+ state->Init_Ctrl[10].val[3] = 1;
+
+ state->Init_Ctrl[11].Ctrl_Num = IF_DIVVAL ;
+ state->Init_Ctrl[11].size = 5 ;
+ state->Init_Ctrl[11].addr[0] = 43;
+ state->Init_Ctrl[11].bit[0] = 3;
+ state->Init_Ctrl[11].val[0] = 0;
+ state->Init_Ctrl[11].addr[1] = 43;
+ state->Init_Ctrl[11].bit[1] = 4;
+ state->Init_Ctrl[11].val[1] = 0;
+ state->Init_Ctrl[11].addr[2] = 43;
+ state->Init_Ctrl[11].bit[2] = 5;
+ state->Init_Ctrl[11].val[2] = 0;
+ state->Init_Ctrl[11].addr[3] = 43;
+ state->Init_Ctrl[11].bit[3] = 6;
+ state->Init_Ctrl[11].val[3] = 1;
+ state->Init_Ctrl[11].addr[4] = 43;
+ state->Init_Ctrl[11].bit[4] = 7;
+ state->Init_Ctrl[11].val[4] = 0;
+
+ state->Init_Ctrl[12].Ctrl_Num = IF_VCO_BIAS ;
+ state->Init_Ctrl[12].size = 6 ;
+ state->Init_Ctrl[12].addr[0] = 44;
+ state->Init_Ctrl[12].bit[0] = 2;
+ state->Init_Ctrl[12].val[0] = 0;
+ state->Init_Ctrl[12].addr[1] = 44;
+ state->Init_Ctrl[12].bit[1] = 3;
+ state->Init_Ctrl[12].val[1] = 0;
+ state->Init_Ctrl[12].addr[2] = 44;
+ state->Init_Ctrl[12].bit[2] = 4;
+ state->Init_Ctrl[12].val[2] = 0;
+ state->Init_Ctrl[12].addr[3] = 44;
+ state->Init_Ctrl[12].bit[3] = 5;
+ state->Init_Ctrl[12].val[3] = 1;
+ state->Init_Ctrl[12].addr[4] = 44;
+ state->Init_Ctrl[12].bit[4] = 6;
+ state->Init_Ctrl[12].val[4] = 0;
+ state->Init_Ctrl[12].addr[5] = 44;
+ state->Init_Ctrl[12].bit[5] = 7;
+ state->Init_Ctrl[12].val[5] = 0;
+
+ state->Init_Ctrl[13].Ctrl_Num = CHCAL_INT_MOD_IF ;
+ state->Init_Ctrl[13].size = 7 ;
+ state->Init_Ctrl[13].addr[0] = 11;
+ state->Init_Ctrl[13].bit[0] = 0;
+ state->Init_Ctrl[13].val[0] = 1;
+ state->Init_Ctrl[13].addr[1] = 11;
+ state->Init_Ctrl[13].bit[1] = 1;
+ state->Init_Ctrl[13].val[1] = 0;
+ state->Init_Ctrl[13].addr[2] = 11;
+ state->Init_Ctrl[13].bit[2] = 2;
+ state->Init_Ctrl[13].val[2] = 0;
+ state->Init_Ctrl[13].addr[3] = 11;
+ state->Init_Ctrl[13].bit[3] = 3;
+ state->Init_Ctrl[13].val[3] = 1;
+ state->Init_Ctrl[13].addr[4] = 11;
+ state->Init_Ctrl[13].bit[4] = 4;
+ state->Init_Ctrl[13].val[4] = 1;
+ state->Init_Ctrl[13].addr[5] = 11;
+ state->Init_Ctrl[13].bit[5] = 5;
+ state->Init_Ctrl[13].val[5] = 0;
+ state->Init_Ctrl[13].addr[6] = 11;
+ state->Init_Ctrl[13].bit[6] = 6;
+ state->Init_Ctrl[13].val[6] = 0;
+
+ state->Init_Ctrl[14].Ctrl_Num = CHCAL_FRAC_MOD_IF ;
+ state->Init_Ctrl[14].size = 16 ;
+ state->Init_Ctrl[14].addr[0] = 13;
+ state->Init_Ctrl[14].bit[0] = 0;
+ state->Init_Ctrl[14].val[0] = 0;
+ state->Init_Ctrl[14].addr[1] = 13;
+ state->Init_Ctrl[14].bit[1] = 1;
+ state->Init_Ctrl[14].val[1] = 0;
+ state->Init_Ctrl[14].addr[2] = 13;
+ state->Init_Ctrl[14].bit[2] = 2;
+ state->Init_Ctrl[14].val[2] = 0;
+ state->Init_Ctrl[14].addr[3] = 13;
+ state->Init_Ctrl[14].bit[3] = 3;
+ state->Init_Ctrl[14].val[3] = 0;
+ state->Init_Ctrl[14].addr[4] = 13;
+ state->Init_Ctrl[14].bit[4] = 4;
+ state->Init_Ctrl[14].val[4] = 0;
+ state->Init_Ctrl[14].addr[5] = 13;
+ state->Init_Ctrl[14].bit[5] = 5;
+ state->Init_Ctrl[14].val[5] = 0;
+ state->Init_Ctrl[14].addr[6] = 13;
+ state->Init_Ctrl[14].bit[6] = 6;
+ state->Init_Ctrl[14].val[6] = 0;
+ state->Init_Ctrl[14].addr[7] = 13;
+ state->Init_Ctrl[14].bit[7] = 7;
+ state->Init_Ctrl[14].val[7] = 0;
+ state->Init_Ctrl[14].addr[8] = 12;
+ state->Init_Ctrl[14].bit[8] = 0;
+ state->Init_Ctrl[14].val[8] = 0;
+ state->Init_Ctrl[14].addr[9] = 12;
+ state->Init_Ctrl[14].bit[9] = 1;
+ state->Init_Ctrl[14].val[9] = 0;
+ state->Init_Ctrl[14].addr[10] = 12;
+ state->Init_Ctrl[14].bit[10] = 2;
+ state->Init_Ctrl[14].val[10] = 0;
+ state->Init_Ctrl[14].addr[11] = 12;
+ state->Init_Ctrl[14].bit[11] = 3;
+ state->Init_Ctrl[14].val[11] = 0;
+ state->Init_Ctrl[14].addr[12] = 12;
+ state->Init_Ctrl[14].bit[12] = 4;
+ state->Init_Ctrl[14].val[12] = 0;
+ state->Init_Ctrl[14].addr[13] = 12;
+ state->Init_Ctrl[14].bit[13] = 5;
+ state->Init_Ctrl[14].val[13] = 1;
+ state->Init_Ctrl[14].addr[14] = 12;
+ state->Init_Ctrl[14].bit[14] = 6;
+ state->Init_Ctrl[14].val[14] = 1;
+ state->Init_Ctrl[14].addr[15] = 12;
+ state->Init_Ctrl[14].bit[15] = 7;
+ state->Init_Ctrl[14].val[15] = 0;
+
+ state->Init_Ctrl[15].Ctrl_Num = DRV_RES_SEL ;
+ state->Init_Ctrl[15].size = 3 ;
+ state->Init_Ctrl[15].addr[0] = 147;
+ state->Init_Ctrl[15].bit[0] = 2;
+ state->Init_Ctrl[15].val[0] = 0;
+ state->Init_Ctrl[15].addr[1] = 147;
+ state->Init_Ctrl[15].bit[1] = 3;
+ state->Init_Ctrl[15].val[1] = 1;
+ state->Init_Ctrl[15].addr[2] = 147;
+ state->Init_Ctrl[15].bit[2] = 4;
+ state->Init_Ctrl[15].val[2] = 1;
+
+ state->Init_Ctrl[16].Ctrl_Num = I_DRIVER ;
+ state->Init_Ctrl[16].size = 2 ;
+ state->Init_Ctrl[16].addr[0] = 147;
+ state->Init_Ctrl[16].bit[0] = 0;
+ state->Init_Ctrl[16].val[0] = 0;
+ state->Init_Ctrl[16].addr[1] = 147;
+ state->Init_Ctrl[16].bit[1] = 1;
+ state->Init_Ctrl[16].val[1] = 1;
+
+ state->Init_Ctrl[17].Ctrl_Num = EN_AAF ;
+ state->Init_Ctrl[17].size = 1 ;
+ state->Init_Ctrl[17].addr[0] = 147;
+ state->Init_Ctrl[17].bit[0] = 7;
+ state->Init_Ctrl[17].val[0] = 0;
+
+ state->Init_Ctrl[18].Ctrl_Num = EN_3P ;
+ state->Init_Ctrl[18].size = 1 ;
+ state->Init_Ctrl[18].addr[0] = 147;
+ state->Init_Ctrl[18].bit[0] = 6;
+ state->Init_Ctrl[18].val[0] = 0;
+
+ state->Init_Ctrl[19].Ctrl_Num = EN_AUX_3P ;
+ state->Init_Ctrl[19].size = 1 ;
+ state->Init_Ctrl[19].addr[0] = 156;
+ state->Init_Ctrl[19].bit[0] = 0;
+ state->Init_Ctrl[19].val[0] = 0;
+
+ state->Init_Ctrl[20].Ctrl_Num = SEL_AAF_BAND ;
+ state->Init_Ctrl[20].size = 1 ;
+ state->Init_Ctrl[20].addr[0] = 147;
+ state->Init_Ctrl[20].bit[0] = 5;
+ state->Init_Ctrl[20].val[0] = 0;
+
+ state->Init_Ctrl[21].Ctrl_Num = SEQ_ENCLK16_CLK_OUT ;
+ state->Init_Ctrl[21].size = 1 ;
+ state->Init_Ctrl[21].addr[0] = 137;
+ state->Init_Ctrl[21].bit[0] = 4;
+ state->Init_Ctrl[21].val[0] = 0;
+
+ state->Init_Ctrl[22].Ctrl_Num = SEQ_SEL4_16B ;
+ state->Init_Ctrl[22].size = 1 ;
+ state->Init_Ctrl[22].addr[0] = 137;
+ state->Init_Ctrl[22].bit[0] = 7;
+ state->Init_Ctrl[22].val[0] = 0;
+
+ state->Init_Ctrl[23].Ctrl_Num = XTAL_CAPSELECT ;
+ state->Init_Ctrl[23].size = 1 ;
+ state->Init_Ctrl[23].addr[0] = 91;
+ state->Init_Ctrl[23].bit[0] = 5;
+ state->Init_Ctrl[23].val[0] = 1;
+
+ state->Init_Ctrl[24].Ctrl_Num = IF_SEL_DBL ;
+ state->Init_Ctrl[24].size = 1 ;
+ state->Init_Ctrl[24].addr[0] = 43;
+ state->Init_Ctrl[24].bit[0] = 0;
+ state->Init_Ctrl[24].val[0] = 1;
+
+ state->Init_Ctrl[25].Ctrl_Num = RFSYN_R_DIV ;
+ state->Init_Ctrl[25].size = 2 ;
+ state->Init_Ctrl[25].addr[0] = 22;
+ state->Init_Ctrl[25].bit[0] = 0;
+ state->Init_Ctrl[25].val[0] = 1;
+ state->Init_Ctrl[25].addr[1] = 22;
+ state->Init_Ctrl[25].bit[1] = 1;
+ state->Init_Ctrl[25].val[1] = 1;
+
+ state->Init_Ctrl[26].Ctrl_Num = SEQ_EXTSYNTHCALIF ;
+ state->Init_Ctrl[26].size = 1 ;
+ state->Init_Ctrl[26].addr[0] = 134;
+ state->Init_Ctrl[26].bit[0] = 2;
+ state->Init_Ctrl[26].val[0] = 0;
+
+ state->Init_Ctrl[27].Ctrl_Num = SEQ_EXTDCCAL ;
+ state->Init_Ctrl[27].size = 1 ;
+ state->Init_Ctrl[27].addr[0] = 137;
+ state->Init_Ctrl[27].bit[0] = 3;
+ state->Init_Ctrl[27].val[0] = 0;
+
+ state->Init_Ctrl[28].Ctrl_Num = AGC_EN_RSSI ;
+ state->Init_Ctrl[28].size = 1 ;
+ state->Init_Ctrl[28].addr[0] = 77;
+ state->Init_Ctrl[28].bit[0] = 7;
+ state->Init_Ctrl[28].val[0] = 0;
+
+ state->Init_Ctrl[29].Ctrl_Num = RFA_ENCLKRFAGC ;
+ state->Init_Ctrl[29].size = 1 ;
+ state->Init_Ctrl[29].addr[0] = 166;
+ state->Init_Ctrl[29].bit[0] = 7;
+ state->Init_Ctrl[29].val[0] = 1;
+
+ state->Init_Ctrl[30].Ctrl_Num = RFA_RSSI_REFH ;
+ state->Init_Ctrl[30].size = 3 ;
+ state->Init_Ctrl[30].addr[0] = 166;
+ state->Init_Ctrl[30].bit[0] = 0;
+ state->Init_Ctrl[30].val[0] = 0;
+ state->Init_Ctrl[30].addr[1] = 166;
+ state->Init_Ctrl[30].bit[1] = 1;
+ state->Init_Ctrl[30].val[1] = 1;
+ state->Init_Ctrl[30].addr[2] = 166;
+ state->Init_Ctrl[30].bit[2] = 2;
+ state->Init_Ctrl[30].val[2] = 1;
+
+ state->Init_Ctrl[31].Ctrl_Num = RFA_RSSI_REF ;
+ state->Init_Ctrl[31].size = 3 ;
+ state->Init_Ctrl[31].addr[0] = 166;
+ state->Init_Ctrl[31].bit[0] = 3;
+ state->Init_Ctrl[31].val[0] = 1;
+ state->Init_Ctrl[31].addr[1] = 166;
+ state->Init_Ctrl[31].bit[1] = 4;
+ state->Init_Ctrl[31].val[1] = 0;
+ state->Init_Ctrl[31].addr[2] = 166;
+ state->Init_Ctrl[31].bit[2] = 5;
+ state->Init_Ctrl[31].val[2] = 1;
+
+ state->Init_Ctrl[32].Ctrl_Num = RFA_RSSI_REFL ;
+ state->Init_Ctrl[32].size = 3 ;
+ state->Init_Ctrl[32].addr[0] = 167;
+ state->Init_Ctrl[32].bit[0] = 0;
+ state->Init_Ctrl[32].val[0] = 1;
+ state->Init_Ctrl[32].addr[1] = 167;
+ state->Init_Ctrl[32].bit[1] = 1;
+ state->Init_Ctrl[32].val[1] = 1;
+ state->Init_Ctrl[32].addr[2] = 167;
+ state->Init_Ctrl[32].bit[2] = 2;
+ state->Init_Ctrl[32].val[2] = 0;
+
+ state->Init_Ctrl[33].Ctrl_Num = RFA_FLR ;
+ state->Init_Ctrl[33].size = 4 ;
+ state->Init_Ctrl[33].addr[0] = 168;
+ state->Init_Ctrl[33].bit[0] = 0;
+ state->Init_Ctrl[33].val[0] = 0;
+ state->Init_Ctrl[33].addr[1] = 168;
+ state->Init_Ctrl[33].bit[1] = 1;
+ state->Init_Ctrl[33].val[1] = 1;
+ state->Init_Ctrl[33].addr[2] = 168;
+ state->Init_Ctrl[33].bit[2] = 2;
+ state->Init_Ctrl[33].val[2] = 0;
+ state->Init_Ctrl[33].addr[3] = 168;
+ state->Init_Ctrl[33].bit[3] = 3;
+ state->Init_Ctrl[33].val[3] = 0;
+
+ state->Init_Ctrl[34].Ctrl_Num = RFA_CEIL ;
+ state->Init_Ctrl[34].size = 4 ;
+ state->Init_Ctrl[34].addr[0] = 168;
+ state->Init_Ctrl[34].bit[0] = 4;
+ state->Init_Ctrl[34].val[0] = 1;
+ state->Init_Ctrl[34].addr[1] = 168;
+ state->Init_Ctrl[34].bit[1] = 5;
+ state->Init_Ctrl[34].val[1] = 1;
+ state->Init_Ctrl[34].addr[2] = 168;
+ state->Init_Ctrl[34].bit[2] = 6;
+ state->Init_Ctrl[34].val[2] = 1;
+ state->Init_Ctrl[34].addr[3] = 168;
+ state->Init_Ctrl[34].bit[3] = 7;
+ state->Init_Ctrl[34].val[3] = 1;
+
+ state->Init_Ctrl[35].Ctrl_Num = SEQ_EXTIQFSMPULSE ;
+ state->Init_Ctrl[35].size = 1 ;
+ state->Init_Ctrl[35].addr[0] = 135;
+ state->Init_Ctrl[35].bit[0] = 0;
+ state->Init_Ctrl[35].val[0] = 0;
+
+ state->Init_Ctrl[36].Ctrl_Num = OVERRIDE_1 ;
+ state->Init_Ctrl[36].size = 1 ;
+ state->Init_Ctrl[36].addr[0] = 56;
+ state->Init_Ctrl[36].bit[0] = 3;
+ state->Init_Ctrl[36].val[0] = 0;
+
+ state->Init_Ctrl[37].Ctrl_Num = BB_INITSTATE_DLPF_TUNE ;
+ state->Init_Ctrl[37].size = 7 ;
+ state->Init_Ctrl[37].addr[0] = 59;
+ state->Init_Ctrl[37].bit[0] = 1;
+ state->Init_Ctrl[37].val[0] = 0;
+ state->Init_Ctrl[37].addr[1] = 59;
+ state->Init_Ctrl[37].bit[1] = 2;
+ state->Init_Ctrl[37].val[1] = 0;
+ state->Init_Ctrl[37].addr[2] = 59;
+ state->Init_Ctrl[37].bit[2] = 3;
+ state->Init_Ctrl[37].val[2] = 0;
+ state->Init_Ctrl[37].addr[3] = 59;
+ state->Init_Ctrl[37].bit[3] = 4;
+ state->Init_Ctrl[37].val[3] = 0;
+ state->Init_Ctrl[37].addr[4] = 59;
+ state->Init_Ctrl[37].bit[4] = 5;
+ state->Init_Ctrl[37].val[4] = 0;
+ state->Init_Ctrl[37].addr[5] = 59;
+ state->Init_Ctrl[37].bit[5] = 6;
+ state->Init_Ctrl[37].val[5] = 0;
+ state->Init_Ctrl[37].addr[6] = 59;
+ state->Init_Ctrl[37].bit[6] = 7;
+ state->Init_Ctrl[37].val[6] = 0;
+
+ state->Init_Ctrl[38].Ctrl_Num = TG_R_DIV ;
+ state->Init_Ctrl[38].size = 6 ;
+ state->Init_Ctrl[38].addr[0] = 32;
+ state->Init_Ctrl[38].bit[0] = 2;
+ state->Init_Ctrl[38].val[0] = 0;
+ state->Init_Ctrl[38].addr[1] = 32;
+ state->Init_Ctrl[38].bit[1] = 3;
+ state->Init_Ctrl[38].val[1] = 0;
+ state->Init_Ctrl[38].addr[2] = 32;
+ state->Init_Ctrl[38].bit[2] = 4;
+ state->Init_Ctrl[38].val[2] = 0;
+ state->Init_Ctrl[38].addr[3] = 32;
+ state->Init_Ctrl[38].bit[3] = 5;
+ state->Init_Ctrl[38].val[3] = 0;
+ state->Init_Ctrl[38].addr[4] = 32;
+ state->Init_Ctrl[38].bit[4] = 6;
+ state->Init_Ctrl[38].val[4] = 1;
+ state->Init_Ctrl[38].addr[5] = 32;
+ state->Init_Ctrl[38].bit[5] = 7;
+ state->Init_Ctrl[38].val[5] = 0;
+
+ state->Init_Ctrl[39].Ctrl_Num = EN_CHP_LIN_B ;
+ state->Init_Ctrl[39].size = 1 ;
+ state->Init_Ctrl[39].addr[0] = 25;
+ state->Init_Ctrl[39].bit[0] = 3;
+ state->Init_Ctrl[39].val[0] = 1;
+
+
+ state->CH_Ctrl_Num = CHCTRL_NUM ;
+
+ state->CH_Ctrl[0].Ctrl_Num = DN_POLY ;
+ state->CH_Ctrl[0].size = 2 ;
+ state->CH_Ctrl[0].addr[0] = 68;
+ state->CH_Ctrl[0].bit[0] = 6;
+ state->CH_Ctrl[0].val[0] = 1;
+ state->CH_Ctrl[0].addr[1] = 68;
+ state->CH_Ctrl[0].bit[1] = 7;
+ state->CH_Ctrl[0].val[1] = 1;
+
+ state->CH_Ctrl[1].Ctrl_Num = DN_RFGAIN ;
+ state->CH_Ctrl[1].size = 2 ;
+ state->CH_Ctrl[1].addr[0] = 70;
+ state->CH_Ctrl[1].bit[0] = 6;
+ state->CH_Ctrl[1].val[0] = 1;
+ state->CH_Ctrl[1].addr[1] = 70;
+ state->CH_Ctrl[1].bit[1] = 7;
+ state->CH_Ctrl[1].val[1] = 0;
+
+ state->CH_Ctrl[2].Ctrl_Num = DN_CAP_RFLPF ;
+ state->CH_Ctrl[2].size = 9 ;
+ state->CH_Ctrl[2].addr[0] = 69;
+ state->CH_Ctrl[2].bit[0] = 5;
+ state->CH_Ctrl[2].val[0] = 0;
+ state->CH_Ctrl[2].addr[1] = 69;
+ state->CH_Ctrl[2].bit[1] = 6;
+ state->CH_Ctrl[2].val[1] = 0;
+ state->CH_Ctrl[2].addr[2] = 69;
+ state->CH_Ctrl[2].bit[2] = 7;
+ state->CH_Ctrl[2].val[2] = 0;
+ state->CH_Ctrl[2].addr[3] = 68;
+ state->CH_Ctrl[2].bit[3] = 0;
+ state->CH_Ctrl[2].val[3] = 0;
+ state->CH_Ctrl[2].addr[4] = 68;
+ state->CH_Ctrl[2].bit[4] = 1;
+ state->CH_Ctrl[2].val[4] = 0;
+ state->CH_Ctrl[2].addr[5] = 68;
+ state->CH_Ctrl[2].bit[5] = 2;
+ state->CH_Ctrl[2].val[5] = 0;
+ state->CH_Ctrl[2].addr[6] = 68;
+ state->CH_Ctrl[2].bit[6] = 3;
+ state->CH_Ctrl[2].val[6] = 0;
+ state->CH_Ctrl[2].addr[7] = 68;
+ state->CH_Ctrl[2].bit[7] = 4;
+ state->CH_Ctrl[2].val[7] = 0;
+ state->CH_Ctrl[2].addr[8] = 68;
+ state->CH_Ctrl[2].bit[8] = 5;
+ state->CH_Ctrl[2].val[8] = 0;
+
+ state->CH_Ctrl[3].Ctrl_Num = DN_EN_VHFUHFBAR ;
+ state->CH_Ctrl[3].size = 1 ;
+ state->CH_Ctrl[3].addr[0] = 70;
+ state->CH_Ctrl[3].bit[0] = 5;
+ state->CH_Ctrl[3].val[0] = 0;
+
+ state->CH_Ctrl[4].Ctrl_Num = DN_GAIN_ADJUST ;
+ state->CH_Ctrl[4].size = 3 ;
+ state->CH_Ctrl[4].addr[0] = 73;
+ state->CH_Ctrl[4].bit[0] = 4;
+ state->CH_Ctrl[4].val[0] = 0;
+ state->CH_Ctrl[4].addr[1] = 73;
+ state->CH_Ctrl[4].bit[1] = 5;
+ state->CH_Ctrl[4].val[1] = 1;
+ state->CH_Ctrl[4].addr[2] = 73;
+ state->CH_Ctrl[4].bit[2] = 6;
+ state->CH_Ctrl[4].val[2] = 0;
+
+ state->CH_Ctrl[5].Ctrl_Num = DN_IQTNBUF_AMP ;
+ state->CH_Ctrl[5].size = 4 ;
+ state->CH_Ctrl[5].addr[0] = 70;
+ state->CH_Ctrl[5].bit[0] = 0;
+ state->CH_Ctrl[5].val[0] = 0;
+ state->CH_Ctrl[5].addr[1] = 70;
+ state->CH_Ctrl[5].bit[1] = 1;
+ state->CH_Ctrl[5].val[1] = 0;
+ state->CH_Ctrl[5].addr[2] = 70;
+ state->CH_Ctrl[5].bit[2] = 2;
+ state->CH_Ctrl[5].val[2] = 0;
+ state->CH_Ctrl[5].addr[3] = 70;
+ state->CH_Ctrl[5].bit[3] = 3;
+ state->CH_Ctrl[5].val[3] = 0;
+
+ state->CH_Ctrl[6].Ctrl_Num = DN_IQTNGNBFBIAS_BST ;
+ state->CH_Ctrl[6].size = 1 ;
+ state->CH_Ctrl[6].addr[0] = 70;
+ state->CH_Ctrl[6].bit[0] = 4;
+ state->CH_Ctrl[6].val[0] = 1;
+
+ state->CH_Ctrl[7].Ctrl_Num = RFSYN_EN_OUTMUX ;
+ state->CH_Ctrl[7].size = 1 ;
+ state->CH_Ctrl[7].addr[0] = 111;
+ state->CH_Ctrl[7].bit[0] = 4;
+ state->CH_Ctrl[7].val[0] = 0;
+
+ state->CH_Ctrl[8].Ctrl_Num = RFSYN_SEL_VCO_OUT ;
+ state->CH_Ctrl[8].size = 1 ;
+ state->CH_Ctrl[8].addr[0] = 111;
+ state->CH_Ctrl[8].bit[0] = 7;
+ state->CH_Ctrl[8].val[0] = 1;
+
+ state->CH_Ctrl[9].Ctrl_Num = RFSYN_SEL_VCO_HI ;
+ state->CH_Ctrl[9].size = 1 ;
+ state->CH_Ctrl[9].addr[0] = 111;
+ state->CH_Ctrl[9].bit[0] = 6;
+ state->CH_Ctrl[9].val[0] = 1;
+
+ state->CH_Ctrl[10].Ctrl_Num = RFSYN_SEL_DIVM ;
+ state->CH_Ctrl[10].size = 1 ;
+ state->CH_Ctrl[10].addr[0] = 111;
+ state->CH_Ctrl[10].bit[0] = 5;
+ state->CH_Ctrl[10].val[0] = 0;
+
+ state->CH_Ctrl[11].Ctrl_Num = RFSYN_RF_DIV_BIAS ;
+ state->CH_Ctrl[11].size = 2 ;
+ state->CH_Ctrl[11].addr[0] = 110;
+ state->CH_Ctrl[11].bit[0] = 0;
+ state->CH_Ctrl[11].val[0] = 1;
+ state->CH_Ctrl[11].addr[1] = 110;
+ state->CH_Ctrl[11].bit[1] = 1;
+ state->CH_Ctrl[11].val[1] = 0;
+
+ state->CH_Ctrl[12].Ctrl_Num = DN_SEL_FREQ ;
+ state->CH_Ctrl[12].size = 3 ;
+ state->CH_Ctrl[12].addr[0] = 69;
+ state->CH_Ctrl[12].bit[0] = 2;
+ state->CH_Ctrl[12].val[0] = 0;
+ state->CH_Ctrl[12].addr[1] = 69;
+ state->CH_Ctrl[12].bit[1] = 3;
+ state->CH_Ctrl[12].val[1] = 0;
+ state->CH_Ctrl[12].addr[2] = 69;
+ state->CH_Ctrl[12].bit[2] = 4;
+ state->CH_Ctrl[12].val[2] = 0;
+
+ state->CH_Ctrl[13].Ctrl_Num = RFSYN_VCO_BIAS ;
+ state->CH_Ctrl[13].size = 6 ;
+ state->CH_Ctrl[13].addr[0] = 110;
+ state->CH_Ctrl[13].bit[0] = 2;
+ state->CH_Ctrl[13].val[0] = 0;
+ state->CH_Ctrl[13].addr[1] = 110;
+ state->CH_Ctrl[13].bit[1] = 3;
+ state->CH_Ctrl[13].val[1] = 0;
+ state->CH_Ctrl[13].addr[2] = 110;
+ state->CH_Ctrl[13].bit[2] = 4;
+ state->CH_Ctrl[13].val[2] = 0;
+ state->CH_Ctrl[13].addr[3] = 110;
+ state->CH_Ctrl[13].bit[3] = 5;
+ state->CH_Ctrl[13].val[3] = 0;
+ state->CH_Ctrl[13].addr[4] = 110;
+ state->CH_Ctrl[13].bit[4] = 6;
+ state->CH_Ctrl[13].val[4] = 0;
+ state->CH_Ctrl[13].addr[5] = 110;
+ state->CH_Ctrl[13].bit[5] = 7;
+ state->CH_Ctrl[13].val[5] = 1;
+
+ state->CH_Ctrl[14].Ctrl_Num = CHCAL_INT_MOD_RF ;
+ state->CH_Ctrl[14].size = 7 ;
+ state->CH_Ctrl[14].addr[0] = 14;
+ state->CH_Ctrl[14].bit[0] = 0;
+ state->CH_Ctrl[14].val[0] = 0;
+ state->CH_Ctrl[14].addr[1] = 14;
+ state->CH_Ctrl[14].bit[1] = 1;
+ state->CH_Ctrl[14].val[1] = 0;
+ state->CH_Ctrl[14].addr[2] = 14;
+ state->CH_Ctrl[14].bit[2] = 2;
+ state->CH_Ctrl[14].val[2] = 0;
+ state->CH_Ctrl[14].addr[3] = 14;
+ state->CH_Ctrl[14].bit[3] = 3;
+ state->CH_Ctrl[14].val[3] = 0;
+ state->CH_Ctrl[14].addr[4] = 14;
+ state->CH_Ctrl[14].bit[4] = 4;
+ state->CH_Ctrl[14].val[4] = 0;
+ state->CH_Ctrl[14].addr[5] = 14;
+ state->CH_Ctrl[14].bit[5] = 5;
+ state->CH_Ctrl[14].val[5] = 0;
+ state->CH_Ctrl[14].addr[6] = 14;
+ state->CH_Ctrl[14].bit[6] = 6;
+ state->CH_Ctrl[14].val[6] = 0;
+
+ state->CH_Ctrl[15].Ctrl_Num = CHCAL_FRAC_MOD_RF ;
+ state->CH_Ctrl[15].size = 18 ;
+ state->CH_Ctrl[15].addr[0] = 17;
+ state->CH_Ctrl[15].bit[0] = 6;
+ state->CH_Ctrl[15].val[0] = 0;
+ state->CH_Ctrl[15].addr[1] = 17;
+ state->CH_Ctrl[15].bit[1] = 7;
+ state->CH_Ctrl[15].val[1] = 0;
+ state->CH_Ctrl[15].addr[2] = 16;
+ state->CH_Ctrl[15].bit[2] = 0;
+ state->CH_Ctrl[15].val[2] = 0;
+ state->CH_Ctrl[15].addr[3] = 16;
+ state->CH_Ctrl[15].bit[3] = 1;
+ state->CH_Ctrl[15].val[3] = 0;
+ state->CH_Ctrl[15].addr[4] = 16;
+ state->CH_Ctrl[15].bit[4] = 2;
+ state->CH_Ctrl[15].val[4] = 0;
+ state->CH_Ctrl[15].addr[5] = 16;
+ state->CH_Ctrl[15].bit[5] = 3;
+ state->CH_Ctrl[15].val[5] = 0;
+ state->CH_Ctrl[15].addr[6] = 16;
+ state->CH_Ctrl[15].bit[6] = 4;
+ state->CH_Ctrl[15].val[6] = 0;
+ state->CH_Ctrl[15].addr[7] = 16;
+ state->CH_Ctrl[15].bit[7] = 5;
+ state->CH_Ctrl[15].val[7] = 0;
+ state->CH_Ctrl[15].addr[8] = 16;
+ state->CH_Ctrl[15].bit[8] = 6;
+ state->CH_Ctrl[15].val[8] = 0;
+ state->CH_Ctrl[15].addr[9] = 16;
+ state->CH_Ctrl[15].bit[9] = 7;
+ state->CH_Ctrl[15].val[9] = 0;
+ state->CH_Ctrl[15].addr[10] = 15;
+ state->CH_Ctrl[15].bit[10] = 0;
+ state->CH_Ctrl[15].val[10] = 0;
+ state->CH_Ctrl[15].addr[11] = 15;
+ state->CH_Ctrl[15].bit[11] = 1;
+ state->CH_Ctrl[15].val[11] = 0;
+ state->CH_Ctrl[15].addr[12] = 15;
+ state->CH_Ctrl[15].bit[12] = 2;
+ state->CH_Ctrl[15].val[12] = 0;
+ state->CH_Ctrl[15].addr[13] = 15;
+ state->CH_Ctrl[15].bit[13] = 3;
+ state->CH_Ctrl[15].val[13] = 0;
+ state->CH_Ctrl[15].addr[14] = 15;
+ state->CH_Ctrl[15].bit[14] = 4;
+ state->CH_Ctrl[15].val[14] = 0;
+ state->CH_Ctrl[15].addr[15] = 15;
+ state->CH_Ctrl[15].bit[15] = 5;
+ state->CH_Ctrl[15].val[15] = 0;
+ state->CH_Ctrl[15].addr[16] = 15;
+ state->CH_Ctrl[15].bit[16] = 6;
+ state->CH_Ctrl[15].val[16] = 1;
+ state->CH_Ctrl[15].addr[17] = 15;
+ state->CH_Ctrl[15].bit[17] = 7;
+ state->CH_Ctrl[15].val[17] = 1;
+
+ state->CH_Ctrl[16].Ctrl_Num = RFSYN_LPF_R ;
+ state->CH_Ctrl[16].size = 5 ;
+ state->CH_Ctrl[16].addr[0] = 112;
+ state->CH_Ctrl[16].bit[0] = 0;
+ state->CH_Ctrl[16].val[0] = 0;
+ state->CH_Ctrl[16].addr[1] = 112;
+ state->CH_Ctrl[16].bit[1] = 1;
+ state->CH_Ctrl[16].val[1] = 0;
+ state->CH_Ctrl[16].addr[2] = 112;
+ state->CH_Ctrl[16].bit[2] = 2;
+ state->CH_Ctrl[16].val[2] = 0;
+ state->CH_Ctrl[16].addr[3] = 112;
+ state->CH_Ctrl[16].bit[3] = 3;
+ state->CH_Ctrl[16].val[3] = 0;
+ state->CH_Ctrl[16].addr[4] = 112;
+ state->CH_Ctrl[16].bit[4] = 4;
+ state->CH_Ctrl[16].val[4] = 1;
+
+ state->CH_Ctrl[17].Ctrl_Num = CHCAL_EN_INT_RF ;
+ state->CH_Ctrl[17].size = 1 ;
+ state->CH_Ctrl[17].addr[0] = 14;
+ state->CH_Ctrl[17].bit[0] = 7;
+ state->CH_Ctrl[17].val[0] = 0;
+
+ state->CH_Ctrl[18].Ctrl_Num = TG_LO_DIVVAL ;
+ state->CH_Ctrl[18].size = 4 ;
+ state->CH_Ctrl[18].addr[0] = 107;
+ state->CH_Ctrl[18].bit[0] = 3;
+ state->CH_Ctrl[18].val[0] = 0;
+ state->CH_Ctrl[18].addr[1] = 107;
+ state->CH_Ctrl[18].bit[1] = 4;
+ state->CH_Ctrl[18].val[1] = 0;
+ state->CH_Ctrl[18].addr[2] = 107;
+ state->CH_Ctrl[18].bit[2] = 5;
+ state->CH_Ctrl[18].val[2] = 0;
+ state->CH_Ctrl[18].addr[3] = 107;
+ state->CH_Ctrl[18].bit[3] = 6;
+ state->CH_Ctrl[18].val[3] = 0;
+
+ state->CH_Ctrl[19].Ctrl_Num = TG_LO_SELVAL ;
+ state->CH_Ctrl[19].size = 3 ;
+ state->CH_Ctrl[19].addr[0] = 107;
+ state->CH_Ctrl[19].bit[0] = 7;
+ state->CH_Ctrl[19].val[0] = 1;
+ state->CH_Ctrl[19].addr[1] = 106;
+ state->CH_Ctrl[19].bit[1] = 0;
+ state->CH_Ctrl[19].val[1] = 1;
+ state->CH_Ctrl[19].addr[2] = 106;
+ state->CH_Ctrl[19].bit[2] = 1;
+ state->CH_Ctrl[19].val[2] = 1;
+
+ state->CH_Ctrl[20].Ctrl_Num = TG_DIV_VAL ;
+ state->CH_Ctrl[20].size = 11 ;
+ state->CH_Ctrl[20].addr[0] = 109;
+ state->CH_Ctrl[20].bit[0] = 2;
+ state->CH_Ctrl[20].val[0] = 0;
+ state->CH_Ctrl[20].addr[1] = 109;
+ state->CH_Ctrl[20].bit[1] = 3;
+ state->CH_Ctrl[20].val[1] = 0;
+ state->CH_Ctrl[20].addr[2] = 109;
+ state->CH_Ctrl[20].bit[2] = 4;
+ state->CH_Ctrl[20].val[2] = 0;
+ state->CH_Ctrl[20].addr[3] = 109;
+ state->CH_Ctrl[20].bit[3] = 5;
+ state->CH_Ctrl[20].val[3] = 0;
+ state->CH_Ctrl[20].addr[4] = 109;
+ state->CH_Ctrl[20].bit[4] = 6;
+ state->CH_Ctrl[20].val[4] = 0;
+ state->CH_Ctrl[20].addr[5] = 109;
+ state->CH_Ctrl[20].bit[5] = 7;
+ state->CH_Ctrl[20].val[5] = 0;
+ state->CH_Ctrl[20].addr[6] = 108;
+ state->CH_Ctrl[20].bit[6] = 0;
+ state->CH_Ctrl[20].val[6] = 0;
+ state->CH_Ctrl[20].addr[7] = 108;
+ state->CH_Ctrl[20].bit[7] = 1;
+ state->CH_Ctrl[20].val[7] = 0;
+ state->CH_Ctrl[20].addr[8] = 108;
+ state->CH_Ctrl[20].bit[8] = 2;
+ state->CH_Ctrl[20].val[8] = 1;
+ state->CH_Ctrl[20].addr[9] = 108;
+ state->CH_Ctrl[20].bit[9] = 3;
+ state->CH_Ctrl[20].val[9] = 1;
+ state->CH_Ctrl[20].addr[10] = 108;
+ state->CH_Ctrl[20].bit[10] = 4;
+ state->CH_Ctrl[20].val[10] = 1;
+
+ state->CH_Ctrl[21].Ctrl_Num = TG_VCO_BIAS ;
+ state->CH_Ctrl[21].size = 6 ;
+ state->CH_Ctrl[21].addr[0] = 106;
+ state->CH_Ctrl[21].bit[0] = 2;
+ state->CH_Ctrl[21].val[0] = 0;
+ state->CH_Ctrl[21].addr[1] = 106;
+ state->CH_Ctrl[21].bit[1] = 3;
+ state->CH_Ctrl[21].val[1] = 0;
+ state->CH_Ctrl[21].addr[2] = 106;
+ state->CH_Ctrl[21].bit[2] = 4;
+ state->CH_Ctrl[21].val[2] = 0;
+ state->CH_Ctrl[21].addr[3] = 106;
+ state->CH_Ctrl[21].bit[3] = 5;
+ state->CH_Ctrl[21].val[3] = 0;
+ state->CH_Ctrl[21].addr[4] = 106;
+ state->CH_Ctrl[21].bit[4] = 6;
+ state->CH_Ctrl[21].val[4] = 0;
+ state->CH_Ctrl[21].addr[5] = 106;
+ state->CH_Ctrl[21].bit[5] = 7;
+ state->CH_Ctrl[21].val[5] = 1;
+
+ state->CH_Ctrl[22].Ctrl_Num = SEQ_EXTPOWERUP ;
+ state->CH_Ctrl[22].size = 1 ;
+ state->CH_Ctrl[22].addr[0] = 138;
+ state->CH_Ctrl[22].bit[0] = 4;
+ state->CH_Ctrl[22].val[0] = 1;
+
+ state->CH_Ctrl[23].Ctrl_Num = OVERRIDE_2 ;
+ state->CH_Ctrl[23].size = 1 ;
+ state->CH_Ctrl[23].addr[0] = 17;
+ state->CH_Ctrl[23].bit[0] = 5;
+ state->CH_Ctrl[23].val[0] = 0;
+
+ state->CH_Ctrl[24].Ctrl_Num = OVERRIDE_3 ;
+ state->CH_Ctrl[24].size = 1 ;
+ state->CH_Ctrl[24].addr[0] = 111;
+ state->CH_Ctrl[24].bit[0] = 3;
+ state->CH_Ctrl[24].val[0] = 0;
+
+ state->CH_Ctrl[25].Ctrl_Num = OVERRIDE_4 ;
+ state->CH_Ctrl[25].size = 1 ;
+ state->CH_Ctrl[25].addr[0] = 112;
+ state->CH_Ctrl[25].bit[0] = 7;
+ state->CH_Ctrl[25].val[0] = 0;
+
+ state->CH_Ctrl[26].Ctrl_Num = SEQ_FSM_PULSE ;
+ state->CH_Ctrl[26].size = 1 ;
+ state->CH_Ctrl[26].addr[0] = 136;
+ state->CH_Ctrl[26].bit[0] = 7;
+ state->CH_Ctrl[26].val[0] = 0;
+
+ state->CH_Ctrl[27].Ctrl_Num = GPIO_4B ;
+ state->CH_Ctrl[27].size = 1 ;
+ state->CH_Ctrl[27].addr[0] = 149;
+ state->CH_Ctrl[27].bit[0] = 7;
+ state->CH_Ctrl[27].val[0] = 0;
+
+ state->CH_Ctrl[28].Ctrl_Num = GPIO_3B ;
+ state->CH_Ctrl[28].size = 1 ;
+ state->CH_Ctrl[28].addr[0] = 149;
+ state->CH_Ctrl[28].bit[0] = 6;
+ state->CH_Ctrl[28].val[0] = 0;
+
+ state->CH_Ctrl[29].Ctrl_Num = GPIO_4 ;
+ state->CH_Ctrl[29].size = 1 ;
+ state->CH_Ctrl[29].addr[0] = 149;
+ state->CH_Ctrl[29].bit[0] = 5;
+ state->CH_Ctrl[29].val[0] = 1;
+
+ state->CH_Ctrl[30].Ctrl_Num = GPIO_3 ;
+ state->CH_Ctrl[30].size = 1 ;
+ state->CH_Ctrl[30].addr[0] = 149;
+ state->CH_Ctrl[30].bit[0] = 4;
+ state->CH_Ctrl[30].val[0] = 1;
+
+ state->CH_Ctrl[31].Ctrl_Num = GPIO_1B ;
+ state->CH_Ctrl[31].size = 1 ;
+ state->CH_Ctrl[31].addr[0] = 149;
+ state->CH_Ctrl[31].bit[0] = 3;
+ state->CH_Ctrl[31].val[0] = 0;
+
+ state->CH_Ctrl[32].Ctrl_Num = DAC_A_ENABLE ;
+ state->CH_Ctrl[32].size = 1 ;
+ state->CH_Ctrl[32].addr[0] = 93;
+ state->CH_Ctrl[32].bit[0] = 1;
+ state->CH_Ctrl[32].val[0] = 0;
+
+ state->CH_Ctrl[33].Ctrl_Num = DAC_B_ENABLE ;
+ state->CH_Ctrl[33].size = 1 ;
+ state->CH_Ctrl[33].addr[0] = 93;
+ state->CH_Ctrl[33].bit[0] = 0;
+ state->CH_Ctrl[33].val[0] = 0;
+
+ state->CH_Ctrl[34].Ctrl_Num = DAC_DIN_A ;
+ state->CH_Ctrl[34].size = 6 ;
+ state->CH_Ctrl[34].addr[0] = 92;
+ state->CH_Ctrl[34].bit[0] = 2;
+ state->CH_Ctrl[34].val[0] = 0;
+ state->CH_Ctrl[34].addr[1] = 92;
+ state->CH_Ctrl[34].bit[1] = 3;
+ state->CH_Ctrl[34].val[1] = 0;
+ state->CH_Ctrl[34].addr[2] = 92;
+ state->CH_Ctrl[34].bit[2] = 4;
+ state->CH_Ctrl[34].val[2] = 0;
+ state->CH_Ctrl[34].addr[3] = 92;
+ state->CH_Ctrl[34].bit[3] = 5;
+ state->CH_Ctrl[34].val[3] = 0;
+ state->CH_Ctrl[34].addr[4] = 92;
+ state->CH_Ctrl[34].bit[4] = 6;
+ state->CH_Ctrl[34].val[4] = 0;
+ state->CH_Ctrl[34].addr[5] = 92;
+ state->CH_Ctrl[34].bit[5] = 7;
+ state->CH_Ctrl[34].val[5] = 0;
+
+ state->CH_Ctrl[35].Ctrl_Num = DAC_DIN_B ;
+ state->CH_Ctrl[35].size = 6 ;
+ state->CH_Ctrl[35].addr[0] = 93;
+ state->CH_Ctrl[35].bit[0] = 2;
+ state->CH_Ctrl[35].val[0] = 0;
+ state->CH_Ctrl[35].addr[1] = 93;
+ state->CH_Ctrl[35].bit[1] = 3;
+ state->CH_Ctrl[35].val[1] = 0;
+ state->CH_Ctrl[35].addr[2] = 93;
+ state->CH_Ctrl[35].bit[2] = 4;
+ state->CH_Ctrl[35].val[2] = 0;
+ state->CH_Ctrl[35].addr[3] = 93;
+ state->CH_Ctrl[35].bit[3] = 5;
+ state->CH_Ctrl[35].val[3] = 0;
+ state->CH_Ctrl[35].addr[4] = 93;
+ state->CH_Ctrl[35].bit[4] = 6;
+ state->CH_Ctrl[35].val[4] = 0;
+ state->CH_Ctrl[35].addr[5] = 93;
+ state->CH_Ctrl[35].bit[5] = 7;
+ state->CH_Ctrl[35].val[5] = 0;
+
+#ifdef _MXL_PRODUCTION
+ state->CH_Ctrl[36].Ctrl_Num = RFSYN_EN_DIV ;
+ state->CH_Ctrl[36].size = 1 ;
+ state->CH_Ctrl[36].addr[0] = 109;
+ state->CH_Ctrl[36].bit[0] = 1;
+ state->CH_Ctrl[36].val[0] = 1;
+
+ state->CH_Ctrl[37].Ctrl_Num = RFSYN_DIVM ;
+ state->CH_Ctrl[37].size = 2 ;
+ state->CH_Ctrl[37].addr[0] = 112;
+ state->CH_Ctrl[37].bit[0] = 5;
+ state->CH_Ctrl[37].val[0] = 0;
+ state->CH_Ctrl[37].addr[1] = 112;
+ state->CH_Ctrl[37].bit[1] = 6;
+ state->CH_Ctrl[37].val[1] = 0;
+
+ state->CH_Ctrl[38].Ctrl_Num = DN_BYPASS_AGC_I2C ;
+ state->CH_Ctrl[38].size = 1 ;
+ state->CH_Ctrl[38].addr[0] = 65;
+ state->CH_Ctrl[38].bit[0] = 1;
+ state->CH_Ctrl[38].val[0] = 0;
+#endif
+
+ return 0 ;
+}
+
+static void InitTunerControls(struct dvb_frontend *fe)
+{
+ MXL5005_RegisterInit(fe);
+ MXL5005_ControlInit(fe);
+#ifdef _MXL_INTERNAL
+ MXL5005_MXLControlInit(fe);
+#endif
+}
+
+static u16 MXL5005_TunerConfig(struct dvb_frontend *fe,
+ u8 Mode, /* 0: Analog Mode ; 1: Digital Mode */
+ u8 IF_mode, /* for Analog Mode, 0: zero IF; 1: low IF */
+ u32 Bandwidth, /* filter channel bandwidth (6, 7, 8) */
+ u32 IF_out, /* Desired IF Out Frequency */
+ u32 Fxtal, /* XTAL Frequency */
+ u8 AGC_Mode, /* AGC Mode - Dual AGC: 0, Single AGC: 1 */
+ u16 TOP, /* 0: Dual AGC; Value: take over point */
+ u16 IF_OUT_LOAD, /* IF Out Load Resistor (200 / 300 Ohms) */
+ u8 CLOCK_OUT, /* 0: turn off clk out; 1: turn on clock out */
+ u8 DIV_OUT, /* 0: Div-1; 1: Div-4 */
+ u8 CAPSELECT, /* 0: disable On-Chip pulling cap; 1: enable */
+ u8 EN_RSSI, /* 0: disable RSSI; 1: enable RSSI */
+
+ /* Modulation Type; */
+ /* 0 - Default; 1 - DVB-T; 2 - ATSC; 3 - QAM; 4 - Analog Cable */
+ u8 Mod_Type,
+
+ /* Tracking Filter */
+ /* 0 - Default; 1 - Off; 2 - Type C; 3 - Type C-H */
+ u8 TF_Type
+ )
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u16 status = 0;
+
+ state->Mode = Mode;
+ state->IF_Mode = IF_mode;
+ state->Chan_Bandwidth = Bandwidth;
+ state->IF_OUT = IF_out;
+ state->Fxtal = Fxtal;
+ state->AGC_Mode = AGC_Mode;
+ state->TOP = TOP;
+ state->IF_OUT_LOAD = IF_OUT_LOAD;
+ state->CLOCK_OUT = CLOCK_OUT;
+ state->DIV_OUT = DIV_OUT;
+ state->CAPSELECT = CAPSELECT;
+ state->EN_RSSI = EN_RSSI;
+ state->Mod_Type = Mod_Type;
+ state->TF_Type = TF_Type;
+
+ /* Initialize all the controls and registers */
+ InitTunerControls(fe);
+
+ /* Synthesizer LO frequency calculation */
+ MXL_SynthIFLO_Calc(fe);
+
+ return status;
+}
+
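+/*
+ * Derive the IF synthesizer LO frequency from the desired IF output:
+ * digital mode uses IF_OUT directly, analog zero-IF mode adds a
+ * 400 kHz offset, and analog low-IF mode adds half the channel
+ * bandwidth (e.g. IF_OUT + 3 MHz for a 6 MHz channel).
+ */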
+static void MXL_SynthIFLO_Calc(struct dvb_frontend *fe)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ if (state->Mode == 1) /* Digital Mode */
+ state->IF_LO = state->IF_OUT;
+ else /* Analog Mode */ {
+ if (state->IF_Mode == 0) /* Analog Zero IF mode */
+ state->IF_LO = state->IF_OUT + 400000;
+ else /* Analog Low IF mode */
+ state->IF_LO = state->IF_OUT + state->Chan_Bandwidth/2;
+ }
+}
+
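+/*
+ * Derive the RF and tracking-generator LO frequencies from the RF
+ * input: digital mode keeps the RF LO on RF_IN with the TG LO 750 kHz
+ * below it, analog zero-IF mode places them 400 kHz and 1.75 MHz below
+ * RF_IN, and analog low-IF mode places them half the channel bandwidth
+ * and (bandwidth - 500 kHz) below RF_IN.
+ */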
+static void MXL_SynthRFTGLO_Calc(struct dvb_frontend *fe)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+
+ if (state->Mode == 1) /* Digital Mode */ {
+ /* remove 20.48MHz setting for 2.6.10 */
+ state->RF_LO = state->RF_IN;
+ /* change for 2.6.6 */
+ state->TG_LO = state->RF_IN - 750000;
+ } else /* Analog Mode */ {
+ if (state->IF_Mode == 0) /* Analog Zero IF mode */ {
+ state->RF_LO = state->RF_IN - 400000;
+ state->TG_LO = state->RF_IN - 1750000;
+ } else /* Analog Low IF mode */ {
+ state->RF_LO = state->RF_IN - state->Chan_Bandwidth/2;
+ state->TG_LO = state->RF_IN -
+ state->Chan_Bandwidth + 500000;
+ }
+ }
+}
+
+static u16 MXL_OverwriteICDefault(struct dvb_frontend *fe)
+{
+ u16 status = 0;
+
+ status += MXL_ControlWrite(fe, OVERRIDE_1, 1);
+ status += MXL_ControlWrite(fe, OVERRIDE_2, 1);
+ status += MXL_ControlWrite(fe, OVERRIDE_3, 1);
+ status += MXL_ControlWrite(fe, OVERRIDE_4, 1);
+
+ return status;
+}
+
+static u16 MXL_BlockInit(struct dvb_frontend *fe)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u16 status = 0;
+
+ status += MXL_OverwriteICDefault(fe);
+
+ /* Downconverter Control Dig Ana */
+ status += MXL_ControlWrite(fe, DN_IQTN_AMP_CUT, state->Mode ? 1 : 0);
+
+ /* Filter Control Dig Ana */
+ status += MXL_ControlWrite(fe, BB_MODE, state->Mode ? 0 : 1);
+ status += MXL_ControlWrite(fe, BB_BUF, state->Mode ? 3 : 2);
+ status += MXL_ControlWrite(fe, BB_BUF_OA, state->Mode ? 1 : 0);
+ status += MXL_ControlWrite(fe, BB_IQSWAP, state->Mode ? 0 : 1);
+ status += MXL_ControlWrite(fe, BB_INITSTATE_DLPF_TUNE, 0);
+
+ /* Initialize Low-Pass Filter */
+ if (state->Mode) { /* Digital Mode */
+ switch (state->Chan_Bandwidth) {
+ case 8000000:
+ status += MXL_ControlWrite(fe, BB_DLPF_BANDSEL, 0);
+ break;
+ case 7000000:
+ status += MXL_ControlWrite(fe, BB_DLPF_BANDSEL, 2);
+ break;
+ case 6000000:
+ status += MXL_ControlWrite(fe,
+ BB_DLPF_BANDSEL, 3);
+ break;
+ }
+ } else { /* Analog Mode */
+ switch (state->Chan_Bandwidth) {
+ case 8000000: /* Low IF / Zero IF */
+ status += MXL_ControlWrite(fe, BB_ALPF_BANDSELECT,
+ (state->IF_Mode ? 0 : 3));
+ break;
+ case 7000000:
+ status += MXL_ControlWrite(fe, BB_ALPF_BANDSELECT,
+ (state->IF_Mode ? 1 : 4));
+ break;
+ case 6000000:
+ status += MXL_ControlWrite(fe, BB_ALPF_BANDSELECT,
+ (state->IF_Mode ? 2 : 5));
+ break;
+ }
+ }
+
+ /* Charge Pump Control Dig Ana */
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, state->Mode ? 5 : 8);
+ /* same value for digital and analog mode */
+ status += MXL_ControlWrite(fe, RFSYN_EN_CHP_HIGAIN, 1);
+ status += MXL_ControlWrite(fe, EN_CHP_LIN_B, 0);
+
+ /* AGC TOP Control */
+ if (state->AGC_Mode == 0) /* Dual AGC */ {
+ status += MXL_ControlWrite(fe, AGC_IF, 15);
+ status += MXL_ControlWrite(fe, AGC_RF, 15);
+ } else /* Single AGC Mode Dig Ana */
+ status += MXL_ControlWrite(fe, AGC_RF, state->Mode ? 15 : 12);
+
+ if (state->TOP == 55) /* TOP == 5.5 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x0);
+
+ if (state->TOP == 72) /* TOP == 7.2 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x1);
+
+ if (state->TOP == 92) /* TOP == 9.2 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x2);
+
+ if (state->TOP == 110) /* TOP == 11.0 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x3);
+
+ if (state->TOP == 129) /* TOP == 12.9 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x4);
+
+ if (state->TOP == 147) /* TOP == 14.7 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x5);
+
+ if (state->TOP == 168) /* TOP == 16.8 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x6);
+
+ if (state->TOP == 194) /* TOP == 19.4 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x7);
+
+ if (state->TOP == 212) /* TOP == 21.2 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0x9);
+
+ if (state->TOP == 232) /* TOP == 23.2 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0xA);
+
+ if (state->TOP == 252) /* TOP == 25.2 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0xB);
+
+ if (state->TOP == 271) /* TOP == 27.1 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0xC);
+
+ if (state->TOP == 292) /* TOP == 29.2 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0xD);
+
+ if (state->TOP == 317) /* TOP == 31.7 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0xE);
+
+ if (state->TOP == 349) /* TOP == 34.9 */
+ status += MXL_ControlWrite(fe, AGC_IF, 0xF);
+
+ /* IF Synthesizer Control */
+ status += MXL_IFSynthInit(fe);
+
+ /* IF UpConverter Control */
+ if (state->IF_OUT_LOAD == 200) {
+ status += MXL_ControlWrite(fe, DRV_RES_SEL, 6);
+ status += MXL_ControlWrite(fe, I_DRIVER, 2);
+ }
+ if (state->IF_OUT_LOAD == 300) {
+ status += MXL_ControlWrite(fe, DRV_RES_SEL, 4);
+ status += MXL_ControlWrite(fe, I_DRIVER, 1);
+ }
+
+ /* Anti-Alias Filter Control: initialise the anti-aliasing filter */
+ if (state->Mode) { /* Digital Mode */
+ if (state->IF_OUT >= 4000000UL && state->IF_OUT <= 6280000UL) {
+ status += MXL_ControlWrite(fe, EN_AAF, 1);
+ status += MXL_ControlWrite(fe, EN_3P, 1);
+ status += MXL_ControlWrite(fe, EN_AUX_3P, 1);
+ status += MXL_ControlWrite(fe, SEL_AAF_BAND, 0);
+ }
+ if ((state->IF_OUT == 36125000UL) ||
+ (state->IF_OUT == 36150000UL)) {
+ status += MXL_ControlWrite(fe, EN_AAF, 1);
+ status += MXL_ControlWrite(fe, EN_3P, 1);
+ status += MXL_ControlWrite(fe, EN_AUX_3P, 1);
+ status += MXL_ControlWrite(fe, SEL_AAF_BAND, 1);
+ }
+ if (state->IF_OUT > 36150000UL) {
+ status += MXL_ControlWrite(fe, EN_AAF, 0);
+ status += MXL_ControlWrite(fe, EN_3P, 1);
+ status += MXL_ControlWrite(fe, EN_AUX_3P, 1);
+ status += MXL_ControlWrite(fe, SEL_AAF_BAND, 1);
+ }
+ } else { /* Analog Mode */
+ if (state->IF_OUT >= 4000000UL && state->IF_OUT <= 5000000UL) {
+ status += MXL_ControlWrite(fe, EN_AAF, 1);
+ status += MXL_ControlWrite(fe, EN_3P, 1);
+ status += MXL_ControlWrite(fe, EN_AUX_3P, 1);
+ status += MXL_ControlWrite(fe, SEL_AAF_BAND, 0);
+ }
+ if (state->IF_OUT > 5000000UL) {
+ status += MXL_ControlWrite(fe, EN_AAF, 0);
+ status += MXL_ControlWrite(fe, EN_3P, 0);
+ status += MXL_ControlWrite(fe, EN_AUX_3P, 0);
+ status += MXL_ControlWrite(fe, SEL_AAF_BAND, 0);
+ }
+ }
+
+ /* Demod Clock Out */
+ if (state->CLOCK_OUT)
+ status += MXL_ControlWrite(fe, SEQ_ENCLK16_CLK_OUT, 1);
+ else
+ status += MXL_ControlWrite(fe, SEQ_ENCLK16_CLK_OUT, 0);
+
+ if (state->DIV_OUT == 1)
+ status += MXL_ControlWrite(fe, SEQ_SEL4_16B, 1);
+ if (state->DIV_OUT == 0)
+ status += MXL_ControlWrite(fe, SEQ_SEL4_16B, 0);
+
+ /* Crystal Control */
+ if (state->CAPSELECT)
+ status += MXL_ControlWrite(fe, XTAL_CAPSELECT, 1);
+ else
+ status += MXL_ControlWrite(fe, XTAL_CAPSELECT, 0);
+
+ if (state->Fxtal >= 12000000UL && state->Fxtal <= 16000000UL)
+ status += MXL_ControlWrite(fe, IF_SEL_DBL, 1);
+ if (state->Fxtal > 16000000UL && state->Fxtal <= 32000000UL)
+ status += MXL_ControlWrite(fe, IF_SEL_DBL, 0);
+
+ if (state->Fxtal >= 12000000UL && state->Fxtal <= 22000000UL)
+ status += MXL_ControlWrite(fe, RFSYN_R_DIV, 3);
+ if (state->Fxtal > 22000000UL && state->Fxtal <= 32000000UL)
+ status += MXL_ControlWrite(fe, RFSYN_R_DIV, 0);
+
+ /* Misc Controls */
+ if (state->Mode == 0 && state->IF_Mode == 1) /* Analog LowIF mode */
+ status += MXL_ControlWrite(fe, SEQ_EXTIQFSMPULSE, 0);
+ else
+ status += MXL_ControlWrite(fe, SEQ_EXTIQFSMPULSE, 1);
+
+ /* status += MXL_ControlRead(fe, IF_DIVVAL, &IF_DIVVAL_Val); */
+
+ /* Set TG_R_DIV */
+ status += MXL_ControlWrite(fe, TG_R_DIV,
+ MXL_Ceiling(state->Fxtal, 1000000));
+
+ /* Apply Default value to BB_INITSTATE_DLPF_TUNE */
+
+ /* RSSI Control */
+ if (state->EN_RSSI) {
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 1);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+
+ /* RSSI reference point */
+ status += MXL_ControlWrite(fe, RFA_RSSI_REF, 2);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFH, 3);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFL, 1);
+
+ /* TOP point */
+ status += MXL_ControlWrite(fe, RFA_FLR, 0);
+ status += MXL_ControlWrite(fe, RFA_CEIL, 12);
+ }
+
+ /* Modulation type bit settings
+ * Override the control values preset
+ */
+ if (state->Mod_Type == MXL_DVBT) /* DVB-T Mode */ {
+ state->AGC_Mode = 1; /* Single AGC Mode */
+
+ /* Enable RSSI */
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 1);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+
+ /* RSSI reference point */
+ status += MXL_ControlWrite(fe, RFA_RSSI_REF, 3);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFH, 5);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFL, 1);
+
+ /* TOP point */
+ status += MXL_ControlWrite(fe, RFA_FLR, 2);
+ status += MXL_ControlWrite(fe, RFA_CEIL, 13);
+ if (state->IF_OUT <= 6280000UL) /* Low IF */
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 0);
+ else /* High IF */
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 1);
+
+ }
+ if (state->Mod_Type == MXL_ATSC) /* ATSC Mode */ {
+ state->AGC_Mode = 1; /* Single AGC Mode */
+
+ /* Enable RSSI */
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 1);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+
+ /* RSSI reference point */
+ status += MXL_ControlWrite(fe, RFA_RSSI_REF, 2);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFH, 4);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFL, 1);
+
+ /* TOP point */
+ status += MXL_ControlWrite(fe, RFA_FLR, 2);
+ status += MXL_ControlWrite(fe, RFA_CEIL, 13);
+ status += MXL_ControlWrite(fe, BB_INITSTATE_DLPF_TUNE, 1);
+ /* Low Zero */
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 5);
+
+ if (state->IF_OUT <= 6280000UL) /* Low IF */
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 0);
+ else /* High IF */
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 1);
+ }
+ if (state->Mod_Type == MXL_QAM) /* QAM Mode */ {
+ state->Mode = MXL_DIGITAL_MODE;
+
+ /* state->AGC_Mode = 1; */ /* Single AGC Mode */
+
+ /* Disable RSSI */ /* change here for v2.6.5 */
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 0);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+
+ /* RSSI reference point */
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFH, 5);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REF, 3);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFL, 2);
+ /* change here for v2.6.5 */
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 3);
+
+ if (state->IF_OUT <= 6280000UL) /* Low IF */
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 0);
+ else /* High IF */
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 1);
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 2);
+
+ }
+ if (state->Mod_Type == MXL_ANALOG_CABLE) {
+ /* Analog Cable Mode */
+ /* state->Mode = MXL_DIGITAL_MODE; */
+
+ state->AGC_Mode = 1; /* Single AGC Mode */
+
+ /* Disable RSSI */
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 0);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+ /* change for 2.6.3 */
+ status += MXL_ControlWrite(fe, AGC_IF, 1);
+ status += MXL_ControlWrite(fe, AGC_RF, 15);
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 1);
+ }
+
+ if (state->Mod_Type == MXL_ANALOG_OTA) {
+ /* Analog OTA Terrestrial mode add for 2.6.7 */
+ /* state->Mode = MXL_ANALOG_MODE; */
+
+ /* Enable RSSI */
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 1);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+
+ /* RSSI reference point */
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFH, 5);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REF, 3);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFL, 2);
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 3);
+ status += MXL_ControlWrite(fe, BB_IQSWAP, 1);
+ }
+
+ /* RSSI disable */
+ if (state->EN_RSSI == 0) {
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 0);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+ }
+
+ return status;
+}
+
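+/*
+ * Program the IF synthesizer: pick IF_DIVVAL / IF_VCO_BIAS and the
+ * reference frequency Fref from a table of supported IF LO
+ * frequencies, then derive the integer and fractional modulus values
+ * from Fref and the crystal frequency.
+ */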
+static u16 MXL_IFSynthInit(struct dvb_frontend *fe)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u16 status = 0 ;
+ u32 Fref = 0 ;
+ u32 Kdbl, intModVal ;
+ u32 fracModVal ;
+ Kdbl = 2 ;
+
+ if (state->Fxtal >= 12000000UL && state->Fxtal <= 16000000UL)
+ Kdbl = 2 ;
+ if (state->Fxtal > 16000000UL && state->Fxtal <= 32000000UL)
+ Kdbl = 1 ;
+
+ /* IF Synthesizer Control */
+ if (state->Mode == 0 && state->IF_Mode == 1) /* Analog Low IF mode */ {
+ if (state->IF_LO == 41000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 328000000UL ;
+ }
+ if (state->IF_LO == 47000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 376000000UL ;
+ }
+ if (state->IF_LO == 54000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x10);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 324000000UL ;
+ }
+ if (state->IF_LO == 60000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x10);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 360000000UL ;
+ }
+ if (state->IF_LO == 39250000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 314000000UL ;
+ }
+ if (state->IF_LO == 39650000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 317200000UL ;
+ }
+ if (state->IF_LO == 40150000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 321200000UL ;
+ }
+ if (state->IF_LO == 40650000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 325200000UL ;
+ }
+ }
+
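+ /* Digital Mode, or Analog Zero-IF mode */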
+ if (state->Mode || (state->Mode == 0 && state->IF_Mode == 0)) {
+ if (state->IF_LO == 57000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x10);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 342000000UL ;
+ }
+ if (state->IF_LO == 44000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 352000000UL ;
+ }
+ if (state->IF_LO == 43750000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 350000000UL ;
+ }
+ if (state->IF_LO == 36650000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 366500000UL ;
+ }
+ if (state->IF_LO == 36150000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 361500000UL ;
+ }
+ if (state->IF_LO == 36000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 360000000UL ;
+ }
+ if (state->IF_LO == 35250000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 352500000UL ;
+ }
+ if (state->IF_LO == 34750000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 347500000UL ;
+ }
+ if (state->IF_LO == 6280000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x07);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 376800000UL ;
+ }
+ if (state->IF_LO == 5000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x09);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 360000000UL ;
+ }
+ if (state->IF_LO == 4500000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x06);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 360000000UL ;
+ }
+ if (state->IF_LO == 4570000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x06);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 365600000UL ;
+ }
+ if (state->IF_LO == 4000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x05);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 360000000UL ;
+ }
+ if (state->IF_LO == 57400000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x10);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 344400000UL ;
+ }
+ if (state->IF_LO == 44400000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 355200000UL ;
+ }
+ if (state->IF_LO == 44150000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x08);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 353200000UL ;
+ }
+ if (state->IF_LO == 37050000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 370500000UL ;
+ }
+ if (state->IF_LO == 36550000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 365500000UL ;
+ }
+ if (state->IF_LO == 36125000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x04);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 361250000UL ;
+ }
+ if (state->IF_LO == 6000000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x07);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 360000000UL ;
+ }
+ if (state->IF_LO == 5400000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x07);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 324000000UL ;
+ }
+ if (state->IF_LO == 5380000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x07);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x0C);
+ Fref = 322800000UL ;
+ }
+ if (state->IF_LO == 5200000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x09);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 374400000UL ;
+ }
+ if (state->IF_LO == 4900000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x09);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 352800000UL ;
+ }
+ if (state->IF_LO == 4400000UL) {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x06);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 352000000UL ;
+ }
+ if (state->IF_LO == 4063000UL) /* add for 2.6.8 */ {
+ status += MXL_ControlWrite(fe, IF_DIVVAL, 0x05);
+ status += MXL_ControlWrite(fe, IF_VCO_BIAS, 0x08);
+ Fref = 365670000UL ;
+ }
+ }
+ /* CHCAL_INT_MOD_IF */
+ /* CHCAL_FRAC_MOD_IF */
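+ /*
+  * Worked example (illustrative values): Fref = 360 MHz, Fxtal = 16 MHz
+  * and Kdbl = 2 give intModVal = 360000000 / 16000000 = 22 and
+  * fracModVal = 65536 * (360000 - 16000 * 22) / 16000 = 32768,
+  * i.e. an effective division ratio of 22.5.
+  */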
+ intModVal = Fref / (state->Fxtal * Kdbl/2);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_IF, intModVal);
+
+ fracModVal = (2<<15)*(Fref/1000 - (state->Fxtal/1000 * Kdbl/2) *
+ intModVal);
+
+ fracModVal = fracModVal / ((state->Fxtal * Kdbl/2)/1000);
+ status += MXL_ControlWrite(fe, CHCAL_FRAC_MOD_IF, fracModVal);
+
+ return status ;
+}
+
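+/*
+ * Return the crystal frequency in 10 kHz units, rounded up to the next
+ * whole MHz when Xtal_Freq is not an integer number of MHz,
+ * e.g. 16 MHz -> 1600 and 20.48 MHz -> 2100.
+ */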
+static u32 MXL_GetXtalInt(u32 Xtal_Freq)
+{
+ if ((Xtal_Freq % 1000000) == 0)
+ return (Xtal_Freq / 10000);
+ else
+ return (((Xtal_Freq / 1000000) + 1)*100);
+}
+
+static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u16 status = 0;
+ u32 divider_val, E3, E4, E5, E5A;
+ u32 Fmax, Fmin, FmaxBin, FminBin;
+ u32 Kdbl_RF = 2;
+ u32 tg_divval;
+ u32 tg_lo;
+ u32 Xtal_Int;
+
+ u32 Fref_TG;
+ u32 Fvco;
+
+ Xtal_Int = MXL_GetXtalInt(state->Fxtal);
+
+ state->RF_IN = RF_Freq;
+
+ MXL_SynthRFTGLO_Calc(fe);
+
+ if (state->Fxtal >= 12000000UL && state->Fxtal <= 22000000UL)
+ Kdbl_RF = 2;
+ if (state->Fxtal > 22000000UL && state->Fxtal <= 32000000UL)
+ Kdbl_RF = 1;
+
+ /* Downconverter Controls
+ * Look-Up Table Implementation for:
+ * DN_POLY
+ * DN_RFGAIN
+ * DN_CAP_RFLPF
+ * DN_EN_VHFUHFBAR
+ * DN_GAIN_ADJUST
+ * Change the boundary reference from RF_IN to RF_LO
+ */
+ if (state->RF_LO < 40000000UL)
+ return -1;
+
+ if (state->RF_LO >= 40000000UL && state->RF_LO <= 75000000UL) {
+ status += MXL_ControlWrite(fe, DN_POLY, 2);
+ status += MXL_ControlWrite(fe, DN_RFGAIN, 3);
+ status += MXL_ControlWrite(fe, DN_CAP_RFLPF, 423);
+ status += MXL_ControlWrite(fe, DN_EN_VHFUHFBAR, 1);
+ status += MXL_ControlWrite(fe, DN_GAIN_ADJUST, 1);
+ }
+ if (state->RF_LO > 75000000UL && state->RF_LO <= 100000000UL) {
+ status += MXL_ControlWrite(fe, DN_POLY, 3);
+ status += MXL_ControlWrite(fe, DN_RFGAIN, 3);
+ status += MXL_ControlWrite(fe, DN_CAP_RFLPF, 222);
+ status += MXL_ControlWrite(fe, DN_EN_VHFUHFBAR, 1);
+ status += MXL_ControlWrite(fe, DN_GAIN_ADJUST, 1);
+ }
+ if (state->RF_LO > 100000000UL && state->RF_LO <= 150000000UL) {
+ status += MXL_ControlWrite(fe, DN_POLY, 3);
+ status += MXL_ControlWrite(fe, DN_RFGAIN, 3);
+ status += MXL_ControlWrite(fe, DN_CAP_RFLPF, 147);
+ status += MXL_ControlWrite(fe, DN_EN_VHFUHFBAR, 1);
+ status += MXL_ControlWrite(fe, DN_GAIN_ADJUST, 2);
+ }
+ if (state->RF_LO > 150000000UL && state->RF_LO <= 200000000UL) {
+ status += MXL_ControlWrite(fe, DN_POLY, 3);
+ status += MXL_ControlWrite(fe, DN_RFGAIN, 3);
+ status += MXL_ControlWrite(fe, DN_CAP_RFLPF, 9);
+ status += MXL_ControlWrite(fe, DN_EN_VHFUHFBAR, 1);
+ status += MXL_ControlWrite(fe, DN_GAIN_ADJUST, 2);
+ }
+ if (state->RF_LO > 200000000UL && state->RF_LO <= 300000000UL) {
+ status += MXL_ControlWrite(fe, DN_POLY, 3);
+ status += MXL_ControlWrite(fe, DN_RFGAIN, 3);
+ status += MXL_ControlWrite(fe, DN_CAP_RFLPF, 0);
+ status += MXL_ControlWrite(fe, DN_EN_VHFUHFBAR, 1);
+ status += MXL_ControlWrite(fe, DN_GAIN_ADJUST, 3);
+ }
+ if (state->RF_LO > 300000000UL && state->RF_LO <= 650000000UL) {
+ status += MXL_ControlWrite(fe, DN_POLY, 3);
+ status += MXL_ControlWrite(fe, DN_RFGAIN, 1);
+ status += MXL_ControlWrite(fe, DN_CAP_RFLPF, 0);
+ status += MXL_ControlWrite(fe, DN_EN_VHFUHFBAR, 0);
+ status += MXL_ControlWrite(fe, DN_GAIN_ADJUST, 3);
+ }
+ if (state->RF_LO > 650000000UL && state->RF_LO <= 900000000UL) {
+ status += MXL_ControlWrite(fe, DN_POLY, 3);
+ status += MXL_ControlWrite(fe, DN_RFGAIN, 2);
+ status += MXL_ControlWrite(fe, DN_CAP_RFLPF, 0);
+ status += MXL_ControlWrite(fe, DN_EN_VHFUHFBAR, 0);
+ status += MXL_ControlWrite(fe, DN_GAIN_ADJUST, 3);
+ }
+ if (state->RF_LO > 900000000UL)
+ return -1;
+
+ /* DN_IQTNBUF_AMP */
+ /* DN_IQTNGNBFBIAS_BST */
+ if (state->RF_LO >= 40000000UL && state->RF_LO <= 75000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 75000000UL && state->RF_LO <= 100000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 100000000UL && state->RF_LO <= 150000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 150000000UL && state->RF_LO <= 200000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 200000000UL && state->RF_LO <= 300000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 300000000UL && state->RF_LO <= 400000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 400000000UL && state->RF_LO <= 450000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 450000000UL && state->RF_LO <= 500000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 500000000UL && state->RF_LO <= 550000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 550000000UL && state->RF_LO <= 600000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 600000000UL && state->RF_LO <= 650000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 650000000UL && state->RF_LO <= 700000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 700000000UL && state->RF_LO <= 750000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 750000000UL && state->RF_LO <= 800000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 1);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 0);
+ }
+ if (state->RF_LO > 800000000UL && state->RF_LO <= 850000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 10);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 1);
+ }
+ if (state->RF_LO > 850000000UL && state->RF_LO <= 900000000UL) {
+ status += MXL_ControlWrite(fe, DN_IQTNBUF_AMP, 10);
+ status += MXL_ControlWrite(fe, DN_IQTNGNBFBIAS_BST, 1);
+ }
+
+ /*
+ * Set RF Synth and LO Path Control
+ *
+ * Look-Up table implementation for:
+ * RFSYN_EN_OUTMUX
+ * RFSYN_SEL_VCO_OUT
+ * RFSYN_SEL_VCO_HI
+ * RFSYN_SEL_DIVM
+ * RFSYN_RF_DIV_BIAS
+ * DN_SEL_FREQ
+ *
+ * Set divider_val, Fmax, Fmin to use in Equations
+ */
+ FminBin = 28000000UL ;
+ FmaxBin = 42500000UL ;
+ if (state->RF_LO >= 40000000UL && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 1);
+ divider_val = 64 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 42500000UL ;
+ FmaxBin = 56000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 1);
+ divider_val = 64 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 56000000UL ;
+ FmaxBin = 85000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 1);
+ divider_val = 32 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 85000000UL ;
+ FmaxBin = 112000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 1);
+ divider_val = 32 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 112000000UL ;
+ FmaxBin = 170000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 2);
+ divider_val = 16 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 170000000UL ;
+ FmaxBin = 225000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 2);
+ divider_val = 16 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 225000000UL ;
+ FmaxBin = 300000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 4);
+ divider_val = 8 ;
+ Fmax = 340000000UL ;
+ Fmin = FminBin ;
+ }
+ FminBin = 300000000UL ;
+ FmaxBin = 340000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ divider_val = 8 ;
+ Fmax = FmaxBin ;
+ Fmin = 225000000UL ;
+ }
+ FminBin = 340000000UL ;
+ FmaxBin = 450000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 2);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ divider_val = 8 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 450000000UL ;
+ FmaxBin = 680000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 1);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ divider_val = 4 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 680000000UL ;
+ FmaxBin = 900000000UL ;
+ if (state->RF_LO > FminBin && state->RF_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 1);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ divider_val = 4 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+
+ /* CHCAL_INT_MOD_RF
+ * CHCAL_FRAC_MOD_RF
+ * RFSYN_LPF_R
+ * CHCAL_EN_INT_RF
+ */
+ /* Equation E3 RFSYN_VCO_BIAS */
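+ /*
+  * E3 maps RF_LO's position within the current bin linearly onto a
+  * VCO bias code from 8 (RF_LO at the top of the bin) to 40 (at the
+  * bottom).
+  */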
+ E3 = (((Fmax-state->RF_LO)/1000)*32)/((Fmax-Fmin)/1000) + 8 ;
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, E3);
+
+ /* Equation E4 CHCAL_INT_MOD_RF */
+ E4 = (state->RF_LO*divider_val/1000)/(2*state->Fxtal*Kdbl_RF/1000);
+ MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, E4);
+
+ /* Equation E5 CHCAL_FRAC_MOD_RF CHCAL_EN_INT_RF */
+ E5 = ((2<<17)*(state->RF_LO/10000*divider_val -
+ (E4*(2*state->Fxtal*Kdbl_RF)/10000))) /
+ (2*state->Fxtal*Kdbl_RF/10000);
+
+ status += MXL_ControlWrite(fe, CHCAL_FRAC_MOD_RF, E5);
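+ /*
+  * Worked example (illustrative values): RF_LO = 700 MHz,
+  * divider_val = 4, Fxtal = 16 MHz and Kdbl_RF = 2 give
+  * E4 = 2800000 / 64000 = 43 and
+  * E5 = 262144 * (280000 - 43 * 6400) / 6400 = 196608, i.e. an
+  * effective division ratio of 43.75.
+  */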
+
+ /* Equation E5A RFSYN_LPF_R */
+ E5A = (((Fmax - state->RF_LO)/1000)*4/((Fmax-Fmin)/1000)) + 1 ;
+ status += MXL_ControlWrite(fe, RFSYN_LPF_R, E5A);
+
+ /* Equation E5B CHCAL_EN_INT_RF */
+ status += MXL_ControlWrite(fe, CHCAL_EN_INT_RF, ((E5 == 0) ? 1 : 0));
+ /*if (E5 == 0)
+ * status += MXL_ControlWrite(fe, CHCAL_EN_INT_RF, 1);
+ *else
+ * status += MXL_ControlWrite(fe, CHCAL_FRAC_MOD_RF, E5);
+ */
+
+ /*
+ * Set TG Synth
+ *
+ * Look-Up table implementation for:
+ * TG_LO_DIVVAL
+ * TG_LO_SELVAL
+ *
+ * Set divider_val, Fmax, Fmin to use in Equations
+ */
+ if (state->TG_LO < 33000000UL)
+ return -1;
+
+ FminBin = 33000000UL ;
+ FmaxBin = 50000000UL ;
+ if (state->TG_LO >= FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x6);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x0);
+ divider_val = 36 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 50000000UL ;
+ FmaxBin = 67000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x1);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x0);
+ divider_val = 24 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 67000000UL ;
+ FmaxBin = 100000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0xC);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x2);
+ divider_val = 18 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 100000000UL ;
+ FmaxBin = 150000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x8);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x2);
+ divider_val = 12 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 150000000UL ;
+ FmaxBin = 200000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x2);
+ divider_val = 8 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 200000000UL ;
+ FmaxBin = 300000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x8);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x3);
+ divider_val = 6 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 300000000UL ;
+ FmaxBin = 400000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x3);
+ divider_val = 4 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 400000000UL ;
+ FmaxBin = 600000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x8);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x7);
+ divider_val = 3 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+ FminBin = 600000000UL ;
+ FmaxBin = 900000000UL ;
+ if (state->TG_LO > FminBin && state->TG_LO <= FmaxBin) {
+ status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0);
+ status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x7);
+ divider_val = 2 ;
+ Fmax = FmaxBin ;
+ Fmin = FminBin ;
+ }
+
+ /* TG_DIV_VAL */
+ tg_divval = (state->TG_LO*divider_val/100000) *
+ (MXL_Ceiling(state->Fxtal, 1000000) * 100) /
+ (state->Fxtal/1000);
+
+ status += MXL_ControlWrite(fe, TG_DIV_VAL, tg_divval);
+
+ if (state->TG_LO > 600000000UL)
+ status += MXL_ControlWrite(fe, TG_DIV_VAL, tg_divval + 1);
+
+ Fmax = 1800000000UL ;
+ Fmin = 1200000000UL ;
+
+ /* To prevent overflow of a 32 bit unsigned integer, use the
+  * following equations. Edit for v2.6.4
+ */
+ /* Fref_TG here holds the actual Fref_TG / 1000 */
+ Fref_TG = (state->Fxtal/1000) / MXL_Ceiling(state->Fxtal, 1000000);
+
+ /* Fvco here holds roughly the actual Fvco / 10 */
+ Fvco = (state->TG_LO/10000) * divider_val * Fref_TG;
+
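+ /*
+  * Map the TG VCO frequency (Fvco is stored as roughly the actual
+  * value / 10) linearly onto a bias code from 8 (Fvco near 1.8 GHz)
+  * to 40 (Fvco near 1.2 GHz), mirroring equation E3 above.
+  */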
+ tg_lo = (((Fmax/10 - Fvco)/100)*32) / ((Fmax-Fmin)/1000)+8;
+
+ /* The equation below is equivalent to the one above but is much harder to debug:
+ * tg_lo = ( ((Fmax/10000 * Xtal_Int)/100) -
+ * ((state->TG_LO/10000)*divider_val *
+ * (state->Fxtal/10000)/100) )*32/((Fmax-Fmin)/10000 *
+ * Xtal_Int/100) + 8;
+ */
+
+ status += MXL_ControlWrite(fe, TG_VCO_BIAS , tg_lo);
+
+ /* add for 2.6.5 Special setting for QAM */
+ if (state->Mod_Type == MXL_QAM) {
+ if (state->RF_IN < 680000000)
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 3);
+ else
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 2);
+ }
+
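+ /*
+  * Each tracking filter type below is a band look-up table: the RF
+  * input range selects which external filter banks are switched in
+  * via GPIO 1/3/4 and how the DAC_A / DAC_B outputs are programmed.
+  */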
+ /* Off Chip Tracking Filter Control */
+ if (state->TF_Type == MXL_TF_OFF) {
+ /* Tracking Filter Off State; turn off all the banks */
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 3, 1); /* Bank1 Off */
+ status += MXL_SetGPIO(fe, 1, 1); /* Bank2 Off */
+ status += MXL_SetGPIO(fe, 4, 1); /* Bank3 Off */
+ }
+
+ if (state->TF_Type == MXL_TF_C) /* Tracking Filter type C */ {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_ControlWrite(fe, DAC_DIN_A, 0);
+
+ if (state->RF_IN >= 43000000 && state->RF_IN < 150000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ }
+ if (state->RF_IN >= 150000000 && state->RF_IN < 280000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ }
+ if (state->RF_IN >= 280000000 && state->RF_IN < 360000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ }
+ if (state->RF_IN >= 360000000 && state->RF_IN < 560000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 0);
+ }
+ if (state->RF_IN >= 560000000 && state->RF_IN < 580000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 29);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 0);
+ }
+ if (state->RF_IN >= 580000000 && state->RF_IN < 630000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 0);
+ }
+ if (state->RF_IN >= 630000000 && state->RF_IN < 700000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 16);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ }
+ if (state->RF_IN >= 700000000 && state->RF_IN < 760000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 7);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ }
+ if (state->RF_IN >= 760000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_C_H) {
+
+ /* Tracking Filter type C-H for Hauppauge only */
+ status += MXL_ControlWrite(fe, DAC_DIN_A, 0);
+
+ if (state->RF_IN >= 43000000 && state->RF_IN < 150000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ }
+ if (state->RF_IN >= 150000000 && state->RF_IN < 280000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ }
+ if (state->RF_IN >= 280000000 && state->RF_IN < 360000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ }
+ if (state->RF_IN >= 360000000 && state->RF_IN < 560000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ }
+ if (state->RF_IN >= 560000000 && state->RF_IN < 580000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ }
+ if (state->RF_IN >= 580000000 && state->RF_IN < 630000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ }
+ if (state->RF_IN >= 630000000 && state->RF_IN < 700000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ }
+ if (state->RF_IN >= 700000000 && state->RF_IN < 760000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ }
+ if (state->RF_IN >= 760000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_D) { /* Tracking Filter type D */
+
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+
+ if (state->RF_IN >= 43000000 && state->RF_IN < 174000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 174000000 && state->RF_IN < 250000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 250000000 && state->RF_IN < 310000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 310000000 && state->RF_IN < 360000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 360000000 && state->RF_IN < 470000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 470000000 && state->RF_IN < 640000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 640000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_D_L) {
+
+ /* Tracking Filter type D-L for Lumanate ONLY change 2.6.3 */
+ status += MXL_ControlWrite(fe, DAC_DIN_A, 0);
+
+ /* if UHF and terrestrial => Turn off Tracking Filter */
+ if (state->RF_IN >= 471000000 &&
+ (state->RF_IN - 471000000)%6000000 != 0) {
+ /* Turn off all the banks */
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_ControlWrite(fe, AGC_IF, 10);
+ } else {
+ /* if VHF or cable => Turn on Tracking Filter */
+ if (state->RF_IN >= 43000000 &&
+ state->RF_IN < 140000000) {
+
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 140000000 &&
+ state->RF_IN < 240000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 240000000 &&
+ state->RF_IN < 340000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 340000000 &&
+ state->RF_IN < 430000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 430000000 &&
+ state->RF_IN < 470000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 470000000 &&
+ state->RF_IN < 570000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 570000000 &&
+ state->RF_IN < 620000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 620000000 &&
+ state->RF_IN < 760000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 760000000 &&
+ state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_A_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_E) /* Tracking Filter type E */ {
+
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+
+ if (state->RF_IN >= 43000000 && state->RF_IN < 174000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 174000000 && state->RF_IN < 250000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 250000000 && state->RF_IN < 310000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 310000000 && state->RF_IN < 360000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 360000000 && state->RF_IN < 470000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 470000000 && state->RF_IN < 640000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 640000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_F) {
+
+ /* Tracking Filter type F */
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+
+ if (state->RF_IN >= 43000000 && state->RF_IN < 160000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 160000000 && state->RF_IN < 210000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 210000000 && state->RF_IN < 300000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 300000000 && state->RF_IN < 390000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 390000000 && state->RF_IN < 515000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 515000000 && state->RF_IN < 650000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 650000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_E_2) {
+
+ /* Tracking Filter type E_2 */
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+
+ if (state->RF_IN >= 43000000 && state->RF_IN < 174000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 174000000 && state->RF_IN < 250000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 250000000 && state->RF_IN < 350000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 350000000 && state->RF_IN < 400000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 400000000 && state->RF_IN < 570000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 570000000 && state->RF_IN < 770000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 770000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_G) {
+
+ /* Tracking Filter type G add for v2.6.8 */
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+
+ if (state->RF_IN >= 50000000 && state->RF_IN < 190000000) {
+
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 190000000 && state->RF_IN < 280000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 280000000 && state->RF_IN < 350000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 350000000 && state->RF_IN < 400000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 400000000 && state->RF_IN < 470000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 470000000 && state->RF_IN < 640000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 640000000 && state->RF_IN < 820000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 820000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ }
+
+ if (state->TF_Type == MXL_TF_E_NA) {
+
+ /* Tracking Filter type E-NA for Empia ONLY change for 2.6.8 */
+ status += MXL_ControlWrite(fe, DAC_DIN_B, 0);
+
+ /* if UHF and terrestrial => Turn off Tracking Filter */
+ if (state->RF_IN >= 471000000 &&
+ (state->RF_IN - 471000000)%6000000 != 0) {
+
+ /* Turn off all the banks */
+ status += MXL_SetGPIO(fe, 3, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+
+ /* 2.6.12 Turn on RSSI */
+ status += MXL_ControlWrite(fe, SEQ_EXTSYNTHCALIF, 1);
+ status += MXL_ControlWrite(fe, SEQ_EXTDCCAL, 1);
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 1);
+ status += MXL_ControlWrite(fe, RFA_ENCLKRFAGC, 1);
+
+ /* RSSI reference point */
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFH, 5);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REF, 3);
+ status += MXL_ControlWrite(fe, RFA_RSSI_REFL, 2);
+
+ /* The following parameter is taken from analog OTA mode;
+  * it can be changed to seek better performance. */
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 3);
+ } else {
+ /* if VHF or Cable => Turn on Tracking Filter */
+
+ /* 2.6.12 Turn off RSSI */
+ status += MXL_ControlWrite(fe, AGC_EN_RSSI, 0);
+
+ /* change back from above condition */
+ status += MXL_ControlWrite(fe, RFSYN_CHP_GAIN, 5);
+
+
+ if (state->RF_IN >= 43000000 && state->RF_IN < 174000000) {
+
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 174000000 && state->RF_IN < 250000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 0);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 250000000 && state->RF_IN < 350000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ if (state->RF_IN >= 350000000 && state->RF_IN < 400000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 0);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 400000000 && state->RF_IN < 570000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 0);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 570000000 && state->RF_IN < 770000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 0);
+ }
+ if (state->RF_IN >= 770000000 && state->RF_IN <= 900000000) {
+ status += MXL_ControlWrite(fe, DAC_B_ENABLE, 1);
+ status += MXL_SetGPIO(fe, 4, 1);
+ status += MXL_SetGPIO(fe, 1, 1);
+ status += MXL_SetGPIO(fe, 3, 1);
+ }
+ }
+ }
+ return status ;
+}
+
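+/*
+ * Drive one of the tracking filter GPIO lines.  The register values
+ * are the inverse of GPIO_Val (a GPIO_Val of 1 writes 0), GPIO_Val == 3
+ * tri-states the pin, and GPIO 2 is not available.
+ */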
+static u16 MXL_SetGPIO(struct dvb_frontend *fe, u8 GPIO_Num, u8 GPIO_Val)
+{
+ u16 status = 0;
+
+ if (GPIO_Num == 1)
+ status += MXL_ControlWrite(fe, GPIO_1B, GPIO_Val ? 0 : 1);
+
+ /* GPIO2 is not available */
+
+ if (GPIO_Num == 3) {
+ if (GPIO_Val == 1) {
+ status += MXL_ControlWrite(fe, GPIO_3, 0);
+ status += MXL_ControlWrite(fe, GPIO_3B, 0);
+ }
+ if (GPIO_Val == 0) {
+ status += MXL_ControlWrite(fe, GPIO_3, 1);
+ status += MXL_ControlWrite(fe, GPIO_3B, 1);
+ }
+ if (GPIO_Val == 3) { /* tri-state */
+ status += MXL_ControlWrite(fe, GPIO_3, 0);
+ status += MXL_ControlWrite(fe, GPIO_3B, 1);
+ }
+ }
+ if (GPIO_Num == 4) {
+ if (GPIO_Val == 1) {
+ status += MXL_ControlWrite(fe, GPIO_4, 0);
+ status += MXL_ControlWrite(fe, GPIO_4B, 0);
+ }
+ if (GPIO_Val == 0) {
+ status += MXL_ControlWrite(fe, GPIO_4, 1);
+ status += MXL_ControlWrite(fe, GPIO_4B, 1);
+ }
+ if (GPIO_Val == 3) { /* tri-state */
+ status += MXL_ControlWrite(fe, GPIO_4, 0);
+ status += MXL_ControlWrite(fe, GPIO_4B, 1);
+ }
+ }
+
+ return status;
+}
+
+static u16 MXL_ControlWrite(struct dvb_frontend *fe, u16 ControlNum, u32 value)
+{
+ u16 status = 0;
+
+ /* Writes ALL matching control names */
+ /* Write Matching INIT Control */
+ status += MXL_ControlWrite_Group(fe, ControlNum, value, 1);
+ /* Write Matching CH Control */
+ status += MXL_ControlWrite_Group(fe, ControlNum, value, 2);
+#ifdef _MXL_INTERNAL
+ /* Write Matching MXL Control */
+ status += MXL_ControlWrite_Group(fe, ControlNum, value, 3);
+#endif
+ return status;
+}
+
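+/*
+ * Write one control value into the selected control group (1 = INIT,
+ * 2 = CH, 3 = MXL).  Each control is a bit field spread over one or
+ * more register bits; the value is written LSB first into the cached
+ * register image, and values that do not fit in the control's bit
+ * width are rejected.
+ */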
+static u16 MXL_ControlWrite_Group(struct dvb_frontend *fe, u16 controlNum,
+ u32 value, u16 controlGroup)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u16 i, j, k;
+ u32 highLimit;
+ u32 ctrlVal;
+
+ if (controlGroup == 1) /* Initial Control */ {
+
+ for (i = 0; i < state->Init_Ctrl_Num; i++) {
+
+ if (controlNum == state->Init_Ctrl[i].Ctrl_Num) {
+
+ highLimit = 1 << state->Init_Ctrl[i].size;
+ if (value < highLimit) {
+ for (j = 0; j < state->Init_Ctrl[i].size; j++) {
+ state->Init_Ctrl[i].val[j] = (u8)((value >> j) & 0x01);
+ MXL_RegWriteBit(fe, (u8)(state->Init_Ctrl[i].addr[j]),
+ (u8)(state->Init_Ctrl[i].bit[j]),
+ (u8)((value>>j) & 0x01));
+ }
+ ctrlVal = 0;
+ for (k = 0; k < state->Init_Ctrl[i].size; k++)
+ ctrlVal += state->Init_Ctrl[i].val[k] * (1 << k);
+ } else
+ return -1;
+ }
+ }
+ }
+ if (controlGroup == 2) /* Chan change Control */ {
+
+ for (i = 0; i < state->CH_Ctrl_Num; i++) {
+
+ if (controlNum == state->CH_Ctrl[i].Ctrl_Num) {
+
+ highLimit = 1 << state->CH_Ctrl[i].size;
+ if (value < highLimit) {
+ for (j = 0; j < state->CH_Ctrl[i].size; j++) {
+ state->CH_Ctrl[i].val[j] = (u8)((value >> j) & 0x01);
+ MXL_RegWriteBit(fe, (u8)(state->CH_Ctrl[i].addr[j]),
+ (u8)(state->CH_Ctrl[i].bit[j]),
+ (u8)((value>>j) & 0x01));
+ }
+ ctrlVal = 0;
+ for (k = 0; k < state->CH_Ctrl[i].size; k++)
+ ctrlVal += state->CH_Ctrl[i].val[k] * (1 << k);
+ } else
+ return -1;
+ }
+ }
+ }
+#ifdef _MXL_INTERNAL
+ if (controlGroup == 3) /* Maxlinear Control */ {
+
+ for (i = 0; i < state->MXL_Ctrl_Num; i++) {
+
+ if (controlNum == state->MXL_Ctrl[i].Ctrl_Num) {
+
+ highLimit = (1 << state->MXL_Ctrl[i].size);
+ if (value < highLimit) {
+ for (j = 0; j < state->MXL_Ctrl[i].size; j++) {
+ state->MXL_Ctrl[i].val[j] = (u8)((value >> j) & 0x01);
+ MXL_RegWriteBit(fe, (u8)(state->MXL_Ctrl[i].addr[j]),
+ (u8)(state->MXL_Ctrl[i].bit[j]),
+ (u8)((value>>j) & 0x01));
+ }
+ ctrlVal = 0;
+ for (k = 0; k < state->MXL_Ctrl[i].size; k++)
+ ctrlVal += state->MXL_Ctrl[i].val[k] * (1 << k);
+ } else
+ return -1;
+ }
+ }
+ }
+#endif
+ return 0; /* successful return */
+}
+
+static u16 MXL_RegRead(struct dvb_frontend *fe, u8 RegNum, u8 *RegVal)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ int i;
+
+ for (i = 0; i < state->TunerRegs_Num; i++) {
+ if (RegNum == state->TunerRegs[i].Reg_Num) {
+ *RegVal = (u8)(state->TunerRegs[i].Reg_Val);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static u16 MXL_ControlRead(struct dvb_frontend *fe, u16 controlNum, u32 *value)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u32 ctrlVal;
+ u16 i, k;
+
+ for (i = 0; i < state->Init_Ctrl_Num; i++) {
+
+ if (controlNum == state->Init_Ctrl[i].Ctrl_Num) {
+
+ ctrlVal = 0;
+ for (k = 0; k < state->Init_Ctrl[i].size; k++)
+ ctrlVal += state->Init_Ctrl[i].val[k] * (1<<k);
+ *value = ctrlVal;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < state->CH_Ctrl_Num; i++) {
+
+ if (controlNum == state->CH_Ctrl[i].Ctrl_Num) {
+
+ ctrlVal = 0;
+ for (k = 0; k < state->CH_Ctrl[i].size; k++)
+ ctrlVal += state->CH_Ctrl[i].val[k] * (1 << k);
+ *value = ctrlVal;
+ return 0;
+
+ }
+ }
+
+#ifdef _MXL_INTERNAL
+ for (i = 0; i < state->MXL_Ctrl_Num; i++) {
+
+ if (controlNum == state->MXL_Ctrl[i].Ctrl_Num) {
+
+ ctrlVal = 0;
+ for (k = 0; k < state->MXL_Ctrl[i].size; k++)
+ ctrlVal += state->MXL_Ctrl[i].val[k] * (1<<k);
+ *value = ctrlVal;
+ return 0;
+
+ }
+ }
+#endif
+ return 1;
+}
+
+static void MXL_RegWriteBit(struct dvb_frontend *fe, u8 address, u8 bit,
+ u8 bitVal)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ int i;
+
+ const u8 AND_MAP[8] = {
+ 0xFE, 0xFD, 0xFB, 0xF7,
+ 0xEF, 0xDF, 0xBF, 0x7F };
+
+ const u8 OR_MAP[8] = {
+ 0x01, 0x02, 0x04, 0x08,
+ 0x10, 0x20, 0x40, 0x80 };
+
+ for (i = 0; i < state->TunerRegs_Num; i++) {
+ if (state->TunerRegs[i].Reg_Num == address) {
+ if (bitVal)
+ state->TunerRegs[i].Reg_Val |= OR_MAP[bit];
+ else
+ state->TunerRegs[i].Reg_Val &= AND_MAP[bit];
+ break;
+ }
+ }
+}
+
+static u32 MXL_Ceiling(u32 value, u32 resolution)
+{
+ return (value/resolution + (value % resolution > 0 ? 1 : 0));
+}
+
+/* Retrieve the Initialization Registers */
+static u16 MXL_GetInitRegister(struct dvb_frontend *fe, u8 *RegNum,
+ u8 *RegVal, int *count)
+{
+ u16 status = 0;
+ int i;
+
+ u8 RegAddr[] = {
+ 11, 12, 13, 22, 32, 43, 44, 53, 56, 59, 73,
+ 76, 77, 91, 134, 135, 137, 147,
+ 156, 166, 167, 168, 25 };
+
+ *count = ARRAY_SIZE(RegAddr);
+
+ status += MXL_BlockInit(fe);
+
+ for (i = 0; i < *count; i++) {
+ RegNum[i] = RegAddr[i];
+ status += MXL_RegRead(fe, RegNum[i], &RegVal[i]);
+ }
+
+ return status;
+}
+
+static u16 MXL_GetCHRegister(struct dvb_frontend *fe, u8 *RegNum, u8 *RegVal,
+ int *count)
+{
+ u16 status = 0;
+ int i;
+
+/* registers 77, 166, 167 and 168 were added for 2.6.12 */
+#ifdef _MXL_PRODUCTION
+ u8 RegAddr[] = {14, 15, 16, 17, 22, 43, 65, 68, 69, 70, 73, 92, 93, 106,
+ 107, 108, 109, 110, 111, 112, 136, 138, 149, 77, 166, 167, 168 };
+#else
+ u8 RegAddr[] = {14, 15, 16, 17, 22, 43, 68, 69, 70, 73, 92, 93, 106,
+ 107, 108, 109, 110, 111, 112, 136, 138, 149, 77, 166, 167, 168 };
+ /*
+ u8 RegAddr[171];
+ for (i = 0; i <= 170; i++)
+ RegAddr[i] = i;
+ */
+#endif
+
+ *count = ARRAY_SIZE(RegAddr);
+
+ for (i = 0; i < *count; i++) {
+ RegNum[i] = RegAddr[i];
+ status += MXL_RegRead(fe, RegNum[i], &RegVal[i]);
+ }
+
+ return status;
+}
+
+static u16 MXL_GetCHRegister_ZeroIF(struct dvb_frontend *fe, u8 *RegNum,
+ u8 *RegVal, int *count)
+{
+ u16 status = 0;
+ int i;
+
+ u8 RegAddr[] = {43, 136};
+
+ *count = ARRAY_SIZE(RegAddr);
+
+ for (i = 0; i < *count; i++) {
+ RegNum[i] = RegAddr[i];
+ status += MXL_RegRead(fe, RegNum[i], &RegVal[i]);
+ }
+
+ return status;
+}
+
+static u16 MXL_GetMasterControl(u8 *MasterReg, int state)
+{
+ if (state == 1) /* Load_Start */
+ *MasterReg = 0xF3;
+ if (state == 2) /* Power_Down */
+ *MasterReg = 0x41;
+ if (state == 3) /* Synth_Reset */
+ *MasterReg = 0xB1;
+ if (state == 4) /* Seq_Off */
+ *MasterReg = 0xF1;
+
+ return 0;
+}
+
+#ifdef _MXL_PRODUCTION
+static u16 MXL_VCORange_Test(struct dvb_frontend *fe, int VCO_Range)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u16 status = 0;
+
+ if (VCO_Range == 1) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_DIV, 1);
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_DIVM, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ if (state->Mode == 0 && state->IF_Mode == 1) {
+ /* Analog Low IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 8);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 56);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 180224);
+ }
+ if (state->Mode == 0 && state->IF_Mode == 0) {
+ /* Analog Zero IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 8);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 56);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 222822);
+ }
+ if (state->Mode == 1) /* Digital Mode */ {
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 8);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 56);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 229376);
+ }
+ }
+
+ if (VCO_Range == 2) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_DIV, 1);
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_DIVM, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 41);
+ if (state->Mode == 0 && state->IF_Mode == 1) {
+ /* Analog Low IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 42);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 206438);
+ }
+ if (state->Mode == 0 && state->IF_Mode == 0) {
+ /* Analog Zero IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 42);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 206438);
+ }
+ if (state->Mode == 1) /* Digital Mode */ {
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 1);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 41);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 16384);
+ }
+ }
+
+ if (VCO_Range == 3) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_DIV, 1);
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_DIVM, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 8);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 42);
+ if (state->Mode == 0 && state->IF_Mode == 1) {
+ /* Analog Low IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 8);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 44);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 173670);
+ }
+ if (state->Mode == 0 && state->IF_Mode == 0) {
+ /* Analog Zero IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 8);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 44);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 173670);
+ }
+ if (state->Mode == 1) /* Digital Mode */ {
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 8);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 42);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 245760);
+ }
+ }
+
+ if (VCO_Range == 4) {
+ status += MXL_ControlWrite(fe, RFSYN_EN_DIV, 1);
+ status += MXL_ControlWrite(fe, RFSYN_EN_OUTMUX, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_DIVM, 0);
+ status += MXL_ControlWrite(fe, RFSYN_DIVM, 1);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_OUT, 1);
+ status += MXL_ControlWrite(fe, RFSYN_RF_DIV_BIAS, 1);
+ status += MXL_ControlWrite(fe, DN_SEL_FREQ, 0);
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 27);
+ if (state->Mode == 0 && state->IF_Mode == 1) {
+ /* Analog Low IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 27);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 206438);
+ }
+ if (state->Mode == 0 && state->IF_Mode == 0) {
+ /* Analog Zero IF Mode */
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 27);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 206438);
+ }
+ if (state->Mode == 1) /* Digital Mode */ {
+ status += MXL_ControlWrite(fe, RFSYN_SEL_VCO_HI, 0);
+ status += MXL_ControlWrite(fe, RFSYN_VCO_BIAS, 40);
+ status += MXL_ControlWrite(fe, CHCAL_INT_MOD_RF, 27);
+ status += MXL_ControlWrite(fe,
+ CHCAL_FRAC_MOD_RF, 212992);
+ }
+ }
+
+ return status;
+}
+
+static u16 MXL_Hystersis_Test(struct dvb_frontend *fe, int Hystersis)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u16 status = 0;
+
+ if (Hystersis == 1)
+ status += MXL_ControlWrite(fe, DN_BYPASS_AGC_I2C, 1);
+
+ return status;
+}
+#endif
+/* End: Reference driver code found in the Realtek driver that
+ * is copyright MaxLinear */
+
+/* ----------------------------------------------------------------
+ * Begin: Everything after here is new code to adapt the
+ * proprietary Realtek driver into a Linux API tuner.
+ * Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
+ */
+static int mxl5005s_reset(struct dvb_frontend *fe)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ int ret = 0;
+
+ u8 buf[2] = { 0xff, 0x00 };
+ struct i2c_msg msg = { .addr = state->config->i2c_address, .flags = 0,
+ .buf = buf, .len = 2 };
+
+ dprintk(2, "%s()\n", __func__);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(state->i2c, &msg, 1) != 1) {
+ printk(KERN_WARNING "mxl5005s I2C reset failed\n");
+ ret = -EREMOTEIO;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return ret;
+}
+
+/* Write a single byte to a single register; if required, latch the value
+ * by appending the latch byte to the transaction.
+ */
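+/* Resulting I2C payload, as implemented below (not taken from a datasheet):
+ *   latch == 0:  <reg> <val>
+ *   latch != 0:  <reg> <val> <MXL5005S_LATCH_BYTE>
+ */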
+static int mxl5005s_writereg(struct dvb_frontend *fe, u8 reg, u8 val, int latch)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u8 buf[3] = { reg, val, MXL5005S_LATCH_BYTE };
+ struct i2c_msg msg = { .addr = state->config->i2c_address, .flags = 0,
+ .buf = buf, .len = 3 };
+
+ if (latch == 0)
+ msg.len = 2;
+
+ dprintk(2, "%s(0x%x, 0x%x, 0x%x)\n", __func__, reg, val, msg.addr);
+
+ if (i2c_transfer(state->i2c, &msg, 1) != 1) {
+ printk(KERN_WARNING "mxl5005s I2C write failed\n");
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int mxl5005s_writeregs(struct dvb_frontend *fe, u8 *addrtable,
+ u8 *datatable, u8 len)
+{
+ int ret = 0, i;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ for (i = 0; i < len - 1; i++) {
+ ret = mxl5005s_writereg(fe, addrtable[i], datatable[i], 0);
+ if (ret < 0)
+ break;
+ }
+
+ /* Latch the final register only if the preceding writes succeeded */
+ if (ret == 0)
+ ret = mxl5005s_writereg(fe, addrtable[i], datatable[i], 1);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return ret;
+}
+
+static int mxl5005s_init(struct dvb_frontend *fe)
+{
+ dprintk(1, "%s()\n", __func__);
+ return mxl5005s_reconfigure(fe, MXL_QAM, MXL5005S_BANDWIDTH_6MHZ);
+}
+
+static int mxl5005s_reconfigure(struct dvb_frontend *fe, u32 mod_type,
+ u32 bandwidth)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+
+ u8 AddrTable[MXL5005S_REG_WRITING_TABLE_LEN_MAX];
+ u8 ByteTable[MXL5005S_REG_WRITING_TABLE_LEN_MAX];
+ int TableLen;
+
+ dprintk(1, "%s(type=%d, bw=%d)\n", __func__, mod_type, bandwidth);
+
+ mxl5005s_reset(fe);
+
+ /* Tuner initialization stage 0 */
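+ /* Stage 0 issues a synthesizer reset through the master control
+ * register; the board-specific AgcMasterByte from the attach-time
+ * config is OR'd into the value before the single latched write.
+ */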
+ MXL_GetMasterControl(ByteTable, MC_SYNTH_RESET);
+ AddrTable[0] = MASTER_CONTROL_ADDR;
+ ByteTable[0] |= state->config->AgcMasterByte;
+
+ mxl5005s_writeregs(fe, AddrTable, ByteTable, 1);
+
+ mxl5005s_AssignTunerMode(fe, mod_type, bandwidth);
+
+ /* Tuner initialization stage 1 */
+ MXL_GetInitRegister(fe, AddrTable, ByteTable, &TableLen);
+
+ mxl5005s_writeregs(fe, AddrTable, ByteTable, TableLen);
+
+ return 0;
+}
+
+static int mxl5005s_AssignTunerMode(struct dvb_frontend *fe, u32 mod_type,
+ u32 bandwidth)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ struct mxl5005s_config *c = state->config;
+
+ InitTunerControls(fe);
+
+ /* Set MxL5005S parameters. */
+ MXL5005_TunerConfig(
+ fe,
+ c->mod_mode,
+ c->if_mode,
+ bandwidth,
+ c->if_freq,
+ c->xtal_freq,
+ c->agc_mode,
+ c->top,
+ c->output_load,
+ c->clock_out,
+ c->div_out,
+ c->cap_select,
+ c->rssi_enable,
+ mod_type,
+ c->tracking_filter);
+
+ return 0;
+}
+
+static int mxl5005s_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ u32 req_mode, req_bw = 0;
+ int ret;
+
+ dprintk(1, "%s()\n", __func__);
+
+ if (fe->ops.info.type == FE_ATSC) {
+ switch (params->u.vsb.modulation) {
+ case VSB_8:
+ req_mode = MXL_ATSC; break;
+ default:
+ case QAM_64:
+ case QAM_256:
+ case QAM_AUTO:
+ req_mode = MXL_QAM; break;
+ }
+ } else
+ req_mode = MXL_DVBT;
+
+ /* Change tuner for new modulation type if required */
+ if (req_mode != state->current_mode) {
+ switch (req_mode) {
+ case MXL_ATSC:
+ case MXL_QAM:
+ req_bw = MXL5005S_BANDWIDTH_6MHZ;
+ break;
+ default:
+ /* Assume DVB-T */
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ req_bw = MXL5005S_BANDWIDTH_6MHZ;
+ break;
+ case BANDWIDTH_7_MHZ:
+ req_bw = MXL5005S_BANDWIDTH_7MHZ;
+ break;
+ case BANDWIDTH_AUTO:
+ case BANDWIDTH_8_MHZ:
+ req_bw = MXL5005S_BANDWIDTH_8MHZ;
+ break;
+ }
+ }
+
+ state->current_mode = req_mode;
+ ret = mxl5005s_reconfigure(fe, req_mode, req_bw);
+
+ } else
+ ret = 0;
+
+ if (ret == 0) {
+ dprintk(1, "%s() freq=%d\n", __func__, params->frequency);
+ ret = mxl5005s_SetRfFreqHz(fe, params->frequency);
+ }
+
+ return ret;
+}
+
+static int mxl5005s_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ dprintk(1, "%s()\n", __func__);
+
+ *frequency = state->RF_IN;
+
+ return 0;
+}
+
+static int mxl5005s_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct mxl5005s_state *state = fe->tuner_priv;
+ dprintk(1, "%s()\n", __func__);
+
+ *bandwidth = state->Chan_Bandwidth;
+
+ return 0;
+}
+
+static int mxl5005s_release(struct dvb_frontend *fe)
+{
+ dprintk(1, "%s()\n", __func__);
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static const struct dvb_tuner_ops mxl5005s_tuner_ops = {
+ .info = {
+ .name = "MaxLinear MXL5005S",
+ .frequency_min = 48000000,
+ .frequency_max = 860000000,
+ .frequency_step = 50000,
+ },
+
+ .release = mxl5005s_release,
+ .init = mxl5005s_init,
+
+ .set_params = mxl5005s_set_params,
+ .get_frequency = mxl5005s_get_frequency,
+ .get_bandwidth = mxl5005s_get_bandwidth,
+};
+
+struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct mxl5005s_config *config)
+{
+ struct mxl5005s_state *state = NULL;
+ dprintk(1, "%s()\n", __func__);
+
+ state = kzalloc(sizeof(struct mxl5005s_state), GFP_KERNEL);
+ if (state == NULL)
+ return NULL;
+
+ state->frontend = fe;
+ state->config = config;
+ state->i2c = i2c;
+ state->current_mode = MXL_QAM;
+
+ printk(KERN_INFO "MXL5005S: Attached at address 0x%02x\n",
+ config->i2c_address);
+
+ memcpy(&fe->ops.tuner_ops, &mxl5005s_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ fe->tuner_priv = state;
+ return fe;
+}
+EXPORT_SYMBOL(mxl5005s_attach);
+
+MODULE_DESCRIPTION("MaxLinear MXL5005S silicon tuner driver");
+MODULE_AUTHOR("Steven Toth");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/mxl5005s.h b/drivers/media/common/tuners/mxl5005s.h
new file mode 100644
index 00000000000..396db150bf0
--- /dev/null
+++ b/drivers/media/common/tuners/mxl5005s.h
@@ -0,0 +1,131 @@
+/*
+ MaxLinear MXL5005S VSB/QAM/DVBT tuner driver
+
+ Copyright (C) 2008 MaxLinear
+ Copyright (C) 2008 Steven Toth <stoth@hauppauge.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef __MXL5005S_H
+#define __MXL5005S_H
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+struct mxl5005s_config {
+
+ /* 7 bit i2c address */
+ u8 i2c_address;
+
+#define IF_FREQ_4570000HZ 4570000
+#define IF_FREQ_4571429HZ 4571429
+#define IF_FREQ_5380000HZ 5380000
+#define IF_FREQ_36000000HZ 36000000
+#define IF_FREQ_36125000HZ 36125000
+#define IF_FREQ_36166667HZ 36166667
+#define IF_FREQ_44000000HZ 44000000
+ u32 if_freq;
+
+#define CRYSTAL_FREQ_4000000HZ 4000000
+#define CRYSTAL_FREQ_16000000HZ 16000000
+#define CRYSTAL_FREQ_25000000HZ 25000000
+#define CRYSTAL_FREQ_28800000HZ 28800000
+ u32 xtal_freq;
+
+#define MXL_DUAL_AGC 0
+#define MXL_SINGLE_AGC 1
+ u8 agc_mode;
+
+#define MXL_TF_DEFAULT 0
+#define MXL_TF_OFF 1
+#define MXL_TF_C 2
+#define MXL_TF_C_H 3
+#define MXL_TF_D 4
+#define MXL_TF_D_L 5
+#define MXL_TF_E 6
+#define MXL_TF_F 7
+#define MXL_TF_E_2 8
+#define MXL_TF_E_NA 9
+#define MXL_TF_G 10
+ u8 tracking_filter;
+
+#define MXL_RSSI_DISABLE 0
+#define MXL_RSSI_ENABLE 1
+ u8 rssi_enable;
+
+#define MXL_CAP_SEL_DISABLE 0
+#define MXL_CAP_SEL_ENABLE 1
+ u8 cap_select;
+
+#define MXL_DIV_OUT_1 0
+#define MXL_DIV_OUT_4 1
+ u8 div_out;
+
+#define MXL_CLOCK_OUT_DISABLE 0
+#define MXL_CLOCK_OUT_ENABLE 1
+ u8 clock_out;
+
+#define MXL5005S_IF_OUTPUT_LOAD_200_OHM 200
+#define MXL5005S_IF_OUTPUT_LOAD_300_OHM 300
+ u32 output_load;
+
+#define MXL5005S_TOP_5P5 55
+#define MXL5005S_TOP_7P2 72
+#define MXL5005S_TOP_9P2 92
+#define MXL5005S_TOP_11P0 110
+#define MXL5005S_TOP_12P9 129
+#define MXL5005S_TOP_14P7 147
+#define MXL5005S_TOP_16P8 168
+#define MXL5005S_TOP_19P4 194
+#define MXL5005S_TOP_21P2 212
+#define MXL5005S_TOP_23P2 232
+#define MXL5005S_TOP_25P2 252
+#define MXL5005S_TOP_27P1 271
+#define MXL5005S_TOP_29P2 292
+#define MXL5005S_TOP_31P7 317
+#define MXL5005S_TOP_34P9 349
+ u32 top;
+
+#define MXL_ANALOG_MODE 0
+#define MXL_DIGITAL_MODE 1
+ u8 mod_mode;
+
+#define MXL_ZERO_IF 0
+#define MXL_LOW_IF 1
+ u8 if_mode;
+
+ /* AGC master byte, OR'd into the master control register at init time */
+ u8 AgcMasterByte;
+};
+
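+/* Typical bridge-driver usage (illustrative sketch only; the config values
+ * and the surrounding variable names are hypothetical and board specific):
+ *
+ *	static struct mxl5005s_config demo_mxl5005s_config = {
+ *		.i2c_address = 0x63,
+ *		.if_freq     = IF_FREQ_5380000HZ,
+ *		.xtal_freq   = CRYSTAL_FREQ_16000000HZ,
+ *		.agc_mode    = MXL_SINGLE_AGC,
+ *		.if_mode     = MXL_ZERO_IF,
+ *	};
+ *
+ *	dvb_attach(mxl5005s_attach, dvb->frontend, &dev->i2c_adap,
+ *		   &demo_mxl5005s_config);
+ */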
+#if defined(CONFIG_MEDIA_TUNER_MXL5005S) || \
+ (defined(CONFIG_MEDIA_TUNER_MXL5005S_MODULE) && defined(MODULE))
+extern struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct mxl5005s_config *config);
+#else
+static inline struct dvb_frontend *mxl5005s_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct mxl5005s_config *config)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_MEDIA_TUNER_MXL5005S */
+
+#endif /* __MXL5005S_H */
+
diff --git a/drivers/media/common/tuners/tda18271-common.c b/drivers/media/common/tuners/tda18271-common.c
index e27a7620a32..42b5f5d4bfe 100644
--- a/drivers/media/common/tuners/tda18271-common.c
+++ b/drivers/media/common/tuners/tda18271-common.c
@@ -227,9 +227,8 @@ int tda18271_charge_pump_source(struct dvb_frontend *fe,
regs[r_cp] &= ~0x20;
regs[r_cp] |= ((force & 1) << 5);
- tda18271_write_regs(fe, r_cp, 1);
- return 0;
+ return tda18271_write_regs(fe, r_cp, 1);
}
int tda18271_init_regs(struct dvb_frontend *fe)
@@ -487,16 +486,15 @@ int tda18271_set_standby_mode(struct dvb_frontend *fe,
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
- tda_dbg("sm = %d, sm_lt = %d, sm_xt = %d\n", sm, sm_lt, sm_xt);
+ if (tda18271_debug & DBG_ADV)
+ tda_dbg("sm = %d, sm_lt = %d, sm_xt = %d\n", sm, sm_lt, sm_xt);
regs[R_EP3] &= ~0xe0; /* clear sm, sm_lt, sm_xt */
regs[R_EP3] |= sm ? (1 << 7) : 0 |
sm_lt ? (1 << 6) : 0 |
sm_xt ? (1 << 5) : 0;
- tda18271_write_regs(fe, R_EP3, 1);
-
- return 0;
+ return tda18271_write_regs(fe, R_EP3, 1);
}
/*---------------------------------------------------------------------*/
@@ -510,7 +508,7 @@ int tda18271_calc_main_pll(struct dvb_frontend *fe, u32 freq)
u32 div;
int ret = tda18271_lookup_pll_map(fe, MAIN_PLL, &freq, &pd, &d);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
regs[R_MPD] = (0x77 & pd);
@@ -542,7 +540,7 @@ int tda18271_calc_cal_pll(struct dvb_frontend *fe, u32 freq)
u32 div;
int ret = tda18271_lookup_pll_map(fe, CAL_PLL, &freq, &pd, &d);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
regs[R_CPD] = pd;
@@ -566,7 +564,7 @@ int tda18271_calc_bp_filter(struct dvb_frontend *fe, u32 *freq)
u8 val;
int ret = tda18271_lookup_map(fe, BP_FILTER, freq, &val);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
regs[R_EP1] &= ~0x07; /* clear bp filter bits */
@@ -583,7 +581,7 @@ int tda18271_calc_km(struct dvb_frontend *fe, u32 *freq)
u8 val;
int ret = tda18271_lookup_map(fe, RF_CAL_KMCO, freq, &val);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
regs[R_EB13] &= ~0x7c; /* clear k & m bits */
@@ -600,7 +598,7 @@ int tda18271_calc_rf_band(struct dvb_frontend *fe, u32 *freq)
u8 val;
int ret = tda18271_lookup_map(fe, RF_BAND, freq, &val);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
regs[R_EP2] &= ~0xe0; /* clear rf band bits */
@@ -617,7 +615,7 @@ int tda18271_calc_gain_taper(struct dvb_frontend *fe, u32 *freq)
u8 val;
int ret = tda18271_lookup_map(fe, GAIN_TAPER, freq, &val);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
regs[R_EP2] &= ~0x1f; /* clear gain taper bits */
@@ -634,7 +632,7 @@ int tda18271_calc_ir_measure(struct dvb_frontend *fe, u32 *freq)
u8 val;
int ret = tda18271_lookup_map(fe, IR_MEASURE, freq, &val);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
regs[R_EP5] &= ~0x07;
diff --git a/drivers/media/common/tuners/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index b262100ae89..89c01fb1f85 100644
--- a/drivers/media/common/tuners/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
@@ -51,6 +51,7 @@ static int tda18271_channel_configuration(struct dvb_frontend *fe,
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
+ int ret;
u32 N;
/* update TV broadcast parameters */
@@ -85,7 +86,9 @@ static int tda18271_channel_configuration(struct dvb_frontend *fe,
/* update rf top / if top */
regs[R_EB22] = 0x00;
regs[R_EB22] |= map->rfagc_top;
- tda18271_write_regs(fe, R_EB22, 1);
+ ret = tda18271_write_regs(fe, R_EB22, 1);
+ if (tda_fail(ret))
+ goto fail;
/* --------------------------------------------------------------- */
@@ -121,7 +124,9 @@ static int tda18271_channel_configuration(struct dvb_frontend *fe,
/* agc1 has priority on agc2 */
regs[R_EB1] &= ~0x01;
- tda18271_write_regs(fe, R_EB1, 1);
+ ret = tda18271_write_regs(fe, R_EB1, 1);
+ if (tda_fail(ret))
+ goto fail;
/* --------------------------------------------------------------- */
@@ -141,7 +146,9 @@ static int tda18271_channel_configuration(struct dvb_frontend *fe,
break;
}
- tda18271_write_regs(fe, R_TM, 7);
+ ret = tda18271_write_regs(fe, R_TM, 7);
+ if (tda_fail(ret))
+ goto fail;
/* force charge pump source */
charge_pump_source(fe, 1);
@@ -158,9 +165,9 @@ static int tda18271_channel_configuration(struct dvb_frontend *fe,
regs[R_EP3] &= ~0x04;
else
regs[R_EP3] |= 0x04;
- tda18271_write_regs(fe, R_EP3, 1);
-
- return 0;
+ ret = tda18271_write_regs(fe, R_EP3, 1);
+fail:
+ return ret;
}
static int tda18271_read_thermometer(struct dvb_frontend *fe)
@@ -213,11 +220,13 @@ static int tda18271c2_rf_tracking_filters_correction(struct dvb_frontend *fe,
struct tda18271_priv *priv = fe->tuner_priv;
struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state;
unsigned char *regs = priv->tda18271_regs;
- int tm_current, rfcal_comp, approx, i;
+ int tm_current, rfcal_comp, approx, i, ret;
u8 dc_over_dt, rf_tab;
/* power up */
- tda18271_set_standby_mode(fe, 0, 0, 0);
+ ret = tda18271_set_standby_mode(fe, 0, 0, 0);
+ if (tda_fail(ret))
+ goto fail;
/* read die current temperature */
tm_current = tda18271_read_thermometer(fe);
@@ -228,8 +237,8 @@ static int tda18271c2_rf_tracking_filters_correction(struct dvb_frontend *fe,
rf_tab = regs[R_EB14];
i = tda18271_lookup_rf_band(fe, &freq, NULL);
- if (i < 0)
- return -EINVAL;
+ if (tda_fail(i))
+ return i;
if ((0 == map[i].rf3) || (freq / 1000 < map[i].rf2)) {
approx = map[i].rf_a1 *
@@ -250,35 +259,42 @@ static int tda18271c2_rf_tracking_filters_correction(struct dvb_frontend *fe,
rfcal_comp = dc_over_dt * (tm_current - priv->tm_rfcal);
regs[R_EB14] = approx + rfcal_comp;
- tda18271_write_regs(fe, R_EB14, 1);
-
- return 0;
+ ret = tda18271_write_regs(fe, R_EB14, 1);
+fail:
+ return ret;
}
static int tda18271_por(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
+ int ret;
/* power up detector 1 */
regs[R_EB12] &= ~0x20;
- tda18271_write_regs(fe, R_EB12, 1);
+ ret = tda18271_write_regs(fe, R_EB12, 1);
+ if (tda_fail(ret))
+ goto fail;
regs[R_EB18] &= ~0x80; /* turn agc1 loop on */
regs[R_EB18] &= ~0x03; /* set agc1_gain to 6 dB */
- tda18271_write_regs(fe, R_EB18, 1);
+ ret = tda18271_write_regs(fe, R_EB18, 1);
+ if (tda_fail(ret))
+ goto fail;
regs[R_EB21] |= 0x03; /* set agc2_gain to -6 dB */
/* POR mode */
- tda18271_set_standby_mode(fe, 1, 0, 0);
+ ret = tda18271_set_standby_mode(fe, 1, 0, 0);
+ if (tda_fail(ret))
+ goto fail;
/* disable 1.5 MHz low pass filter */
regs[R_EB23] &= ~0x04; /* forcelp_fc2_en = 0 */
regs[R_EB23] &= ~0x02; /* XXX: lp_fc[2] = 0 */
- tda18271_write_regs(fe, R_EB21, 3);
-
- return 0;
+ ret = tda18271_write_regs(fe, R_EB21, 3);
+fail:
+ return ret;
}
static int tda18271_calibrate_rf(struct dvb_frontend *fe, u32 freq)
@@ -389,7 +405,7 @@ static int tda18271_powerscan(struct dvb_frontend *fe,
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
- int sgn, bcal, count, wait;
+ int sgn, bcal, count, wait, ret;
u8 cid_target;
u16 count_limit;
u32 freq;
@@ -421,7 +437,9 @@ static int tda18271_powerscan(struct dvb_frontend *fe,
tda18271_write_regs(fe, R_EP2, 1);
/* read power detection info, stored in EB10 */
- tda18271_read_extended(fe);
+ ret = tda18271_read_extended(fe);
+ if (tda_fail(ret))
+ return ret;
/* algorithm initialization */
sgn = 1;
@@ -447,7 +465,9 @@ static int tda18271_powerscan(struct dvb_frontend *fe,
tda18271_write_regs(fe, R_EP2, 1);
/* read power detection info, stored in EB10 */
- tda18271_read_extended(fe);
+ ret = tda18271_read_extended(fe);
+ if (tda_fail(ret))
+ return ret;
count += 200;
@@ -478,6 +498,7 @@ static int tda18271_powerscan_init(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
+ int ret;
/* set standard to digital */
regs[R_EP3] &= ~0x1f; /* clear std bits */
@@ -489,10 +510,14 @@ static int tda18271_powerscan_init(struct dvb_frontend *fe)
/* update IF output level & IF notch frequency */
regs[R_EP4] &= ~0x1c; /* clear if level bits */
- tda18271_write_regs(fe, R_EP3, 2);
+ ret = tda18271_write_regs(fe, R_EP3, 2);
+ if (tda_fail(ret))
+ goto fail;
regs[R_EB18] &= ~0x03; /* set agc1_gain to 6 dB */
- tda18271_write_regs(fe, R_EB18, 1);
+ ret = tda18271_write_regs(fe, R_EB18, 1);
+ if (tda_fail(ret))
+ goto fail;
regs[R_EB21] &= ~0x03; /* set agc2_gain to -15 dB */
@@ -500,9 +525,9 @@ static int tda18271_powerscan_init(struct dvb_frontend *fe)
regs[R_EB23] |= 0x04; /* forcelp_fc2_en = 1 */
regs[R_EB23] |= 0x02; /* lp_fc[2] = 1 */
- tda18271_write_regs(fe, R_EB21, 3);
-
- return 0;
+ ret = tda18271_write_regs(fe, R_EB21, 3);
+fail:
+ return ret;
}
static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq)
@@ -521,7 +546,7 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq)
i = tda18271_lookup_rf_band(fe, &freq, NULL);
- if (i < 0)
+ if (tda_fail(i))
return i;
rf_default[RF1] = 1000 * map[i].rf1_def;
@@ -535,6 +560,8 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq)
/* look for optimized calibration frequency */
bcal = tda18271_powerscan(fe, &rf_default[rf], &rf_freq[rf]);
+ if (tda_fail(bcal))
+ return bcal;
tda18271_calc_rf_cal(fe, &rf_freq[rf]);
prog_tab[rf] = regs[R_EB14];
@@ -575,22 +602,29 @@ static int tda18271_calc_rf_filter_curve(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned int i;
+ int ret;
tda_info("tda18271: performing RF tracking filter calibration\n");
/* wait for die temperature stabilization */
msleep(200);
- tda18271_powerscan_init(fe);
+ ret = tda18271_powerscan_init(fe);
+ if (tda_fail(ret))
+ goto fail;
/* rf band calibration */
- for (i = 0; priv->rf_cal_state[i].rfmax != 0; i++)
+ for (i = 0; priv->rf_cal_state[i].rfmax != 0; i++) {
+ ret =
tda18271_rf_tracking_filters_init(fe, 1000 *
priv->rf_cal_state[i].rfmax);
+ if (tda_fail(ret))
+ goto fail;
+ }
priv->tm_rfcal = tda18271_read_thermometer(fe);
-
- return 0;
+fail:
+ return ret;
}
/* ------------------------------------------------------------------ */
@@ -599,6 +633,7 @@ static int tda18271c2_rf_cal_init(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
+ int ret;
/* test RF_CAL_OK to see if we need init */
if ((regs[R_EP1] & 0x10) == 0)
@@ -607,15 +642,22 @@ static int tda18271c2_rf_cal_init(struct dvb_frontend *fe)
if (priv->cal_initialized)
return 0;
- tda18271_calc_rf_filter_curve(fe);
+ ret = tda18271_calc_rf_filter_curve(fe);
+ if (tda_fail(ret))
+ goto fail;
- tda18271_por(fe);
+ ret = tda18271_por(fe);
+ if (tda_fail(ret))
+ goto fail;
tda_info("tda18271: RF tracking filter calibration complete\n");
priv->cal_initialized = true;
-
- return 0;
+ goto end;
+fail:
+ tda_info("tda18271: RF tracking filter calibration failed!\n");
+end:
+ return ret;
}
static int tda18271c1_rf_tracking_filter_calibration(struct dvb_frontend *fe,
@@ -623,6 +665,7 @@ static int tda18271c1_rf_tracking_filter_calibration(struct dvb_frontend *fe,
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
+ int ret;
u32 N = 0;
/* calculate bp filter */
@@ -671,7 +714,10 @@ static int tda18271c1_rf_tracking_filter_calibration(struct dvb_frontend *fe,
tda18271_calc_main_pll(fe, N);
- tda18271_write_regs(fe, R_EP3, 11);
+ ret = tda18271_write_regs(fe, R_EP3, 11);
+ if (tda_fail(ret))
+ return ret;
+
msleep(5); /* RF tracking filter calibration initialization */
/* search for K,M,CO for RF calibration */
@@ -719,45 +765,56 @@ static int tda18271_ir_cal_init(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
unsigned char *regs = priv->tda18271_regs;
+ int ret;
- tda18271_read_regs(fe);
+ ret = tda18271_read_regs(fe);
+ if (tda_fail(ret))
+ goto fail;
/* test IR_CAL_OK to see if we need init */
if ((regs[R_EP1] & 0x08) == 0)
- tda18271_init_regs(fe);
-
- return 0;
+ ret = tda18271_init_regs(fe);
+fail:
+ return ret;
}
static int tda18271_init(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
+ int ret;
mutex_lock(&priv->lock);
/* power up */
- tda18271_set_standby_mode(fe, 0, 0, 0);
+ ret = tda18271_set_standby_mode(fe, 0, 0, 0);
+ if (tda_fail(ret))
+ goto fail;
/* initialization */
- tda18271_ir_cal_init(fe);
+ ret = tda18271_ir_cal_init(fe);
+ if (tda_fail(ret))
+ goto fail;
if (priv->id == TDA18271HDC2)
tda18271c2_rf_cal_init(fe);
-
+fail:
mutex_unlock(&priv->lock);
- return 0;
+ return ret;
}
static int tda18271_tune(struct dvb_frontend *fe,
struct tda18271_std_map_item *map, u32 freq, u32 bw)
{
struct tda18271_priv *priv = fe->tuner_priv;
+ int ret;
tda_dbg("freq = %d, ifc = %d, bw = %d, agc_mode = %d, std = %d\n",
freq, map->if_freq, bw, map->agc_mode, map->std);
- tda18271_init(fe);
+ ret = tda18271_init(fe);
+ if (tda_fail(ret))
+ goto fail;
mutex_lock(&priv->lock);
@@ -769,11 +826,11 @@ static int tda18271_tune(struct dvb_frontend *fe,
tda18271c2_rf_tracking_filters_correction(fe, freq);
break;
}
- tda18271_channel_configuration(fe, map, freq, bw);
+ ret = tda18271_channel_configuration(fe, map, freq, bw);
mutex_unlock(&priv->lock);
-
- return 0;
+fail:
+ return ret;
}
/* ------------------------------------------------------------------ */
@@ -837,7 +894,7 @@ static int tda18271_set_params(struct dvb_frontend *fe,
ret = tda18271_tune(fe, map, freq, bw);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
priv->frequency = freq;
@@ -893,7 +950,7 @@ static int tda18271_set_analog_params(struct dvb_frontend *fe,
ret = tda18271_tune(fe, map, freq, 0);
- if (ret < 0)
+ if (tda_fail(ret))
goto fail;
priv->frequency = freq;
@@ -905,16 +962,17 @@ fail:
static int tda18271_sleep(struct dvb_frontend *fe)
{
struct tda18271_priv *priv = fe->tuner_priv;
+ int ret;
mutex_lock(&priv->lock);
/* standby mode w/ slave tuner output
* & loop thru & xtal oscillator on */
- tda18271_set_standby_mode(fe, 1, 0, 0);
+ ret = tda18271_set_standby_mode(fe, 1, 0, 0);
mutex_unlock(&priv->lock);
- return 0;
+ return ret;
}
static int tda18271_release(struct dvb_frontend *fe)
@@ -1095,10 +1153,10 @@ struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr,
if (cfg)
priv->small_i2c = cfg->small_i2c;
- if (tda18271_get_id(fe) < 0)
+ if (tda_fail(tda18271_get_id(fe)))
goto fail;
- if (tda18271_assign_map_layout(fe) < 0)
+ if (tda_fail(tda18271_assign_map_layout(fe)))
goto fail;
mutex_lock(&priv->lock);
diff --git a/drivers/media/common/tuners/tda18271-priv.h b/drivers/media/common/tuners/tda18271-priv.h
index 2bc5eb368ea..81a739365f8 100644
--- a/drivers/media/common/tuners/tda18271-priv.h
+++ b/drivers/media/common/tuners/tda18271-priv.h
@@ -153,6 +153,15 @@ extern int tda18271_debug;
#define tda_reg(fmt, arg...) dprintk(KERN_DEBUG, DBG_REG, fmt, ##arg)
#define tda_cal(fmt, arg...) dprintk(KERN_DEBUG, DBG_CAL, fmt, ##arg)
+#define tda_fail(ret) \
+({ \
+ int __ret; \
+ __ret = ((ret) < 0); \
+ if (__ret) \
+ tda_printk(KERN_ERR, "error %d on line %d\n", (ret), __LINE__);\
+ __ret; \
+})
+
/*---------------------------------------------------------------------*/
enum tda18271_map_type {
diff --git a/drivers/media/common/tuners/tea5767.c b/drivers/media/common/tuners/tea5767.c
index f6e7d7ad842..1f5646334a8 100644
--- a/drivers/media/common/tuners/tea5767.c
+++ b/drivers/media/common/tuners/tea5767.c
@@ -373,14 +373,14 @@ int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
if ((rc = tuner_i2c_xfer_recv(&i2c, buffer, 7))< 5) {
printk(KERN_WARNING "It is not a TEA5767. Received %i bytes.\n", rc);
- return EINVAL;
+ return -EINVAL;
}
/* If all bytes are the same then it's a TV tuner and not a tea5767 */
if (buffer[0] == buffer[1] && buffer[0] == buffer[2] &&
buffer[0] == buffer[3] && buffer[0] == buffer[4]) {
printk(KERN_WARNING "All bytes are equal. It is not a TEA5767\n");
- return EINVAL;
+ return -EINVAL;
}
/* Status bytes:
@@ -390,7 +390,7 @@ int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
*/
if (((buffer[3] & 0x0f) != 0x00) || (buffer[4] != 0x00)) {
printk(KERN_WARNING "Chip ID is not zero. It is not a TEA5767\n");
- return EINVAL;
+ return -EINVAL;
}
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index 43d35bdb221..ceae6db901e 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -212,7 +212,7 @@ static void xc5000_TunerReset(struct dvb_frontend *fe)
dprintk(1, "%s()\n", __func__);
if (priv->cfg->tuner_callback) {
- ret = priv->cfg->tuner_callback(priv->cfg->priv,
+ ret = priv->cfg->tuner_callback(priv->devptr,
XC5000_TUNER_RESET, 0);
if (ret)
printk(KERN_ERR "xc5000: reset failed\n");
@@ -900,9 +900,9 @@ static const struct dvb_tuner_ops xc5000_tuner_ops = {
.get_status = xc5000_get_status
};
-struct dvb_frontend * xc5000_attach(struct dvb_frontend *fe,
- struct i2c_adapter *i2c,
- struct xc5000_config *cfg)
+struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct xc5000_config *cfg, void *devptr)
{
struct xc5000_priv *priv = NULL;
u16 id = 0;
@@ -916,6 +916,7 @@ struct dvb_frontend * xc5000_attach(struct dvb_frontend *fe,
priv->cfg = cfg;
priv->bandwidth = BANDWIDTH_6_MHZ;
priv->i2c = i2c;
+ priv->devptr = devptr;
/* Check if firmware has been loaded. It is possible that another
instance of the driver has loaded the firmware.
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index 0ee80f9d19b..c910715addc 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -31,29 +31,31 @@ struct xc5000_config {
u8 i2c_address;
u32 if_khz;
- /* For each bridge framework, when it attaches either analog or digital,
- * it has to store a reference back to its _core equivalent structure,
- * so that it can service the hardware by steering gpio's etc.
- * Each bridge implementation is different so cast priv accordingly.
- * The xc5000 driver cares not for this value, other than ensuring
- * it's passed back to a bridge during tuner_callback().
- */
- void *priv;
int (*tuner_callback) (void *priv, int command, int arg);
};
/* xc5000 callback command */
#define XC5000_TUNER_RESET 0
+/* For each bridge framework, when it attaches either analog or digital,
+ * it has to store a reference back to its _core equivalent structure,
+ * so that it can service the hardware by steering gpio's etc.
+ * Each bridge implementation is different so cast devptr accordingly.
+ * The xc5000 driver cares not for this value, other than ensuring
+ * it's passed back to a bridge during tuner_callback().
+ */
+
#if defined(CONFIG_MEDIA_TUNER_XC5000) || \
(defined(CONFIG_MEDIA_TUNER_XC5000_MODULE) && defined(MODULE))
extern struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg);
+ struct xc5000_config *cfg,
+ void *devptr);
#else
static inline struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c,
- struct xc5000_config *cfg)
+ struct xc5000_config *cfg,
+ void *devptr)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
diff --git a/drivers/media/common/tuners/xc5000_priv.h b/drivers/media/common/tuners/xc5000_priv.h
index 13b2d19341d..ecebfe4745a 100644
--- a/drivers/media/common/tuners/xc5000_priv.h
+++ b/drivers/media/common/tuners/xc5000_priv.h
@@ -31,6 +31,8 @@ struct xc5000_priv {
u8 video_standard;
u8 rf_mode;
u8 fwloaded;
+
+ void *devptr;
};
#endif
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index 7b0ea3bdfaf..f9d087669d5 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -634,7 +634,7 @@ int flexcop_frontend_init(struct flexcop_device *fc)
}
/* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */
- fc->fe = dvb_attach(vp310_mt312_attach,
+ fc->fe = dvb_attach(mt312_attach,
&skystar23_samsung_tbdu18132_config, i2c);
if (fc->fe != NULL) {
ops = &fc->fe->ops;
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index d1239b8342f..7588db1319d 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -1,6 +1,7 @@
config DVB_BT8XX
tristate "BT8xx based PCI cards"
depends on DVB_CORE && PCI && I2C && VIDEO_BT848
+ depends on HOTPLUG # due to FW_LOADER
select DVB_MT352 if !DVB_FE_CUSTOMISE
select DVB_SP887X if !DVB_FE_CUSTOMISE
select DVB_NXT6000 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/cinergyT2/Kconfig b/drivers/media/dvb/cinergyT2/Kconfig
index 3d778c5aba6..c03513b2cca 100644
--- a/drivers/media/dvb/cinergyT2/Kconfig
+++ b/drivers/media/dvb/cinergyT2/Kconfig
@@ -1,6 +1,6 @@
config DVB_CINERGYT2
tristate "Terratec CinergyT2/qanu USB2 DVB-T receiver"
- depends on DVB_CORE && USB
+ depends on DVB_CORE && USB && INPUT
help
Support for "TerraTec CinergyT2" USB2.0 Highspeed DVB Receivers
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 8cbdb0ec67e..588fbe105c2 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -910,15 +910,21 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
int curdelay = 100000000;
int slot;
+ /* Beware of too high a polling frequency: a single polling
+ * call may take several hundred milliseconds before it times out.
+ */
for (slot = 0; slot < ca->slot_count; slot++) {
switch (ca->slot_info[slot].slot_state) {
default:
case DVB_CA_SLOTSTATE_NONE:
+ delay = HZ * 60; /* 60s */
+ if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
+ delay = HZ * 5; /* 5s */
+ break;
case DVB_CA_SLOTSTATE_INVALID:
- delay = HZ * 60;
- if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) {
- delay = HZ / 10;
- }
+ delay = HZ * 60; /* 60s */
+ if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
+ delay = HZ / 10; /* 100ms */
break;
case DVB_CA_SLOTSTATE_UNINITIALISED:
@@ -926,19 +932,17 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
case DVB_CA_SLOTSTATE_VALIDATE:
case DVB_CA_SLOTSTATE_WAITFR:
case DVB_CA_SLOTSTATE_LINKINIT:
- delay = HZ / 10;
+ delay = HZ / 10; /* 100ms */
break;
case DVB_CA_SLOTSTATE_RUNNING:
- delay = HZ * 60;
- if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) {
- delay = HZ / 10;
- }
+ delay = HZ * 60; /* 60s */
+ if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
+ delay = HZ / 10; /* 100ms */
if (ca->open) {
if ((!ca->slot_info[slot].da_irq_supported) ||
- (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA))) {
- delay = HZ / 10;
- }
+ (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA)))
+ delay = HZ / 10; /* 100ms */
}
break;
}
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 4c1cff9feb2..cf4584e48b6 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -1,6 +1,7 @@
config DVB_USB
tristate "Support for various USB DVB devices"
depends on DVB_CORE && USB && I2C
+ depends on HOTPLUG # due to FW_LOADER
select FW_LOADER
help
By enabling this you will be able to choose the various supported
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 6d238460592..c20553c4da1 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -30,7 +30,7 @@ config DVB_CX24123
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_MT312
- tristate "Zarlink VP310/MT312 based"
+ tristate "Zarlink VP310/MT312/ZL10313 based"
depends on DVB_CORE && I2C
default m if DVB_FE_CUSTOMISE
help
@@ -97,7 +97,7 @@ comment "DVB-T (terrestrial) frontends"
config DVB_SP8870
tristate "Spase sp8870 based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
@@ -110,7 +110,7 @@ config DVB_SP8870
config DVB_SP887X
tristate "Spase sp887x based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
@@ -144,7 +144,7 @@ config DVB_L64781
config DVB_TDA1004X
tristate "Philips TDA10045H/TDA10046H based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
@@ -211,7 +211,7 @@ config DVB_DIB7000P
config DVB_TDA10048
tristate "Philips TDA10048HN based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
@@ -253,7 +253,7 @@ comment "ATSC (North American/Korean Terrestrial/Cable DTV) frontends"
config DVB_NXT200X
tristate "NxtWave Communications NXT2002/NXT2004 based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
@@ -268,7 +268,7 @@ config DVB_NXT200X
config DVB_OR51211
tristate "Oren OR51211 based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
@@ -281,7 +281,7 @@ config DVB_OR51211
config DVB_OR51132
tristate "Oren OR51132 based"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
@@ -297,7 +297,7 @@ config DVB_OR51132
config DVB_BCM3510
tristate "Broadcom BCM3510"
- depends on DVB_CORE && I2C
+ depends on DVB_CORE && I2C && HOTPLUG
default m if DVB_FE_CUSTOMISE
select FW_LOADER
help
diff --git a/drivers/media/dvb/frontends/itd1000.c b/drivers/media/dvb/frontends/itd1000.c
index 04c562ccf99..600dad6b41e 100644
--- a/drivers/media/dvb/frontends/itd1000.c
+++ b/drivers/media/dvb/frontends/itd1000.c
@@ -195,7 +195,7 @@ static void itd1000_set_vco(struct itd1000_state *state, u32 freq_khz)
}
}
-struct {
+static const struct {
u32 freq;
u8 values[10]; /* RFTR, RFST1 - RFST9 */
} itd1000_fre_values[] = {
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index 081ca3398c7..5ac9b15920f 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -737,7 +737,7 @@ static void mt312_release(struct dvb_frontend *fe)
}
#define MT312_SYS_CLK 90000000UL /* 90 MHz */
-static struct dvb_frontend_ops vp310_mt312_ops = {
+static struct dvb_frontend_ops mt312_ops = {
.info = {
.name = "Zarlink ???? DVB-S",
@@ -776,7 +776,7 @@ static struct dvb_frontend_ops vp310_mt312_ops = {
.set_voltage = mt312_set_voltage,
};
-struct dvb_frontend *vp310_mt312_attach(const struct mt312_config *config,
+struct dvb_frontend *mt312_attach(const struct mt312_config *config,
struct i2c_adapter *i2c)
{
struct mt312_state *state = NULL;
@@ -795,7 +795,7 @@ struct dvb_frontend *vp310_mt312_attach(const struct mt312_config *config,
goto error;
/* create dvb_frontend */
- memcpy(&state->frontend.ops, &vp310_mt312_ops,
+ memcpy(&state->frontend.ops, &mt312_ops,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
@@ -827,12 +827,13 @@ error:
kfree(state);
return NULL;
}
-EXPORT_SYMBOL(vp310_mt312_attach);
+EXPORT_SYMBOL(mt312_attach);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Zarlink VP310/MT312/ZL10313 DVB-S Demodulator driver");
MODULE_AUTHOR("Andreas Oberritter <obi@linuxtv.org>");
+MODULE_AUTHOR("Matthias Schwarzott <zzam@gentoo.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/mt312.h b/drivers/media/dvb/frontends/mt312.h
index de796eab391..29e3bb5496b 100644
--- a/drivers/media/dvb/frontends/mt312.h
+++ b/drivers/media/dvb/frontends/mt312.h
@@ -37,10 +37,10 @@ struct mt312_config {
};
#if defined(CONFIG_DVB_MT312) || (defined(CONFIG_DVB_MT312_MODULE) && defined(MODULE))
-struct dvb_frontend *vp310_mt312_attach(const struct mt312_config *config,
+struct dvb_frontend *mt312_attach(const struct mt312_config *config,
struct i2c_adapter *i2c);
#else
-static inline struct dvb_frontend *vp310_mt312_attach(
+static inline struct dvb_frontend *mt312_attach(
const struct mt312_config *config, struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index ae882432dd3..d4339b1b3b6 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -5,6 +5,7 @@ config TTPCI_EEPROM
config DVB_AV7110
tristate "AV7110 cards"
depends on DVB_CORE && PCI && I2C
+ depends on HOTPLUG
select FW_LOADER if !DVB_AV7110_FIRMWARE
select TTPCI_EEPROM
select VIDEO_SAA7146_VV
@@ -123,6 +124,7 @@ config DVB_BUDGET_AV
depends on DVB_BUDGET_CORE && I2C
select VIDEO_SAA7146_VV
depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV
+ depends on HOTPLUG # dependency of FW_LOADER
select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_STV0299 if !DVB_FE_CUSTOMISE
select DVB_TDA1004X if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/ttusb-dec/Kconfig b/drivers/media/dvb/ttusb-dec/Kconfig
index 83611012ef3..0712899e39a 100644
--- a/drivers/media/dvb/ttusb-dec/Kconfig
+++ b/drivers/media/dvb/ttusb-dec/Kconfig
@@ -1,6 +1,7 @@
config DVB_TTUSB_DEC
tristate "Technotrend/Hauppauge USB DEC devices"
depends on DVB_CORE && USB
+ depends on HOTPLUG # due to FW_LOADER
select FW_LOADER
select CRC32
help
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index fe743aa7f64..89d8d37838a 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -44,6 +44,10 @@ config VIDEO_TVEEPROM
tristate
depends on I2C
+config VIDEO_TUNER
+ tristate
+ depends on MEDIA_TUNER
+
#
# Multimedia Video device configuration
#
@@ -690,7 +694,7 @@ config VIDEO_MXB
tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
depends on PCI && VIDEO_V4L1 && I2C
select VIDEO_SAA7146_VV
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_SAA7111 if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TDA9840 if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TEA6415C if VIDEO_HELPER_CHIPS_AUTO
@@ -906,7 +910,7 @@ config SOC_CAMERA
config SOC_CAMERA_MT9M001
tristate "mt9m001 support"
- depends on SOC_CAMERA
+ depends on SOC_CAMERA && I2C
select GPIO_PCA953X if MT9M001_PCA9536_SWITCH
help
This driver supports MT9M001 cameras from Micron, monochrome
@@ -921,7 +925,7 @@ config MT9M001_PCA9536_SWITCH
config SOC_CAMERA_MT9V022
tristate "mt9v022 support"
- depends on SOC_CAMERA
+ depends on SOC_CAMERA && I2C
select GPIO_PCA953X if MT9V022_PCA9536_SWITCH
help
This driver supports MT9V022 cameras from Micron
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index a352c6e31f0..dff0d6abe91 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -84,7 +84,7 @@ obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
obj-$(CONFIG_VIDEO_DPC) += dpc7146.o
obj-$(CONFIG_TUNER_3036) += tuner-3036.o
-obj-$(CONFIG_MEDIA_TUNER) += tuner.o
+obj-$(CONFIG_VIDEO_TUNER) += tuner.o
obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index cab277fafa6..def10d08637 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -1,8 +1,9 @@
config VIDEO_AU0828
tristate "Auvitek AU0828 support"
- depends on VIDEO_DEV && I2C && INPUT && DVB_CORE
+ depends on VIDEO_DEV && I2C && INPUT && DVB_CORE && USB
select I2C_ALGOBIT
+ select VIDEO_TVEEPROM
select DVB_AU8522 if !DVB_FE_CUSTOMIZE
select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
---help---
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 1371b4e4b5f..c86a5f17eca 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -337,12 +337,10 @@ int au0828_dvb_register(struct au0828_dev *dev)
dvb->frontend = dvb_attach(au8522_attach,
&hauppauge_hvr950q_config,
&dev->i2c_adap);
- if (dvb->frontend != NULL) {
- hauppauge_hvr950q_tunerconfig.priv = dev;
+ if (dvb->frontend != NULL)
dvb_attach(xc5000_attach, dvb->frontend,
&dev->i2c_adap,
- &hauppauge_hvr950q_tunerconfig);
- }
+ &hauppauge_hvr950q_tunerconfig, dev);
break;
default:
printk(KERN_WARNING "The frontend of your DVB/ATSC card "
diff --git a/drivers/media/video/bt8xx/Kconfig b/drivers/media/video/bt8xx/Kconfig
index 7431ef6de9f..24a34fc1f2b 100644
--- a/drivers/media/video/bt8xx/Kconfig
+++ b/drivers/media/video/bt8xx/Kconfig
@@ -1,12 +1,13 @@
config VIDEO_BT848
tristate "BT848 Video For Linux"
depends on VIDEO_DEV && PCI && I2C && VIDEO_V4L2 && INPUT
+ depends on HOTPLUG # due to FW_LOADER
select I2C_ALGOBIT
select FW_LOADER
select VIDEO_BTCX
select VIDEOBUF_DMA_SG
select VIDEO_IR
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TVAUDIO if VIDEO_HELPER_CHIPS_AUTO
diff --git a/drivers/media/video/cx18/Kconfig b/drivers/media/video/cx18/Kconfig
index acc4b47f1d1..5f942690570 100644
--- a/drivers/media/video/cx18/Kconfig
+++ b/drivers/media/video/cx18/Kconfig
@@ -1,14 +1,17 @@
config VIDEO_CX18
tristate "Conexant cx23418 MPEG encoder support"
depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C && EXPERIMENTAL
+ depends on INPUT # due to VIDEO_IR
+ depends on HOTPLUG # due to FW_LOADER
select I2C_ALGOBIT
select FW_LOADER
select VIDEO_IR
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_CX2341X
select VIDEO_CS5345
select DVB_S5H1409
+ select MEDIA_TUNER_MXL5005S
---help---
This is a video4linux driver for Conexant cx23418 based
PCI combo video recorder devices.
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index f5e3ba1f535..553adbf2cd4 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -47,11 +47,12 @@ static struct cx18_card_tuner_i2c cx18_i2c_std = {
static const struct cx18_card cx18_card_hvr1600_esmt = {
.type = CX18_CARD_HVR_1600_ESMT,
.name = "Hauppauge HVR-1600",
- .comment = "DVB & VBI are not yet supported\n",
+ .comment = "VBI is not yet supported\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_CX23418,
.hw_muxer = CX18_HW_CS5345,
- .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER | CX18_HW_CS5345,
+ .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER |
+ CX18_HW_CS5345 | CX18_HW_DVB,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE7 },
{ CX18_CARD_INPUT_SVIDEO1, 1, CX23418_SVIDEO1 },
@@ -86,11 +87,12 @@ static const struct cx18_card cx18_card_hvr1600_esmt = {
static const struct cx18_card cx18_card_hvr1600_samsung = {
.type = CX18_CARD_HVR_1600_SAMSUNG,
.name = "Hauppauge HVR-1600 (Preproduction)",
- .comment = "DVB & VBI are not yet supported\n",
+ .comment = "VBI is not yet supported\n",
.v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_CX23418,
.hw_muxer = CX18_HW_CS5345,
- .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER | CX18_HW_CS5345,
+ .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER |
+ CX18_HW_CS5345 | CX18_HW_DVB,
.video_inputs = {
{ CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE7 },
{ CX18_CARD_INPUT_SVIDEO1, 1, CX23418_SVIDEO1 },
@@ -134,14 +136,15 @@ static const struct cx18_card_pci_info cx18_pci_h900[] = {
static const struct cx18_card cx18_card_h900 = {
.type = CX18_CARD_COMPRO_H900,
.name = "Compro VideoMate H900",
- .comment = "Not yet supported!\n",
- .v4l2_capabilities = 0,
+ .comment = "DVB & VBI are not yet supported\n",
+ .v4l2_capabilities = CX18_CAP_ENCODER,
.hw_audio_ctrl = CX18_HW_CX23418,
.hw_all = CX18_HW_TUNER,
.video_inputs = {
- { CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE7 },
- { CX18_CARD_INPUT_SVIDEO1, 1, CX23418_SVIDEO1 },
- { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
+ { CX18_CARD_INPUT_VID_TUNER, 0, CX23418_COMPOSITE2 },
+ { CX18_CARD_INPUT_SVIDEO1, 1,
+ CX23418_SVIDEO_LUMA3 | CX23418_SVIDEO_CHROMA4 },
+ { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE1 },
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER,
@@ -163,6 +166,7 @@ static const struct cx18_card cx18_card_h900 = {
.tune_lane = 0,
.initial_emrs = 0,
},
+ .xceive_pin = 15,
.pci_list = cx18_pci_h900,
.i2c = &cx18_i2c_std,
};
@@ -200,8 +204,6 @@ static const struct cx18_card cx18_card_mpc718 = {
/* XC3028 tuner */
{ .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
},
- /* tuner reset */
- .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 },
.ddr = {
/* Probably Samsung K4D263238G-VC33 memory */
.chip_config = 0x003,
@@ -211,6 +213,7 @@ static const struct cx18_card cx18_card_mpc718 = {
.tune_lane = 0,
.initial_emrs = 2,
},
+ .xceive_pin = 15,
.pci_list = cx18_pci_mpc718,
.i2c = &cx18_i2c_std,
};
diff --git a/drivers/media/video/cx18/cx18-cards.h b/drivers/media/video/cx18/cx18-cards.h
index bca249bdd33..bccb67f0db1 100644
--- a/drivers/media/video/cx18/cx18-cards.h
+++ b/drivers/media/video/cx18/cx18-cards.h
@@ -114,8 +114,8 @@ struct cx18_card_pci_info {
/* The mask is the set of bits used by the operation */
struct cx18_gpio_init { /* set initial GPIO DIR and OUT values */
- u16 direction; /* DIR setting. Leave to 0 if no init is needed */
- u16 initial_value;
+ u32 direction; /* DIR setting. Leave to 0 if no init is needed */
+ u32 initial_value;
};
struct cx18_card_tuner {
@@ -153,6 +153,7 @@ struct cx18_card {
struct cx18_card_audio_input radio_input;
/* GPIO card-specific settings */
+ u8 xceive_pin; /* XCeive tuner GPIO reset pin */
struct cx18_gpio_init gpio_init;
struct cx18_card_tuner tuners[CX18_CARD_MAX_TUNERS];
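Annotation: the header hunk above widens the GPIO init fields from u16 to u32 (the CX23418 drives two 16-bit GPIO banks) and adds a per-card XCeive reset pin. A minimal sketch of a card entry using the new fields follows; the card name, pin number and direction/value masks are illustrative only, not taken from any real board table.

/* Hypothetical card entry showing the widened u32 GPIO fields and the
 * new XCeive reset pin; the numeric values are made up for illustration. */
static const struct cx18_card cx18_card_example = {
	.type = CX18_CARD_COMPRO_H900,
	.name = "Example XC3028-based card",
	.hw_all = CX18_HW_TUNER,
	.xceive_pin = 15,			/* GPIO line wired to tuner reset */
	.gpio_init = {
		.direction = 0x00029000,	/* u32: can now address lines above 15 */
		.initial_value = 0x00029000,
	},
	.i2c = &cx18_i2c_std,
};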
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 8f5ed9b4bf8..0dd4e052997 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -164,16 +164,6 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(CX18_VERSION);
-int cx18_waitq(wait_queue_head_t *waitq)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
- schedule();
- finish_wait(waitq, &wait);
- return signal_pending(current) ? -EINTR : 0;
-}
-
/* Generic utility functions */
int cx18_msleep_timeout(unsigned int msecs, int intr)
{
@@ -220,13 +210,13 @@ static void cx18_process_eeprom(struct cx18 *cx)
/* Many thanks to Steven Toth from Hauppauge for providing the
model numbers */
+ /* Note: the Samsung memory models cannot be reliably determined
+ from the model number. Use the cardtype module option if you
+ have one of these preproduction models. */
switch (tv.model) {
- case 74000 ... 74099:
+ case 74000 ... 74999:
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
break;
- case 74700 ... 74799:
- cx->card = cx18_get_card(CX18_CARD_HVR_1600_SAMSUNG);
- break;
case 0:
CX18_ERR("Invalid EEPROM\n");
return;
@@ -548,6 +538,7 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *dev,
return 0;
}
+#ifdef MODULE
static u32 cx18_request_module(struct cx18 *cx, u32 hw,
const char *name, u32 id)
{
@@ -560,12 +551,14 @@ static u32 cx18_request_module(struct cx18 *cx, u32 hw,
CX18_DEBUG_INFO("Loaded module %s\n", name);
return hw;
}
+#endif
static void cx18_load_and_init_modules(struct cx18 *cx)
{
u32 hw = cx->card->hw_all;
int i;
+#ifdef MODULE
/* load modules */
#ifndef CONFIG_MEDIA_TUNER
hw = cx18_request_module(cx, hw, "tuner", CX18_HW_TUNER);
@@ -573,6 +566,7 @@ static void cx18_load_and_init_modules(struct cx18 *cx)
#ifndef CONFIG_VIDEO_CS5345
hw = cx18_request_module(cx, hw, "cs5345", CX18_HW_CS5345);
#endif
+#endif
/* check which i2c devices are actually found */
for (i = 0; i < 32; i++) {
@@ -613,7 +607,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
}
cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
- if (cx == 0) {
+ if (!cx) {
spin_unlock(&cx18_cards_lock);
return -ENOMEM;
}
@@ -801,7 +795,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
return 0;
free_streams:
- cx18_streams_cleanup(cx);
+ cx18_streams_cleanup(cx, 1);
free_irq:
free_irq(cx->dev->irq, (void *)cx);
free_i2c:
@@ -904,14 +898,13 @@ static void cx18_remove(struct pci_dev *pci_dev)
cx18_halt_firmware(cx);
- cx18_streams_cleanup(cx);
+ cx18_streams_cleanup(cx, 1);
exit_cx18_i2c(cx);
free_irq(cx->dev->irq, (void *)cx);
- if (cx->dev)
- cx18_iounmap(cx);
+ cx18_iounmap(cx);
release_mem_region(cx->base_addr, CX18_MEM_SIZE);
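Annotation: cx18_request_module() only matters when the helper chips are built as modules, so the hunk above wraps it and its call sites in #ifdef MODULE; in a built-in configuration the whole block compiles away. The sketch below reconstructs the shape of that helper from the visible tail of the hunk; the early-return details are an approximation, not a verbatim copy of the driver.

#ifdef MODULE
/* Sketch: request a helper module only if the card actually uses that
 * hardware block (hw & id); on failure, drop the bit so later probing
 * skips the missing device. */
static u32 cx18_request_module(struct cx18 *cx, u32 hw,
			       const char *name, u32 id)
{
	if ((hw & id) == 0)
		return hw;
	if (request_module(name) != 0) {
		CX18_ERR("Failed to load module %s\n", name);
		return hw & ~id;
	}
	CX18_DEBUG_INFO("Loaded module %s\n", name);
	return hw;
}
#endif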
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 2ee939193bb..a2a6c58d12f 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -444,9 +444,6 @@ extern spinlock_t cx18_cards_lock;
/* Return non-zero if a signal is pending */
int cx18_msleep_timeout(unsigned int msecs, int intr);
-/* Wait on queue, returns -EINTR if interrupted */
-int cx18_waitq(wait_queue_head_t *waitq);
-
/* Read Hauppauge eeprom */
struct tveeprom; /* forward reference */
void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv);
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index 65efe69d939..c9744173f96 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -24,25 +24,27 @@
#include "cx18-streams.h"
#include "cx18-cards.h"
#include "s5h1409.h"
-
-/* Wait until the MXL500X driver is merged */
-#ifdef HAVE_MXL500X
-#include "mxl500x.h"
-#endif
+#include "mxl5005s.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define CX18_REG_DMUX_NUM_PORT_0_CONTROL 0xd5a000
-#ifdef HAVE_MXL500X
-static struct mxl500x_config hauppauge_hvr1600_tuner = {
- .delsys = MXL500x_MODE_ATSC,
- .octf = MXL500x_OCTF_CH,
- .xtal_freq = 16000000,
- .iflo_freq = 5380000,
- .ref_freq = 322800000,
- .rssi_ena = MXL_RSSI_ENABLE,
- .addr = 0xC6 >> 1,
+static struct mxl5005s_config hauppauge_hvr1600_tuner = {
+ .i2c_address = 0xC6 >> 1,
+ .if_freq = IF_FREQ_5380000HZ,
+ .xtal_freq = CRYSTAL_FREQ_16000000HZ,
+ .agc_mode = MXL_SINGLE_AGC,
+ .tracking_filter = MXL_TF_C_H,
+ .rssi_enable = MXL_RSSI_ENABLE,
+ .cap_select = MXL_CAP_SEL_ENABLE,
+ .div_out = MXL_DIV_OUT_4,
+ .clock_out = MXL_CLOCK_OUT_DISABLE,
+ .output_load = MXL5005S_IF_OUTPUT_LOAD_200_OHM,
+ .top = MXL5005S_TOP_25P2,
+ .mod_mode = MXL_DIGITAL_MODE,
+ .if_mode = MXL_ZERO_IF,
+ .AgcMasterByte = 0x00,
};
static struct s5h1409_config hauppauge_hvr1600_config = {
@@ -55,7 +57,6 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
.mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
};
-#endif
static int dvb_register(struct cx18_stream *stream);
@@ -252,21 +253,18 @@ static int dvb_register(struct cx18_stream *stream)
int ret = 0;
switch (cx->card->type) {
-/* Wait until the MXL500X driver is merged */
-#ifdef HAVE_MXL500X
case CX18_CARD_HVR_1600_ESMT:
case CX18_CARD_HVR_1600_SAMSUNG:
dvb->fe = dvb_attach(s5h1409_attach,
&hauppauge_hvr1600_config,
&cx->i2c_adap[0]);
if (dvb->fe != NULL) {
- dvb_attach(mxl500x_attach, dvb->fe,
- &hauppauge_hvr1600_tuner,
- &cx->i2c_adap[0]);
+ dvb_attach(mxl5005s_attach, dvb->fe,
+ &cx->i2c_adap[0],
+ &hauppauge_hvr1600_tuner);
ret = 0;
}
break;
-#endif
default:
/* No Digital Tv Support */
break;
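Annotation: with the MxL5005s driver merged, the placeholder mxl500x code and its HAVE_MXL500X guards go away. The sketch below shows the resulting attach order for the HVR-1600 (demod first, then the tuner on the same I2C adapter); it reuses the config names from the hunk above but wraps them in a hypothetical helper, and its error handling is illustrative rather than the driver's exact behaviour.

/* Sketch: attach the S5H1409 demod, then hang the MxL5005s tuner off
 * the returned frontend.  Note the mxl5005s_attach argument order:
 * (frontend, i2c adapter, config). */
static int hvr1600_attach_sketch(struct cx18 *cx, struct cx18_dvb *dvb)
{
	dvb->fe = dvb_attach(s5h1409_attach, &hauppauge_hvr1600_config,
			     &cx->i2c_adap[0]);
	if (dvb->fe == NULL)
		return -ENODEV;
	if (!dvb_attach(mxl5005s_attach, dvb->fe, &cx->i2c_adap[0],
			&hauppauge_hvr1600_tuner))
		return -ENODEV;		/* illustrative: the hunk above continues anyway */
	return 0;
}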
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 69303065a29..0b3141db174 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -39,7 +39,7 @@
associated VBI streams are also automatically claimed.
Possible error returns: -EBUSY if someone else has claimed
the stream or 0 on success. */
-int cx18_claim_stream(struct cx18_open_id *id, int type)
+static int cx18_claim_stream(struct cx18_open_id *id, int type)
{
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[type];
@@ -87,7 +87,7 @@ int cx18_claim_stream(struct cx18_open_id *id, int type)
/* This function releases a previously claimed stream. It will take into
account associated VBI streams. */
-void cx18_release_stream(struct cx18_stream *s)
+static void cx18_release_stream(struct cx18_stream *s)
{
struct cx18 *cx = s->cx;
struct cx18_stream *s_vbi;
@@ -662,6 +662,8 @@ int cx18_v4l2_open(struct inode *inode, struct file *filp)
for (x = 0; cx == NULL && x < cx18_cards_active; x++) {
/* find out which stream this open was on */
for (y = 0; y < CX18_MAX_STREAMS; y++) {
+ if (cx18_cards[x] == NULL)
+ continue;
s = &cx18_cards[x]->streams[y];
if (s->v4l2dev && s->v4l2dev->minor == minor) {
cx = cx18_cards[x];
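Annotation: cx18_cards[] can now contain NULL slots when a probe failed part-way, so the open path has to skip empty entries before touching their streams. A self-contained sketch of that scan, with the loop body reassembled from the hunk and its context lines and wrapped in a hypothetical helper:

/* Sketch: map a minor number back to its card, tolerating NULL slots
 * left behind by failed probes. */
static struct cx18 *find_open_card_sketch(int minor)
{
	struct cx18 *cx = NULL;
	struct cx18_stream *s;
	int x, y;

	for (x = 0; cx == NULL && x < cx18_cards_active; x++) {
		if (cx18_cards[x] == NULL)
			continue;	/* slot left empty by a failed probe */
		for (y = 0; y < CX18_MAX_STREAMS; y++) {
			s = &cx18_cards[x]->streams[y];
			if (s->v4l2dev && s->v4l2dev->minor == minor) {
				cx = cx18_cards[x];
				break;
			}
		}
	}
	return cx;
}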
diff --git a/drivers/media/video/cx18/cx18-fileops.h b/drivers/media/video/cx18/cx18-fileops.h
index 16cdafbd24c..46da0282fc7 100644
--- a/drivers/media/video/cx18/cx18-fileops.h
+++ b/drivers/media/video/cx18/cx18-fileops.h
@@ -34,12 +34,3 @@ void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
void cx18_unmute(struct cx18 *cx);
-/* Utilities */
-
-/* Try to claim a stream for the filehandle. Return 0 on success,
- -EBUSY if stream already claimed. Once a stream is claimed, it
- remains claimed until the associated filehandle is closed. */
-int cx18_claim_stream(struct cx18_open_id *id, int type);
-
-/* Release a previously claimed stream. */
-void cx18_release_stream(struct cx18_stream *s);
diff --git a/drivers/media/video/cx18/cx18-gpio.c b/drivers/media/video/cx18/cx18-gpio.c
index 19253e6b867..bb8bc86086d 100644
--- a/drivers/media/video/cx18/cx18-gpio.c
+++ b/drivers/media/video/cx18/cx18-gpio.c
@@ -35,6 +35,9 @@
#define CX18_REG_GPIO_OUT2 0xc78104
#define CX18_REG_GPIO_DIR2 0xc7810c
+static u32 gpio_dir;
+static u32 gpio_val;
+
/*
* HVR-1600 GPIO pins, courtesy of Hauppauge:
*
@@ -44,31 +47,53 @@
* gpio13: cs5345 reset pin
*/
+static void gpio_write(struct cx18 *cx)
+{
+ write_reg((gpio_dir & 0xffff) << 16, CX18_REG_GPIO_DIR1);
+ write_reg(((gpio_dir & 0xffff) << 16) | (gpio_val & 0xffff),
+ CX18_REG_GPIO_OUT1);
+ write_reg(gpio_dir & 0xffff0000, CX18_REG_GPIO_DIR2);
+ write_reg((gpio_dir & 0xffff0000) | ((gpio_val & 0xffff0000) >> 16),
+ CX18_REG_GPIO_OUT2);
+}
+
void cx18_gpio_init(struct cx18 *cx)
{
- if (cx->card->gpio_init.direction == 0)
+ gpio_dir = cx->card->gpio_init.direction;
+ gpio_val = cx->card->gpio_init.initial_value;
+
+ if (gpio_dir == 0)
return;
- CX18_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
- read_reg(CX18_REG_GPIO_DIR1), read_reg(CX18_REG_GPIO_OUT1));
+ gpio_dir |= 1 << cx->card->xceive_pin;
+ gpio_val |= 1 << cx->card->xceive_pin;
- /* init output data then direction */
- write_reg(cx->card->gpio_init.direction << 16, CX18_REG_GPIO_DIR1);
- write_reg(0, CX18_REG_GPIO_DIR2);
- write_reg((cx->card->gpio_init.direction << 16) |
- cx->card->gpio_init.initial_value, CX18_REG_GPIO_OUT1);
- write_reg(0, CX18_REG_GPIO_OUT2);
+ CX18_DEBUG_INFO("GPIO initial dir: %08x/%08x out: %08x/%08x\n",
+ read_reg(CX18_REG_GPIO_DIR1), read_reg(CX18_REG_GPIO_DIR2),
+ read_reg(CX18_REG_GPIO_OUT1), read_reg(CX18_REG_GPIO_OUT2));
+
+ gpio_write(cx);
}
/* Xceive tuner reset function */
int cx18_reset_tuner_gpio(void *dev, int cmd, int value)
{
struct i2c_algo_bit_data *algo = dev;
- struct cx18 *cx = algo->data;
-/* int curdir, curout;*/
+ struct cx18_i2c_algo_callback_data *cb_data = algo->data;
+ struct cx18 *cx = cb_data->cx;
if (cmd != XC2028_TUNER_RESET)
return 0;
CX18_DEBUG_INFO("Resetting tuner\n");
+
+ gpio_dir |= 1 << cx->card->xceive_pin;
+ gpio_val &= ~(1 << cx->card->xceive_pin);
+
+ gpio_write(cx);
+ schedule_timeout_interruptible(msecs_to_jiffies(1));
+
+ gpio_val |= 1 << cx->card->xceive_pin;
+ gpio_write(cx);
+ schedule_timeout_interruptible(msecs_to_jiffies(1));
return 0;
}
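Annotation: the GPIO rework above keeps 32-bit direction/value shadows and a gpio_write() helper that splits them across the two 16-bit register banks; the XC2028 reset then becomes a simple low pulse on the card's xceive_pin. A compact sketch of that pulse, factored into a hypothetical helper using the same file-static shadows:

/* Sketch: pull the XCeive reset line low for roughly 1 ms, then release
 * it, rewriting both GPIO banks from the cached shadows each time. */
static void xceive_reset_pulse(struct cx18 *cx)
{
	u32 mask = 1 << cx->card->xceive_pin;

	gpio_dir |= mask;		/* ensure the pin is an output */
	gpio_val &= ~mask;		/* assert reset (line driven low) */
	gpio_write(cx);
	schedule_timeout_interruptible(msecs_to_jiffies(1));

	gpio_val |= mask;		/* release reset */
	gpio_write(cx);
	schedule_timeout_interruptible(msecs_to_jiffies(1));
}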
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index 18c88d1e483..4f08a4058d1 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -25,6 +25,7 @@
#include "cx18-cards.h"
#include "cx18-gpio.h"
#include "cx18-av-core.h"
+#include "cx18-i2c.h"
#include <media/ir-kbd-i2c.h>
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index 65af1bb507c..6990b77c620 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -26,17 +26,6 @@
#include "cx18-queue.h"
#include "cx18-scb.h"
-int cx18_buf_copy_from_user(struct cx18_stream *s, struct cx18_buffer *buf,
- const char __user *src, int copybytes)
-{
- if (s->buf_size - buf->bytesused < copybytes)
- copybytes = s->buf_size - buf->bytesused;
- if (copy_from_user(buf->buf + buf->bytesused, src, copybytes))
- return -EFAULT;
- buf->bytesused += copybytes;
- return copybytes;
-}
-
void cx18_buf_swap(struct cx18_buffer *buf)
{
int i;
@@ -159,8 +148,9 @@ static void cx18_queue_move_buf(struct cx18_stream *s, struct cx18_queue *from,
-ENOMEM is returned if the buffers could not be obtained, 0 if all
buffers where obtained from the 'from' list and if non-zero then
the number of stolen buffers is returned. */
-int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
- struct cx18_queue *steal, struct cx18_queue *to, int needed_bytes)
+static int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
+ struct cx18_queue *steal, struct cx18_queue *to,
+ int needed_bytes)
{
unsigned long flags;
int rc = 0;
@@ -239,12 +229,12 @@ int cx18_stream_alloc(struct cx18_stream *s)
/* allocate stream buffers. Initially all buffers are in q_free. */
for (i = 0; i < s->buffers; i++) {
- struct cx18_buffer *buf =
- kzalloc(sizeof(struct cx18_buffer), GFP_KERNEL);
+ struct cx18_buffer *buf = kzalloc(sizeof(struct cx18_buffer),
+ GFP_KERNEL|__GFP_NOWARN);
if (buf == NULL)
break;
- buf->buf = kmalloc(s->buf_size, GFP_KERNEL);
+ buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
if (buf->buf == NULL) {
kfree(buf);
break;
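Annotation: adding __GFP_NOWARN to the per-buffer allocations keeps the page allocator from logging a warning when memory is tight, because the driver already copes with getting fewer buffers than requested. A sketch of that tolerant loop, wrapped in a hypothetical helper and with the buffer bookkeeping elided:

/* Sketch: allocate stream buffers quietly; stop on the first failure
 * and work with however many buffers were actually obtained. */
static int alloc_stream_bufs_sketch(struct cx18_stream *s)
{
	int i;

	for (i = 0; i < s->buffers; i++) {
		struct cx18_buffer *buf = kzalloc(sizeof(*buf),
						  GFP_KERNEL | __GFP_NOWARN);

		if (buf == NULL)
			break;			/* partial success is acceptable */
		buf->buf = kmalloc(s->buf_size, GFP_KERNEL | __GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		/* ...list init, DMA mapping and enqueue onto q_free as above... */
	}
	return i;	/* number of buffers obtained */
}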
diff --git a/drivers/media/video/cx18/cx18-queue.h b/drivers/media/video/cx18/cx18-queue.h
index f86c8a6fa6e..91423b9863a 100644
--- a/drivers/media/video/cx18/cx18-queue.h
+++ b/drivers/media/video/cx18/cx18-queue.h
@@ -39,8 +39,6 @@ static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
s->buf_size, s->dma);
}
-int cx18_buf_copy_from_user(struct cx18_stream *s, struct cx18_buffer *buf,
- const char __user *src, int copybytes);
void cx18_buf_swap(struct cx18_buffer *buf);
/* cx18_queue utility functions */
@@ -48,8 +46,6 @@ void cx18_queue_init(struct cx18_queue *q);
void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
struct cx18_queue *q);
struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
-int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
- struct cx18_queue *steal, struct cx18_queue *to, int needed_bytes);
struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id,
u32 bytesused);
void cx18_flush_queues(struct cx18_stream *s);
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index afb141b2027..4ca9d847f1b 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -218,7 +218,7 @@ int cx18_streams_setup(struct cx18 *cx)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
- cx18_streams_cleanup(cx);
+ cx18_streams_cleanup(cx, 0);
return -ENOMEM;
}
@@ -296,12 +296,12 @@ int cx18_streams_register(struct cx18 *cx)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
- cx18_streams_cleanup(cx);
+ cx18_streams_cleanup(cx, 1);
return -ENOMEM;
}
/* Unregister v4l2 devices */
-void cx18_streams_cleanup(struct cx18 *cx)
+void cx18_streams_cleanup(struct cx18 *cx, int unregister)
{
struct video_device *vdev;
int type;
@@ -319,8 +319,11 @@ void cx18_streams_cleanup(struct cx18 *cx)
cx18_stream_free(&cx->streams[type]);
- /* Unregister device */
- video_unregister_device(vdev);
+ /* Unregister or release device */
+ if (unregister)
+ video_unregister_device(vdev);
+ else
+ video_device_release(vdev);
}
}
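Annotation: the new 'unregister' argument distinguishes the two cleanup situations: after cx18_streams_register() the video devices are registered and must be unregistered, but if setup fails before registration they have only been allocated and must just be released. The sketch below isolates that decision in a hypothetical helper:

/* Sketch: tear down one video_device depending on how far setup got. */
static void stream_cleanup_one(struct video_device *vdev, int unregister)
{
	if (unregister)
		video_unregister_device(vdev);	/* device was registered */
	else
		video_device_release(vdev);	/* setup failed before registration */
}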
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 8c7ba7d2fa7..f327e947b24 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -24,7 +24,7 @@
u32 cx18_find_handle(struct cx18 *cx);
int cx18_streams_setup(struct cx18 *cx);
int cx18_streams_register(struct cx18 *cx);
-void cx18_streams_cleanup(struct cx18 *cx);
+void cx18_streams_cleanup(struct cx18 *cx, int unregister);
/* Capture related */
int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig
index cadf936c367..7bf14c9a15c 100644
--- a/drivers/media/video/cx23885/Kconfig
+++ b/drivers/media/video/cx23885/Kconfig
@@ -1,18 +1,20 @@
config VIDEO_CX23885
tristate "Conexant cx23885 (2388x successor) support"
depends on DVB_CORE && VIDEO_DEV && PCI && I2C && INPUT
+ depends on HOTPLUG # due to FW_LOADER
select I2C_ALGOBIT
select FW_LOADER
select VIDEO_BTCX
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_IR
select VIDEOBUF_DVB
select VIDEO_CX25840
+ select VIDEO_CX2341X
+ select DVB_DIB7000P if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_MT2131 if !DVB_FE_CUSTOMISE
select DVB_S5H1409 if !DVB_FE_CUSTOMISE
select DVB_LGDT330X if !DVB_FE_CUSTOMISE
- select DVB_PLL if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMIZE
select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE
select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 6ebf58724a0..20e05f23054 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -200,6 +200,10 @@ struct cx23885_subid cx23885_subids[] = {
.card = CX23885_BOARD_HAUPPAUGE_HVR1200,
}, {
.subvendor = 0x0070,
+ .subdevice = 0x71d3,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1200,
+ }, {
+ .subvendor = 0x0070,
.subdevice = 0x8101,
.card = CX23885_BOARD_HAUPPAUGE_HVR1700,
}, {
@@ -245,6 +249,33 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
/* Make sure we support the board model */
switch (tv.model)
{
+ case 71009:
+ /* WinTV-HVR1200 (PCIe, Retail, full height)
+ * DVB-T and basic analog */
+ case 71359:
+ /* WinTV-HVR1200 (PCIe, OEM, half height)
+ * DVB-T and basic analog */
+ case 71439:
+ /* WinTV-HVR1200 (PCIe, OEM, half height)
+ * DVB-T and basic analog */
+ case 71449:
+ /* WinTV-HVR1200 (PCIe, OEM, full height)
+ * DVB-T and basic analog */
+ case 71939:
+ /* WinTV-HVR1200 (PCIe, OEM, half height)
+ * DVB-T and basic analog */
+ case 71949:
+ /* WinTV-HVR1200 (PCIe, OEM, full height)
+ * DVB-T and basic analog */
+ case 71959:
+ /* WinTV-HVR1200 (PCIe, OEM, full height)
+ * DVB-T and basic analog */
+ case 71979:
+ /* WinTV-HVR1200 (PCIe, OEM, half height)
+ * DVB-T and basic analog */
+ case 71999:
+ /* WinTV-HVR1200 (PCIe, OEM, full height)
+ * DVB-T and basic analog */
case 76601: /* WinTV-HVR1800lp (PCIe, Retail, No IR, Dual channel ATSC and MPEG2 HW Encoder */
case 77001: /* WinTV-HVR1500 (Express Card, OEM, No IR, ATSC and Basic analog */
case 77011: /* WinTV-HVR1500 (Express Card, Retail, No IR, ATSC and Basic analog */
@@ -263,8 +294,11 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
case 80019:
/* WinTV-HVR1400 (Express Card, Retail, IR,
* DVB-T and Basic analog */
+ case 81509:
+ /* WinTV-HVR1700 (PCIe, OEM, No IR, half height)
+ * DVB-T and MPEG2 HW Encoder */
case 81519:
- /* WinTV-HVR1700 (PCIe, Retail, No IR, half height,
+ /* WinTV-HVR1700 (PCIe, OEM, No IR, full height)
* DVB-T and MPEG2 HW Encoder */
break;
default:
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index f05649727b6..022aa391937 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -37,7 +37,6 @@
#include "lgdt330x.h"
#include "xc5000.h"
#include "tda10048.h"
-#include "dvb-pll.h"
#include "tuner-xc2028.h"
#include "tuner-simple.h"
#include "dib7000p.h"
@@ -385,12 +384,10 @@ static int dvb_register(struct cx23885_tsport *port)
port->dvb.frontend = dvb_attach(s5h1409_attach,
&hauppauge_hvr1500q_config,
&dev->i2c_bus[0].i2c_adap);
- if (port->dvb.frontend != NULL) {
- hauppauge_hvr1500q_tunerconfig.priv = i2c_bus;
+ if (port->dvb.frontend != NULL)
dvb_attach(xc5000_attach, port->dvb.frontend,
&i2c_bus->i2c_adap,
- &hauppauge_hvr1500q_tunerconfig);
- }
+ &hauppauge_hvr1500q_tunerconfig, i2c_bus);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1500:
i2c_bus = &dev->i2c_bus[1];
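Annotation: the xc5000 attach now passes the device/bus cookie as an explicit fourth argument instead of smuggling it through the config's priv field before the call. The sketch below contrasts the two forms in a hypothetical wrapper; the prototype is inferred from the calls in this series, and fe, i2c, cfg and devptr stand in for the driver's real objects.

/* Sketch: old vs. new xc5000 tuner attach. */
static struct dvb_frontend *attach_xc5000_sketch(struct dvb_frontend *fe,
						 struct i2c_adapter *i2c,
						 struct xc5000_config *cfg,
						 void *devptr)
{
	/* previously: cfg->priv = devptr;
	 *             dvb_attach(xc5000_attach, fe, i2c, cfg); */
	return dvb_attach(xc5000_attach, fe, i2c, cfg, devptr);
}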
diff --git a/drivers/media/video/cx25840/Kconfig b/drivers/media/video/cx25840/Kconfig
index 7cf29a03ed6..448f4cd0ce3 100644
--- a/drivers/media/video/cx25840/Kconfig
+++ b/drivers/media/video/cx25840/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_CX25840
tristate "Conexant CX2584x audio/video decoders"
depends on VIDEO_V4L2 && I2C && EXPERIMENTAL
+ depends on HOTPLUG # due to FW_LOADER
select FW_LOADER
---help---
Support for the Conexant CX2584x audio/video decoders.
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index b0d7d6a7a4c..10e20d8196d 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -2,10 +2,9 @@ config VIDEO_CX88
tristate "Conexant 2388x (bt878 successor) support"
depends on VIDEO_DEV && PCI && I2C && INPUT
select I2C_ALGOBIT
- select FW_LOADER
select VIDEO_BTCX
select VIDEOBUF_DMA_SG
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_IR
select VIDEO_WM8775 if VIDEO_HELPER_CHIPS_AUTO
@@ -34,8 +33,9 @@ config VIDEO_CX88_ALSA
config VIDEO_CX88_BLACKBIRD
tristate "Blackbird MPEG encoder support (cx2388x + cx23416)"
- depends on VIDEO_CX88
+ depends on VIDEO_CX88 && HOTPLUG
select VIDEO_CX2341X
+ select FW_LOADER
---help---
This adds support for MPEG encoder cards based on the
Blackbird reference design, using the Conexant 2388x
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 1c7fe6862a6..d96173ff1db 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -509,9 +509,6 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
if (!fe) {
printk(KERN_ERR "%s/2: xc3028 attach failed\n",
dev->core->name);
- dvb_frontend_detach(dev->dvb.frontend);
- dvb_unregister_frontend(dev->dvb.frontend);
- dev->dvb.frontend = NULL;
return -EINVAL;
}
@@ -523,20 +520,23 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
static int dvb_register(struct cx8802_dev *dev)
{
+ struct cx88_core *core = dev->core;
+
/* init struct videobuf_dvb */
- dev->dvb.name = dev->core->name;
+ dev->dvb.name = core->name;
dev->ts_gen_cntrl = 0x0c;
/* init frontend */
- switch (dev->core->boardnr) {
+ switch (core->boardnr) {
case CX88_BOARD_HAUPPAUGE_DVB_T1:
dev->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- &dev->core->i2c_adap,
- DVB_PLL_THOMSON_DTT759X);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x61, &core->i2c_adap,
+ DVB_PLL_THOMSON_DTT759X))
+ goto frontend_detach;
}
break;
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
@@ -545,11 +545,12 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_WINFAST_DTV1000:
dev->dvb.frontend = dvb_attach(cx22702_attach,
&connexant_refboard_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
- &dev->core->i2c_adap,
- DVB_PLL_THOMSON_DTT7579);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x60, &core->i2c_adap,
+ DVB_PLL_THOMSON_DTT7579))
+ goto frontend_detach;
}
break;
case CX88_BOARD_WINFAST_DTV2000H:
@@ -559,29 +560,32 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_HAUPPAUGE_HVR3000:
dev->dvb.frontend = dvb_attach(cx22702_attach,
&hauppauge_hvr_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_PHILIPS_FMD1216ME_MK3);
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_FMD1216ME_MK3))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
dev->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
- NULL, DVB_PLL_THOMSON_DTT7579);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x60, NULL, DVB_PLL_THOMSON_DTT7579))
+ goto frontend_detach;
break;
}
/* ZL10353 replaces MT352 on later cards */
dev->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
- NULL, DVB_PLL_THOMSON_DTT7579);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x60, NULL, DVB_PLL_THOMSON_DTT7579))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
@@ -589,28 +593,31 @@ static int dvb_register(struct cx8802_dev *dev)
* compatible, with a slightly different MT352 AGC gain. */
dev->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_dual,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, DVB_PLL_THOMSON_DTT7579);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x61, NULL, DVB_PLL_THOMSON_DTT7579))
+ goto frontend_detach;
break;
}
/* ZL10353 replaces MT352 on later cards */
dev->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_plus_v1_1,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, DVB_PLL_THOMSON_DTT7579);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x61, NULL, DVB_PLL_THOMSON_DTT7579))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
dev->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, DVB_PLL_LG_Z201);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x61, NULL, DVB_PLL_LG_Z201))
+ goto frontend_detach;
}
break;
case CX88_BOARD_KWORLD_DVB_T:
@@ -618,10 +625,11 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_ADSTECH_DVB_T_PCI:
dev->dvb.frontend = dvb_attach(mt352_attach,
&dntv_live_dvbt_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, DVB_PLL_UNKNOWN_1);
+ if (!dvb_attach(dvb_pll_attach, dev->dvb.frontend,
+ 0x61, NULL, DVB_PLL_UNKNOWN_1))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
@@ -630,32 +638,35 @@ static int dvb_register(struct cx8802_dev *dev)
dev->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
&dev->vp3054->adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_PHILIPS_FMD1216ME_MK3);
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_FMD1216ME_MK3))
+ goto frontend_detach;
}
#else
- printk(KERN_ERR "%s/2: built without vp3054 support\n", dev->core->name);
+ printk(KERN_ERR "%s/2: built without vp3054 support\n",
+ core->name);
#endif
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
dev->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_hybrid,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_THOMSON_FE6600);
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_THOMSON_FE6600))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
dev->dvb.frontend = dvb_attach(zl10353_attach,
&dvico_fusionhdtv_xc3028,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend == NULL)
dev->dvb.frontend = dvb_attach(mt352_attach,
&dvico_fusionhdtv_mt352_xc3028,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
/*
* On this board, the demod provides the I2C bus pullup.
* We must not permit gate_ctrl to be performed, or
@@ -668,19 +679,18 @@ static int dvb_register(struct cx8802_dev *dev)
break;
case CX88_BOARD_PCHDTV_HD3000:
dev->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_THOMSON_DTT761X);
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_THOMSON_DTT761X))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
dev->ts_gen_cntrl = 0x08;
- {
- /* Do a hardware reset of chip before using it. */
- struct cx88_core *core = dev->core;
+ /* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 1);
@@ -690,139 +700,137 @@ static int dvb_register(struct cx8802_dev *dev)
fusionhdtv_3_gold.pll_rf_set = lgdt330x_pll_rf_set;
dev->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_MICROTUNE_4042FI5);
- }
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_MICROTUNE_4042FI5))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T:
dev->ts_gen_cntrl = 0x08;
- {
- /* Do a hardware reset of chip before using it. */
- struct cx88_core *core = dev->core;
+ /* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 9);
mdelay(200);
dev->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_3_gold,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_THOMSON_DTT761X);
- }
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_THOMSON_DTT761X))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
dev->ts_gen_cntrl = 0x08;
- {
- /* Do a hardware reset of chip before using it. */
- struct cx88_core *core = dev->core;
+ /* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 1);
mdelay(200);
dev->dvb.frontend = dvb_attach(lgdt330x_attach,
&fusionhdtv_5_gold,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_LG_TDVS_H06XF);
- dvb_attach(tda9887_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x43);
- }
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_LG_TDVS_H06XF))
+ goto frontend_detach;
+ if (!dvb_attach(tda9887_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x43))
+ goto frontend_detach;
}
break;
case CX88_BOARD_PCHDTV_HD5500:
dev->ts_gen_cntrl = 0x08;
- {
- /* Do a hardware reset of chip before using it. */
- struct cx88_core *core = dev->core;
+ /* Do a hardware reset of chip before using it. */
cx_clear(MO_GP0_IO, 1);
mdelay(100);
cx_set(MO_GP0_IO, 1);
mdelay(200);
dev->dvb.frontend = dvb_attach(lgdt330x_attach,
&pchdtv_hd5500,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_LG_TDVS_H06XF);
- dvb_attach(tda9887_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x43);
- }
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_LG_TDVS_H06XF))
+ goto frontend_detach;
+ if (!dvb_attach(tda9887_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x43))
+ goto frontend_detach;
}
break;
case CX88_BOARD_ATI_HDTVWONDER:
dev->dvb.frontend = dvb_attach(nxt200x_attach,
&ati_hdtvwonder,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
- dvb_attach(simple_tuner_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x61,
- TUNER_PHILIPS_TUV1236D);
+ if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x61,
+ TUNER_PHILIPS_TUV1236D))
+ goto frontend_detach;
}
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
dev->dvb.frontend = dvb_attach(cx24123_attach,
&hauppauge_novas_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend) {
- dvb_attach(isl6421_attach, dev->dvb.frontend,
- &dev->core->i2c_adap, 0x08, 0x00, 0x00);
+ if (!dvb_attach(isl6421_attach, dev->dvb.frontend,
+ &core->i2c_adap, 0x08, 0x00, 0x00))
+ goto frontend_detach;
}
break;
case CX88_BOARD_KWORLD_DVBS_100:
dev->dvb.frontend = dvb_attach(cx24123_attach,
&kworld_dvbs_100_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend) {
- dev->core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
+ core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
dev->dvb.frontend->ops.set_voltage = kworld_dvbs_100_set_voltage;
}
break;
case CX88_BOARD_GENIATECH_DVBS:
dev->dvb.frontend = dvb_attach(cx24123_attach,
&geniatech_dvbs_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend) {
- dev->core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
+ core->prev_set_voltage = dev->dvb.frontend->ops.set_voltage;
dev->dvb.frontend->ops.set_voltage = geniatech_dvbs_set_voltage;
}
break;
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
dev->dvb.frontend = dvb_attach(s5h1409_attach,
&pinnacle_pctv_hd_800i_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
/* tuner_config.video_dev must point to
* i2c_adap.algo_data
*/
- pinnacle_pctv_hd_800i_tuner_config.priv =
- dev->core->i2c_adap.algo_data;
- dvb_attach(xc5000_attach, dev->dvb.frontend,
- &dev->core->i2c_adap,
- &pinnacle_pctv_hd_800i_tuner_config);
+ if (!dvb_attach(xc5000_attach, dev->dvb.frontend,
+ &core->i2c_adap,
+ &pinnacle_pctv_hd_800i_tuner_config,
+ core->i2c_adap.algo_data))
+ goto frontend_detach;
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
dev->dvb.frontend = dvb_attach(s5h1409_attach,
&dvico_hdtv5_pci_nano_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
struct dvb_frontend *fe;
struct xc2028_config cfg = {
- .i2c_adap = &dev->core->i2c_adap,
+ .i2c_adap = &core->i2c_adap,
.i2c_addr = 0x61,
.callback = cx88_pci_nano_callback,
};
@@ -841,50 +849,50 @@ static int dvb_register(struct cx8802_dev *dev)
case CX88_BOARD_PINNACLE_HYBRID_PCTV:
dev->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_geniatech_x8000_mt,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (attach_xc3028(0x61, dev) < 0)
- return -EINVAL;
+ goto frontend_detach;
break;
case CX88_BOARD_GENIATECH_X8000_MT:
dev->ts_gen_cntrl = 0x00;
dev->dvb.frontend = dvb_attach(zl10353_attach,
&cx88_geniatech_x8000_mt,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (attach_xc3028(0x61, dev) < 0)
- return -EINVAL;
+ goto frontend_detach;
break;
case CX88_BOARD_KWORLD_ATSC_120:
dev->dvb.frontend = dvb_attach(s5h1409_attach,
&kworld_atsc_120_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (attach_xc3028(0x61, dev) < 0)
- return -EINVAL;
+ goto frontend_detach;
break;
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
dev->dvb.frontend = dvb_attach(s5h1411_attach,
&dvico_fusionhdtv7_config,
- &dev->core->i2c_adap);
+ &core->i2c_adap);
if (dev->dvb.frontend != NULL) {
/* tuner_config.video_dev must point to
* i2c_adap.algo_data
*/
- dvico_fusionhdtv7_tuner_config.priv =
- dev->core->i2c_adap.algo_data;
- dvb_attach(xc5000_attach, dev->dvb.frontend,
- &dev->core->i2c_adap,
- &dvico_fusionhdtv7_tuner_config);
+ if (!dvb_attach(xc5000_attach, dev->dvb.frontend,
+ &core->i2c_adap,
+ &dvico_fusionhdtv7_tuner_config,
+ core->i2c_adap.algo_data))
+ goto frontend_detach;
}
break;
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
- dev->core->name);
+ core->name);
break;
}
if (NULL == dev->dvb.frontend) {
printk(KERN_ERR
"%s/2: frontend initialization failed\n",
- dev->core->name);
+ core->name);
return -EINVAL;
}
@@ -892,11 +900,18 @@ static int dvb_register(struct cx8802_dev *dev)
dev->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
/* Put the analog decoder in standby to keep it quiet */
- cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
+ cx88_call_i2c_clients(core, TUNER_SET_STANDBY, NULL);
/* register everything */
return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev,
&dev->pci->dev, adapter_nr);
+
+frontend_detach:
+ if (dev->dvb.frontend) {
+ dvb_frontend_detach(dev->dvb.frontend);
+ dev->dvb.frontend = NULL;
+ }
+ return -EINVAL;
}
/* ----------------------------------------------------------- */
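Annotation: the cx88 changes above add a common frontend_detach error path: whenever a secondary dvb_attach() (tuner, PLL or LNB controller) fails after the demod attached, the code now detaches the demod instead of leaving a half-wired frontend behind. A condensed sketch of that shape for a single board case, with names taken from the hunks above:

/* Sketch: attach demod, then tuner; on tuner failure detach the demod
 * and report the error through one shared label. */
static int dvb_register_sketch(struct cx8802_dev *dev)
{
	struct cx88_core *core = dev->core;

	dev->dvb.frontend = dvb_attach(cx22702_attach,
				       &hauppauge_hvr_config,
				       &core->i2c_adap);
	if (dev->dvb.frontend != NULL) {
		if (!dvb_attach(simple_tuner_attach, dev->dvb.frontend,
				&core->i2c_adap, 0x61,
				TUNER_PHILIPS_FMD1216ME_MK3))
			goto frontend_detach;
	}
	if (dev->dvb.frontend == NULL)
		return -EINVAL;
	return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev,
				     &dev->pci->dev, adapter_nr);

frontend_detach:
	dvb_frontend_detach(dev->dvb.frontend);
	dev->dvb.frontend = NULL;
	return -EINVAL;
}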
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index c7c2896bbd8..16a5af30e9d 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_EM28XX
tristate "Empia EM28xx USB video capture support"
depends on VIDEO_DEV && I2C && INPUT
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_IR
select VIDEOBUF_VMALLOC
@@ -35,7 +35,6 @@ config VIDEO_EM28XX_DVB
select DVB_LGDT330X if !DVB_FE_CUSTOMISE
select DVB_ZL10353 if !DVB_FE_CUSTOMISE
select VIDEOBUF_DVB
- select FW_LOADER
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 50ccf377120..3e4f3c7e92e 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -420,7 +420,13 @@ struct usb_device_id em28xx_id_table [] = {
.driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 },
{ USB_DEVICE(0x2040, 0x6502),
.driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 },
- { USB_DEVICE(0x2040, 0x6513),
+ { USB_DEVICE(0x2040, 0x6513), /* HCW HVR-980 */
+ .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_950 },
+ { USB_DEVICE(0x2040, 0x6517), /* HP HVR-950 */
+ .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_950 },
+ { USB_DEVICE(0x2040, 0x651b), /* RP HVR-950 */
+ .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_950 },
+ { USB_DEVICE(0x2040, 0x651f), /* HCW HVR-850 */
.driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_950 },
{ USB_DEVICE(0x0ccd, 0x0042),
.driver_info = EM2880_BOARD_TERRATEC_HYBRID_XS },
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 7df81575b7f..8cf4983f003 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -251,7 +251,6 @@ static int attach_xc3028(u8 addr, struct em28xx *dev)
printk(KERN_ERR "%s/2: xc3028 attach failed\n",
dev->name);
dvb_frontend_detach(dev->dvb->frontend);
- dvb_unregister_frontend(dev->dvb->frontend);
dev->dvb->frontend = NULL;
return -EINVAL;
}
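Annotation: at this point in em28xx-dvb.c the frontend has only been attached, never registered with the DVB core, so calling dvb_unregister_frontend() in the failure path was wrong; the hunk drops it and keeps only the detach. A sketch of the corrected failure path in a hypothetical helper:

/* Sketch: if the xc3028 tuner fails to attach, detach the frontend
 * only; there is nothing to unregister yet. */
static int attach_tuner_sketch(struct em28xx *dev, struct xc2028_config *cfg)
{
	if (!dvb_attach(xc2028_attach, dev->dvb->frontend, cfg)) {
		printk(KERN_ERR "%s/2: xc3028 attach failed\n", dev->name);
		dvb_frontend_detach(dev->dvb->frontend);
		dev->dvb->frontend = NULL;
		return -EINVAL;
	}
	return 0;
}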
diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig
index eec115bf951..5d7ee8fcdd5 100644
--- a/drivers/media/video/ivtv/Kconfig
+++ b/drivers/media/video/ivtv/Kconfig
@@ -1,10 +1,12 @@
config VIDEO_IVTV
tristate "Conexant cx23416/cx23415 MPEG encoder/decoder support"
depends on VIDEO_V4L1 && VIDEO_V4L2 && PCI && I2C && EXPERIMENTAL
+ depends on INPUT # due to VIDEO_IR
+ depends on HOTPLUG # due to FW_LOADER
select I2C_ALGOBIT
select FW_LOADER
select VIDEO_IR
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_CX2341X
select VIDEO_CX25840
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index 8c02fa66159..c7e449f6397 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -181,12 +181,12 @@ static int ivtv_setup_vbi_fmt(struct ivtv *itv, enum v4l2_mpeg_stream_vbi_fmt fm
return 0;
}
/* Need sliced data for mpeg insertion */
- if (get_service_set(itv->vbi.sliced_in) == 0) {
+ if (ivtv_get_service_set(itv->vbi.sliced_in) == 0) {
if (itv->is_60hz)
itv->vbi.sliced_in->service_set = V4L2_SLICED_CAPTION_525;
else
itv->vbi.sliced_in->service_set = V4L2_SLICED_WSS_625;
- expand_service_set(itv->vbi.sliced_in, itv->is_50hz);
+ ivtv_expand_service_set(itv->vbi.sliced_in, itv->is_50hz);
}
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index ed020f722b0..797e636771d 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -853,6 +853,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *dev,
return 0;
}
+#ifdef MODULE
static u32 ivtv_request_module(struct ivtv *itv, u32 hw,
const char *name, u32 id)
{
@@ -865,12 +866,14 @@ static u32 ivtv_request_module(struct ivtv *itv, u32 hw,
IVTV_DEBUG_INFO("Loaded module %s\n", name);
return hw;
}
+#endif
static void ivtv_load_and_init_modules(struct ivtv *itv)
{
u32 hw = itv->card->hw_all;
unsigned i;
+#ifdef MODULE
/* load modules */
#ifndef CONFIG_MEDIA_TUNER
hw = ivtv_request_module(itv, hw, "tuner", IVTV_HW_TUNER);
@@ -911,6 +914,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
#ifndef CONFIG_VIDEO_M52790
hw = ivtv_request_module(itv, hw, "m52790", IVTV_HW_M52790);
#endif
+#endif
/* check which i2c devices are actually found */
for (i = 0; i < 32; i++) {
@@ -1228,7 +1232,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
return 0;
free_streams:
- ivtv_streams_cleanup(itv);
+ ivtv_streams_cleanup(itv, 1);
free_irq:
free_irq(itv->dev->irq, (void *)itv);
free_i2c:
@@ -1373,7 +1377,7 @@ static void ivtv_remove(struct pci_dev *pci_dev)
flush_workqueue(itv->irq_work_queues);
destroy_workqueue(itv->irq_work_queues);
- ivtv_streams_cleanup(itv);
+ ivtv_streams_cleanup(itv, 1);
ivtv_udma_free(itv);
exit_ivtv_i2c(itv);
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 2b74b0ab147..f2fa434b677 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -987,6 +987,8 @@ int ivtv_v4l2_open(struct inode *inode, struct file *filp)
/* Find which card this open was on */
spin_lock(&ivtv_cards_lock);
for (x = 0; itv == NULL && x < ivtv_cards_active; x++) {
+ if (ivtv_cards[x] == NULL)
+ continue;
/* find out which stream this open was on */
for (y = 0; y < IVTV_MAX_STREAMS; y++) {
s = &ivtv_cards[x]->streams[y];
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index d508b5d0538..26cc0f6699f 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -38,7 +38,7 @@
#include <linux/dvb/audio.h>
#include <linux/i2c-id.h>
-u16 service2vbi(int type)
+u16 ivtv_service2vbi(int type)
{
switch (type) {
case V4L2_SLICED_TELETEXT_B:
@@ -88,7 +88,7 @@ static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
return 0;
}
-void expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
+void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
{
u16 set = fmt->service_set;
int f, l;
@@ -115,7 +115,7 @@ static int check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
return set != 0;
}
-u16 get_service_set(struct v4l2_sliced_vbi_format *fmt)
+u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt)
{
int f, l;
u16 set = 0;
@@ -466,7 +466,7 @@ static int ivtv_get_fmt(struct ivtv *itv, int streamtype, struct v4l2_format *fm
vbifmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
vbifmt->service_lines[0][16] = V4L2_SLICED_VPS;
}
- vbifmt->service_set = get_service_set(vbifmt);
+ vbifmt->service_set = ivtv_get_service_set(vbifmt);
break;
}
@@ -481,12 +481,12 @@ static int ivtv_get_fmt(struct ivtv *itv, int streamtype, struct v4l2_format *fm
if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) {
vbifmt->service_set = itv->is_50hz ? V4L2_SLICED_VBI_625 :
V4L2_SLICED_VBI_525;
- expand_service_set(vbifmt, itv->is_50hz);
+ ivtv_expand_service_set(vbifmt, itv->is_50hz);
break;
}
itv->video_dec_func(itv, VIDIOC_G_FMT, fmt);
- vbifmt->service_set = get_service_set(vbifmt);
+ vbifmt->service_set = ivtv_get_service_set(vbifmt);
break;
}
case V4L2_BUF_TYPE_VBI_OUTPUT:
@@ -640,9 +640,9 @@ static int ivtv_try_or_set_fmt(struct ivtv *itv, int streamtype,
memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
if (vbifmt->service_set)
- expand_service_set(vbifmt, itv->is_50hz);
+ ivtv_expand_service_set(vbifmt, itv->is_50hz);
set = check_service_set(vbifmt, itv->is_50hz);
- vbifmt->service_set = get_service_set(vbifmt);
+ vbifmt->service_set = ivtv_get_service_set(vbifmt);
if (!set_fmt)
return 0;
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index a03351b6853..4e67f0ed1fc 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -21,9 +21,9 @@
#ifndef IVTV_IOCTL_H
#define IVTV_IOCTL_H
-u16 service2vbi(int type);
-void expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
-u16 get_service_set(struct v4l2_sliced_vbi_format *fmt);
+u16 ivtv_service2vbi(int type);
+void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
+u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg);
int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void *arg);
diff --git a/drivers/media/video/ivtv/ivtv-queue.c b/drivers/media/video/ivtv/ivtv-queue.c
index 3e1deec67a5..fc8b1eaa333 100644
--- a/drivers/media/video/ivtv/ivtv-queue.c
+++ b/drivers/media/video/ivtv/ivtv-queue.c
@@ -203,14 +203,14 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
s->dma != PCI_DMA_NONE ? "DMA " : "",
s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
- s->sg_pending = kzalloc(SGsize, GFP_KERNEL);
+ s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
if (s->sg_pending == NULL) {
IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
return -ENOMEM;
}
s->sg_pending_size = 0;
- s->sg_processing = kzalloc(SGsize, GFP_KERNEL);
+ s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
if (s->sg_processing == NULL) {
IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
kfree(s->sg_pending);
@@ -219,7 +219,8 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
}
s->sg_processing_size = 0;
- s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element), GFP_KERNEL);
+ s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
+ GFP_KERNEL|__GFP_NOWARN);
if (s->sg_dma == NULL) {
IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
kfree(s->sg_pending);
@@ -235,11 +236,12 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
/* allocate stream buffers. Initially all buffers are in q_free. */
for (i = 0; i < s->buffers; i++) {
- struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer), GFP_KERNEL);
+ struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
+ GFP_KERNEL|__GFP_NOWARN);
if (buf == NULL)
break;
- buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL);
+ buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
if (buf->buf == NULL) {
kfree(buf);
break;
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 4ab8d36831b..c47c2b94514 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -244,7 +244,7 @@ int ivtv_streams_setup(struct ivtv *itv)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
- ivtv_streams_cleanup(itv);
+ ivtv_streams_cleanup(itv, 0);
return -ENOMEM;
}
@@ -304,12 +304,12 @@ int ivtv_streams_register(struct ivtv *itv)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
- ivtv_streams_cleanup(itv);
+ ivtv_streams_cleanup(itv, 1);
return -ENOMEM;
}
/* Unregister v4l2 devices */
-void ivtv_streams_cleanup(struct ivtv *itv)
+void ivtv_streams_cleanup(struct ivtv *itv, int unregister)
{
int type;
@@ -322,8 +322,11 @@ void ivtv_streams_cleanup(struct ivtv *itv)
continue;
ivtv_stream_free(&itv->streams[type]);
- /* Unregister device */
- video_unregister_device(vdev);
+ /* Unregister or release device */
+ if (unregister)
+ video_unregister_device(vdev);
+ else
+ video_device_release(vdev);
}
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.h b/drivers/media/video/ivtv/ivtv-streams.h
index 3d76a415fbd..a653a513641 100644
--- a/drivers/media/video/ivtv/ivtv-streams.h
+++ b/drivers/media/video/ivtv/ivtv-streams.h
@@ -23,7 +23,7 @@
int ivtv_streams_setup(struct ivtv *itv);
int ivtv_streams_register(struct ivtv *itv);
-void ivtv_streams_cleanup(struct ivtv *itv);
+void ivtv_streams_cleanup(struct ivtv *itv, int unregister);
/* Capture related */
int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s);
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index c151bcf5519..71798f0da27 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -169,7 +169,8 @@ static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
linemask[0] |= (1 << l);
else
linemask[1] |= (1 << (l - 32));
- dst[sd + 12 + line * 43] = service2vbi(itv->vbi.sliced_data[i].id);
+ dst[sd + 12 + line * 43] =
+ ivtv_service2vbi(itv->vbi.sliced_data[i].id);
memcpy(dst + sd + 12 + line * 43 + 1, itv->vbi.sliced_data[i].data, 42);
line++;
}
diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c
index 62f70bd5e3c..a9417f6e408 100644
--- a/drivers/media/video/ivtv/ivtv-yuv.c
+++ b/drivers/media/video/ivtv/ivtv-yuv.c
@@ -908,7 +908,7 @@ static void ivtv_yuv_init(struct ivtv *itv)
}
/* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
- yi->blanking_ptr = kzalloc(720 * 16, GFP_KERNEL);
+ yi->blanking_ptr = kzalloc(720 * 16, GFP_KERNEL|__GFP_NOWARN);
if (yi->blanking_ptr) {
yi->blanking_dmaptr = pci_map_single(itv->dev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
} else {
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index df789f683e6..73be154f7f0 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -948,7 +948,8 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
}
/* Allocate the pseudo palette */
- oi->ivtvfb_info.pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ oi->ivtvfb_info.pseudo_palette =
+ kmalloc(sizeof(u32) * 16, GFP_KERNEL|__GFP_NOWARN);
if (!oi->ivtvfb_info.pseudo_palette) {
IVTVFB_ERR("abort, unable to alloc pseudo pallete\n");
@@ -1056,7 +1057,8 @@ static int ivtvfb_init_card(struct ivtv *itv)
return -EBUSY;
}
- itv->osd_info = kzalloc(sizeof(struct osd_info), GFP_ATOMIC);
+ itv->osd_info = kzalloc(sizeof(struct osd_info),
+ GFP_ATOMIC|__GFP_NOWARN);
if (itv->osd_info == NULL) {
IVTVFB_ERR("Failed to allocate memory for osd_info\n");
return -ENOMEM;
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 179e47049a4..ee43499544c 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -12,15 +12,12 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/gpio.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
-#ifdef CONFIG_MT9M001_PCA9536_SWITCH
-#include <asm/gpio.h>
-#endif
-
/* mt9m001 i2c address 0x5d
* The platform has to define i2c_board_info
* and call i2c_register_board_info() */
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index d1391ac5509..1658fe59039 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -13,15 +13,12 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/log2.h>
+#include <linux/gpio.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
-#ifdef CONFIG_MT9M001_PCA9536_SWITCH
-#include <asm/gpio.h>
-#endif
-
/* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
* The platform has to define i2c_board_info
* and call i2c_register_board_info() */
@@ -91,7 +88,7 @@ static const struct soc_camera_data_format mt9v022_monochrome_formats[] = {
struct mt9v022 {
struct i2c_client *client;
struct soc_camera_device icd;
- int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */
+ int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */
int switch_gpio;
u16 chip_control;
unsigned char datawidth;
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
index 9620c67fae7..4482b2c72ce 100644
--- a/drivers/media/video/pvrusb2/Kconfig
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -1,8 +1,10 @@
config VIDEO_PVRUSB2
tristate "Hauppauge WinTV-PVR USB2 support"
depends on VIDEO_V4L2 && I2C
+ depends on VIDEO_MEDIA # Avoids pvrusb = Y / DVB = M
+ depends on HOTPLUG # due to FW_LOADER
select FW_LOADER
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_CX2341X
select VIDEO_SAA711X
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 40e4c3bd2cb..83f076abce3 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_SAA7134
depends on VIDEO_DEV && PCI && I2C && INPUT
select VIDEOBUF_DMA_SG
select VIDEO_IR
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_TVEEPROM
select CRC32
---help---
@@ -27,6 +27,7 @@ config VIDEO_SAA7134_ALSA
config VIDEO_SAA7134_DVB
tristate "DVB/ATSC Support for saa7134 based TV cards"
depends on VIDEO_SAA7134 && DVB_CORE
+ depends on HOTPLUG # due to FW_LOADER
select VIDEOBUF_DVB
select FW_LOADER
select DVB_PLL if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index eec127864fe..2c19cd0113c 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -864,7 +864,6 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
struct saa7134_dev *dev;
struct saa7134_mpeg_ops *mops;
int err;
- int mask;
if (saa7134_devcount == SAA7134_MAXBOARDS)
return -ENOMEM;
@@ -1065,11 +1064,6 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
if (TUNER_ABSENT != dev->tuner_type)
saa7134_i2c_call_clients(dev, TUNER_SET_STANDBY, NULL);
- if (card(dev).gpiomask != 0) {
- mask = card(dev).gpiomask;
- saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, mask, mask);
- saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, mask, 0);
- }
return 0;
fail4:
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 2d16be2259d..469f93aac00 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -538,19 +538,23 @@ static int philips_tda827x_tuner_sleep(struct dvb_frontend *fe)
return 0;
}
-static void configure_tda827x_fe(struct saa7134_dev *dev, struct tda1004x_config *cdec_conf,
- struct tda827x_config *tuner_conf)
+static int configure_tda827x_fe(struct saa7134_dev *dev,
+ struct tda1004x_config *cdec_conf,
+ struct tda827x_config *tuner_conf)
{
dev->dvb.frontend = dvb_attach(tda10046_attach, cdec_conf, &dev->i2c_adap);
if (dev->dvb.frontend) {
if (cdec_conf->i2c_gate)
dev->dvb.frontend->ops.i2c_gate_ctrl = tda8290_i2c_gate_ctrl;
- if (dvb_attach(tda827x_attach, dev->dvb.frontend, cdec_conf->tuner_address,
- &dev->i2c_adap, tuner_conf) == NULL) {
- wprintk("no tda827x tuner found at addr: %02x\n",
+ if (dvb_attach(tda827x_attach, dev->dvb.frontend,
+ cdec_conf->tuner_address,
+ &dev->i2c_adap, tuner_conf))
+ return 0;
+
+ wprintk("no tda827x tuner found at addr: %02x\n",
cdec_conf->tuner_address);
- }
}
+ return -EINVAL;
}
/* ------------------------------------------------------------------ */
@@ -997,7 +1001,9 @@ static int dvb_init(struct saa7134_dev *dev)
break;
case SAA7134_BOARD_FLYDVBTDUO:
case SAA7134_BOARD_FLYDVBT_DUO_CARDBUS:
- configure_tda827x_fe(dev, &tda827x_lifeview_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &tda827x_lifeview_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_PHILIPS_EUROPA:
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
@@ -1022,36 +1028,52 @@ static int dvb_init(struct saa7134_dev *dev)
}
break;
case SAA7134_BOARD_KWORLD_DVBT_210:
- configure_tda827x_fe(dev, &kworld_dvb_t_210_config, &tda827x_cfg_2);
+ if (configure_tda827x_fe(dev, &kworld_dvb_t_210_config,
+ &tda827x_cfg_2) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_PHILIPS_TIGER:
- configure_tda827x_fe(dev, &philips_tiger_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &philips_tiger_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_PINNACLE_PCTV_310i:
- configure_tda827x_fe(dev, &pinnacle_pctv_310i_config, &tda827x_cfg_1);
+ if (configure_tda827x_fe(dev, &pinnacle_pctv_310i_config,
+ &tda827x_cfg_1) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
- configure_tda827x_fe(dev, &hauppauge_hvr_1110_config, &tda827x_cfg_1);
+ if (configure_tda827x_fe(dev, &hauppauge_hvr_1110_config,
+ &tda827x_cfg_1) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
- configure_tda827x_fe(dev, &asus_p7131_dual_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &asus_p7131_dual_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_FLYDVBT_LR301:
- configure_tda827x_fe(dev, &tda827x_lifeview_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &tda827x_lifeview_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_FLYDVB_TRIO:
- if(! use_frontend) { /* terrestrial */
- configure_tda827x_fe(dev, &lifeview_trio_config, &tda827x_cfg_0);
+ if (!use_frontend) { /* terrestrial */
+ if (configure_tda827x_fe(dev, &lifeview_trio_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
} else { /* satellite */
dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap);
if (dev->dvb.frontend) {
if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x63,
&dev->i2c_adap, 0) == NULL) {
wprintk("%s: Lifeview Trio, No tda826x found!\n", __func__);
+ goto dettach_frontend;
}
if (dvb_attach(isl6421_attach, dev->dvb.frontend, &dev->i2c_adap,
0x08, 0, 0) == NULL) {
wprintk("%s: Lifeview Trio, No ISL6421 found!\n", __func__);
+ goto dettach_frontend;
}
}
}
@@ -1067,15 +1089,20 @@ static int dvb_init(struct saa7134_dev *dev)
&ads_duo_cfg) == NULL) {
wprintk("no tda827x tuner found at addr: %02x\n",
ads_tech_duo_config.tuner_address);
+ goto dettach_frontend;
}
}
break;
case SAA7134_BOARD_TEVION_DVBT_220RF:
- configure_tda827x_fe(dev, &tevion_dvbt220rf_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &tevion_dvbt220rf_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_MEDION_MD8800_QUADRO:
if (!use_frontend) { /* terrestrial */
- configure_tda827x_fe(dev, &md8800_dvbt_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &md8800_dvbt_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
} else { /* satellite */
dev->dvb.frontend = dvb_attach(tda10086_attach,
&flydvbs, &dev->i2c_adap);
@@ -1086,16 +1113,20 @@ static int dvb_init(struct saa7134_dev *dev)
struct i2c_msg msg = {.addr = 0x08, .flags = 0, .len = 1};
if (dvb_attach(tda826x_attach, dev->dvb.frontend,
- 0x60, &dev->i2c_adap, 0) == NULL)
+ 0x60, &dev->i2c_adap, 0) == NULL) {
wprintk("%s: Medion Quadro, no tda826x "
"found !\n", __func__);
+ goto dettach_frontend;
+ }
if (dev_id != 0x08) {
/* we need to open the i2c gate (we know it exists) */
fe->ops.i2c_gate_ctrl(fe, 1);
if (dvb_attach(isl6405_attach, fe,
- &dev->i2c_adap, 0x08, 0, 0) == NULL)
+ &dev->i2c_adap, 0x08, 0, 0) == NULL) {
wprintk("%s: Medion Quadro, no ISL6405 "
"found !\n", __func__);
+ goto dettach_frontend;
+ }
if (dev_id == 0x07) {
/* fire up the 2nd section of the LNB supply since
we can't do this from the other section */
@@ -1117,19 +1148,17 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180:
dev->dvb.frontend = dvb_attach(nxt200x_attach, &avertvhda180,
&dev->i2c_adap);
- if (dev->dvb.frontend) {
+ if (dev->dvb.frontend)
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
NULL, DVB_PLL_TDHU2);
- }
break;
case SAA7134_BOARD_KWORLD_ATSC110:
dev->dvb.frontend = dvb_attach(nxt200x_attach, &kworldatsc110,
&dev->i2c_adap);
- if (dev->dvb.frontend) {
+ if (dev->dvb.frontend)
dvb_attach(simple_tuner_attach, dev->dvb.frontend,
&dev->i2c_adap, 0x61,
TUNER_PHILIPS_TUV1236D);
- }
break;
case SAA7134_BOARD_FLYDVBS_LR300:
dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs,
@@ -1138,10 +1167,12 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x60,
&dev->i2c_adap, 0) == NULL) {
wprintk("%s: No tda826x found!\n", __func__);
+ goto dettach_frontend;
}
if (dvb_attach(isl6421_attach, dev->dvb.frontend,
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
wprintk("%s: No ISL6421 found!\n", __func__);
+ goto dettach_frontend;
}
}
break;
@@ -1168,43 +1199,65 @@ static int dvb_init(struct saa7134_dev *dev)
}
break;
case SAA7134_BOARD_CINERGY_HT_PCMCIA:
- configure_tda827x_fe(dev, &cinergy_ht_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &cinergy_ht_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_CINERGY_HT_PCI:
- configure_tda827x_fe(dev, &cinergy_ht_pci_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &cinergy_ht_pci_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_PHILIPS_TIGER_S:
- configure_tda827x_fe(dev, &philips_tiger_s_config, &tda827x_cfg_2);
+ if (configure_tda827x_fe(dev, &philips_tiger_s_config,
+ &tda827x_cfg_2) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_ASUS_P7131_4871:
- configure_tda827x_fe(dev, &asus_p7131_4871_config, &tda827x_cfg_2);
+ if (configure_tda827x_fe(dev, &asus_p7131_4871_config,
+ &tda827x_cfg_2) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
- configure_tda827x_fe(dev, &asus_p7131_hybrid_lna_config, &tda827x_cfg_2);
+ if (configure_tda827x_fe(dev, &asus_p7131_hybrid_lna_config,
+ &tda827x_cfg_2) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_AVERMEDIA_SUPER_007:
- configure_tda827x_fe(dev, &avermedia_super_007_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &avermedia_super_007_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_TWINHAN_DTV_DVB_3056:
- configure_tda827x_fe(dev, &twinhan_dtv_dvb_3056_config, &tda827x_cfg_2_sw42);
+ if (configure_tda827x_fe(dev, &twinhan_dtv_dvb_3056_config,
+ &tda827x_cfg_2_sw42) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_PHILIPS_SNAKE:
dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs,
&dev->i2c_adap);
if (dev->dvb.frontend) {
if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x60,
- &dev->i2c_adap, 0) == NULL)
+ &dev->i2c_adap, 0) == NULL) {
wprintk("%s: No tda826x found!\n", __func__);
+ goto dettach_frontend;
+ }
if (dvb_attach(lnbp21_attach, dev->dvb.frontend,
- &dev->i2c_adap, 0, 0) == NULL)
+ &dev->i2c_adap, 0, 0) == NULL) {
wprintk("%s: No lnbp21 found!\n", __func__);
+ goto dettach_frontend;
+ }
}
break;
case SAA7134_BOARD_CREATIX_CTX953:
- configure_tda827x_fe(dev, &md8800_dvbt_config, &tda827x_cfg_0);
+ if (configure_tda827x_fe(dev, &md8800_dvbt_config,
+ &tda827x_cfg_0) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_MSI_TVANYWHERE_AD11:
- configure_tda827x_fe(dev, &philips_tiger_s_config, &tda827x_cfg_2);
+ if (configure_tda827x_fe(dev, &philips_tiger_s_config,
+ &tda827x_cfg_2) < 0)
+ goto dettach_frontend;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
dev->dvb.frontend = dvb_attach(mt352_attach,
@@ -1218,16 +1271,20 @@ static int dvb_init(struct saa7134_dev *dev)
if (dev->dvb.frontend) {
struct dvb_frontend *fe;
if (dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
- &dev->i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261) == NULL)
+ &dev->i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261) == NULL) {
wprintk("%s: MD7134 DVB-S, no SD1878 "
"found !\n", __func__);
+ goto dettach_frontend;
+ }
/* we need to open the i2c gate (we know it exists) */
fe = dev->dvb.frontend;
fe->ops.i2c_gate_ctrl(fe, 1);
if (dvb_attach(isl6405_attach, fe,
- &dev->i2c_adap, 0x08, 0, 0) == NULL)
+ &dev->i2c_adap, 0x08, 0, 0) == NULL) {
wprintk("%s: MD7134 DVB-S, no ISL6405 "
"found !\n", __func__);
+ goto dettach_frontend;
+ }
fe->ops.i2c_gate_ctrl(fe, 0);
dev->original_set_voltage = fe->ops.set_voltage;
fe->ops.set_voltage = md8800_set_voltage;
@@ -1254,10 +1311,7 @@ static int dvb_init(struct saa7134_dev *dev)
if (!fe) {
printk(KERN_ERR "%s/2: xc3028 attach failed\n",
dev->name);
- dvb_frontend_detach(dev->dvb.frontend);
- dvb_unregister_frontend(dev->dvb.frontend);
- dev->dvb.frontend = NULL;
- return -1;
+ goto dettach_frontend;
}
}
@@ -1282,6 +1336,12 @@ static int dvb_init(struct saa7134_dev *dev)
dev->dvb.frontend->ops.tuner_ops.sleep(dev->dvb.frontend);
}
return ret;
+
+dettach_frontend:
+ dvb_frontend_detach(dev->dvb.frontend);
+ dev->dvb.frontend = NULL;
+
+ return -1;
}
static int dvb_fini(struct saa7134_dev *dev)
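The saa7134-dvb.c changes above all follow one pattern: configure_tda827x_fe() now reports failure, and every board case that cannot finish attaching its tuner jumps to the new dettach_frontend label, which detaches the frontend exactly once instead of leaving a half-initialized device registered. A condensed, self-contained sketch of that centralized-cleanup shape (all example_* names are invented):

#include <linux/errno.h>

struct example_frontend { int attached; };
struct example_dev { int board; struct example_frontend *frontend; };

/* 0 on success, <0 if the tuner could not be attached */
static int example_configure_fe(struct example_dev *dev)
{
	return dev->frontend ? 0 : -EINVAL;
}

static void example_frontend_detach(struct example_frontend *fe) { }

static int example_dvb_init(struct example_dev *dev)
{
	switch (dev->board) {
	case 1:				/* a board with a tda827x-style tuner */
		if (example_configure_fe(dev) < 0)
			goto detach_frontend;
		break;
	default:
		return -ENODEV;
	}
	return 0;			/* normally: register the frontend here */

detach_frontend:
	/* single cleanup point, mirroring the dettach_frontend label above */
	example_frontend_detach(dev->frontend);
	dev->frontend = NULL;
	return -1;
}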
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index a0baf2d0ba7..48e1a01718e 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1634,7 +1634,7 @@ static int saa7134_s_fmt_overlay(struct file *file, void *priv,
struct saa7134_fh *fh = priv;
struct saa7134_dev *dev = fh->dev;
int err;
- unsigned int flags;
+ unsigned long flags;
if (saa7134_no_overlay > 0) {
printk(KERN_ERR "V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 9276ed99738..b12c60cf5a0 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -30,6 +30,7 @@
#include <linux/kref.h>
#include <linux/usb.h>
+#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -245,6 +246,8 @@ static int stk_initialise(struct stk_camera *dev)
return -1;
}
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+
/* sysfs functions */
/*FIXME cleanup this */
@@ -350,6 +353,10 @@ static void stk_remove_sysfs_files(struct video_device *vdev)
video_device_remove_file(vdev, &dev_attr_vflip);
}
+#else
+#define stk_create_sysfs_files(a)
+#define stk_remove_sysfs_files(a)
+#endif
/* *********************************************** */
/*
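The stk-webcam change compiles the sysfs helpers only when CONFIG_VIDEO_V4L1_COMPAT is set and stubs them out otherwise, so the call sites stay free of #ifdefs. A generic sketch of that idiom (the config symbol and helper names are illustrative; the do-while form is the usual safe variant of the empty macro):

#include <linux/device.h>

#ifdef CONFIG_EXAMPLE_FEATURE
static void example_create_files(struct device *dev)
{
	/* create the optional attributes here */
}
static void example_remove_files(struct device *dev)
{
	/* remove them here */
}
#else
#define example_create_files(dev)	do { } while (0)
#define example_remove_files(dev)	do { } while (0)
#endif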
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index e57a6460577..8f0100f67a9 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -885,12 +885,19 @@ static int __exit tcm825x_remove(struct i2c_client *client)
return 0;
}
+static const struct i2c_device_id tcm825x_id[] = {
+ { "tcm825x", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tcm825x_id);
+
static struct i2c_driver tcm825x_i2c_driver = {
.driver = {
.name = TCM825X_NAME,
},
.probe = tcm825x_probe,
.remove = __exit_p(tcm825x_remove),
+ .id_table = tcm825x_id,
};
static struct tcm825x_sensor tcm825x = {
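The tcm825x hunk (and the tlv320aic23b and tvaudio ones that follow) adds the new-style i2c binding pieces: an i2c_device_id table, MODULE_DEVICE_TABLE() for module autoloading, and an .id_table pointer in the driver. A stripped-down sketch of the pattern with a hypothetical "examplechip" device:

#include <linux/module.h>
#include <linux/i2c.h>

static int examplechip_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	/* id->name is "examplechip", id->driver_data is 0 here */
	return 0;
}

static int examplechip_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id examplechip_id[] = {
	{ "examplechip", 0 },
	{ }			/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, examplechip_id);

static struct i2c_driver examplechip_driver = {
	.driver = {
		.name = "examplechip",
	},
	.probe = examplechip_probe,
	.remove = examplechip_remove,
	.id_table = examplechip_id,
};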
diff --git a/drivers/media/video/tlv320aic23b.c b/drivers/media/video/tlv320aic23b.c
index f1db54202de..28ab9f9d760 100644
--- a/drivers/media/video/tlv320aic23b.c
+++ b/drivers/media/video/tlv320aic23b.c
@@ -168,6 +168,11 @@ static int tlv320aic23b_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
+static const struct i2c_device_id tlv320aic23b_id[] = {
+ { "tlv320aic23b", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id);
static struct v4l2_i2c_driver_data v4l2_i2c_data = {
.name = "tlv320aic23b",
@@ -175,4 +180,5 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
.command = tlv320aic23b_command,
.probe = tlv320aic23b_probe,
.remove = tlv320aic23b_remove,
+ .id_table = tlv320aic23b_id,
};
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 6bf104ea051..5a75788b92a 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -40,11 +40,11 @@
typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
if (__a) { \
__r = (int) __a(ARGS); \
+ symbol_put(FUNCTION); \
} else { \
printk(KERN_ERR "TUNER: Unable to find " \
"symbol "#FUNCTION"()\n"); \
} \
- symbol_put(FUNCTION); \
__r; \
})
@@ -340,16 +340,6 @@ static void tuner_i2c_address_check(struct tuner *t)
tuner_warn("====================== WARNING! ======================\n");
}
-static void attach_tda829x(struct tuner *t)
-{
- struct tda829x_config cfg = {
- .lna_cfg = t->config,
- .tuner_callback = t->tuner_callback,
- };
- dvb_attach(tda829x_attach,
- &t->fe, t->i2c->adapter, t->i2c->addr, &cfg);
-}
-
static struct xc5000_config xc5000_cfg;
static void set_type(struct i2c_client *c, unsigned int type,
@@ -385,12 +375,19 @@ static void set_type(struct i2c_client *c, unsigned int type,
switch (t->type) {
case TUNER_MT2032:
- dvb_attach(microtune_attach,
- &t->fe, t->i2c->adapter, t->i2c->addr);
+ if (!dvb_attach(microtune_attach,
+ &t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
break;
case TUNER_PHILIPS_TDA8290:
{
- attach_tda829x(t);
+ struct tda829x_config cfg = {
+ .lna_cfg = t->config,
+ .tuner_callback = t->tuner_callback,
+ };
+ if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter,
+ t->i2c->addr, &cfg))
+ goto attach_failed;
break;
}
case TUNER_TEA5767:
@@ -441,8 +438,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
break;
}
case TUNER_TDA9887:
- dvb_attach(tda9887_attach,
- &t->fe, t->i2c->adapter, t->i2c->addr);
+ if (!dvb_attach(tda9887_attach,
+ &t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
break;
case TUNER_XC5000:
{
@@ -450,10 +448,10 @@ static void set_type(struct i2c_client *c, unsigned int type,
xc5000_cfg.i2c_address = t->i2c->addr;
xc5000_cfg.if_khz = 5380;
- xc5000_cfg.priv = c->adapter->algo_data;
xc5000_cfg.tuner_callback = t->tuner_callback;
if (!dvb_attach(xc5000_attach,
- &t->fe, t->i2c->adapter, &xc5000_cfg))
+ &t->fe, t->i2c->adapter, &xc5000_cfg,
+ c->adapter->algo_data))
goto attach_failed;
xc_tuner_ops = &t->fe.ops.tuner_ops;
@@ -1167,7 +1165,7 @@ static int tuner_probe(struct i2c_client *client,
/* If chip is not tda8290, don't register.
since it can be tda9887*/
if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
- t->i2c->addr) == 0) {
+ t->i2c->addr) >= 0) {
tuner_dbg("tda829x detected\n");
} else {
/* Default is being tda9887 */
@@ -1181,7 +1179,7 @@ static int tuner_probe(struct i2c_client *client,
case 0x60:
if (tuner_symbol_probe(tea5767_autodetection,
t->i2c->adapter, t->i2c->addr)
- != EINVAL) {
+ >= 0) {
t->type = TUNER_TEA5767;
t->mode_mask = T_RADIO;
t->mode = T_STANDBY;
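The first tuner-core.c hunk moves symbol_put() inside the branch where symbol_request() actually returned a symbol, so the module reference is only dropped when one was taken; the later hunks turn silent dvb_attach() failures into goto attach_failed. A small sketch of the balanced request/put pattern (some_optional_function is a stand-in for whatever symbol is being probed):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical optional symbol exported by another module. */
extern int some_optional_function(int arg);

static int call_optional(int arg)
{
	int (*fn)(int) = symbol_request(some_optional_function);
	int ret;

	if (fn) {
		ret = fn(arg);
		symbol_put(some_optional_function);	/* paired with the successful request */
	} else {
		printk(KERN_ERR "optional symbol not available\n");
		ret = -ENOENT;
	}
	return ret;
}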
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index 6f9945b04e1..c77914d99d1 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -1505,7 +1505,8 @@ static int chip_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
/* fill required data structures */
- strcpy(client->name, desc->name);
+ if (!id)
+ strlcpy(client->name, desc->name, I2C_NAME_SIZE);
chip->type = desc-chiplist;
chip->shadow.count = desc->registers+1;
chip->prevmode = -1;
@@ -1830,6 +1831,15 @@ static int chip_legacy_probe(struct i2c_adapter *adap)
return 0;
}
+/* This driver supports many devices and the idea is to let the driver
+ detect which device is present. So rather than listing all supported
+ devices here, we pretend to support a single, fake device type. */
+static const struct i2c_device_id chip_id[] = {
+ { "tvaudio", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, chip_id);
+
static struct v4l2_i2c_driver_data v4l2_i2c_data = {
.name = "tvaudio",
.driverid = I2C_DRIVERID_TVAUDIO,
@@ -1837,6 +1847,7 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data = {
.probe = chip_probe,
.remove = chip_remove,
.legacy_probe = chip_legacy_probe,
+ .id_table = chip_id,
};
/*
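In tvaudio's chip_probe() the name copy is now bounded with strlcpy() into the fixed I2C_NAME_SIZE buffer, and it apparently happens only on the legacy (!id) path, so a name already filled in from the new id table is not overwritten. Minimal sketch of the bounded copy (example_set_name is an invented helper):

#include <linux/string.h>
#include <linux/i2c.h>

static void example_set_name(struct i2c_client *client, const char *desc_name)
{
	/* never write past the fixed-size client->name buffer */
	strlcpy(client->name, desc_name, I2C_NAME_SIZE);
}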
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index 3cf8a8e801e..9da0e1807ff 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -319,10 +319,12 @@ audioIC[] =
{AUDIO_CHIP_INTERNAL, "CX25843"},
{AUDIO_CHIP_INTERNAL, "CX23418"},
{AUDIO_CHIP_INTERNAL, "CX23885"},
- /* 40-42 */
+ /* 40-44 */
{AUDIO_CHIP_INTERNAL, "CX23888"},
{AUDIO_CHIP_INTERNAL, "SAA7131"},
{AUDIO_CHIP_INTERNAL, "CX23887"},
+ {AUDIO_CHIP_INTERNAL, "SAA7164"},
+ {AUDIO_CHIP_INTERNAL, "AU8522"},
};
/* This list is supplied by Hauppauge. Thanks! */
@@ -341,8 +343,10 @@ static const char *decoderIC[] = {
"CX882", "TVP5150A", "CX25840", "CX25841", "CX25842",
/* 30-34 */
"CX25843", "CX23418", "NEC61153", "CX23885", "CX23888",
- /* 35-37 */
- "SAA7131", "CX25837", "CX23887"
+ /* 35-39 */
+ "SAA7131", "CX25837", "CX23887", "CX23885A", "CX23887A",
+ /* 40-42 */
+ "SAA7164", "CX23885B", "AU8522"
};
static int hasRadioTuner(int tunerType)
diff --git a/drivers/media/video/usbvision/Kconfig b/drivers/media/video/usbvision/Kconfig
index 74e1d3075a2..fc24ef05b3f 100644
--- a/drivers/media/video/usbvision/Kconfig
+++ b/drivers/media/video/usbvision/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_USBVISION
tristate "USB video devices based on Nogatech NT1003/1004/1005"
depends on I2C && VIDEO_V4L2
- select MEDIA_TUNER
+ select VIDEO_TUNER
select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
---help---
There are more than 50 different USB video devices based on
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 30a1af857c7..fa394104339 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -47,6 +47,7 @@
* to test the HW NMI watchdog
* F## = Break at do_fork for ## iterations
* S## = Break at sys_open for ## iterations
+ * I## = Run the single step test ## iterations
*
* NOTE: that the do_fork and sys_open tests are mutually exclusive.
*
@@ -375,7 +376,7 @@ static void emul_sstep_get(char *arg)
break;
case 1:
/* set breakpoint */
- break_helper("Z0", 0, sstep_addr);
+ break_helper("Z0", NULL, sstep_addr);
break;
case 2:
/* Continue */
@@ -383,7 +384,7 @@ static void emul_sstep_get(char *arg)
break;
case 3:
/* Clear breakpoint */
- break_helper("z0", 0, sstep_addr);
+ break_helper("z0", NULL, sstep_addr);
break;
default:
eprintk("kgdbts: ERROR failed sstep get emulation\n");
@@ -465,11 +466,11 @@ static struct test_struct sw_breakpoint_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", }, /* Continue */
- { "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
- { "D", "OK", 0, got_break }, /* If the test worked we made it here */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
@@ -499,14 +500,14 @@ static struct test_struct singlestep_break_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", }, /* Continue */
- { "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs }, /* Write registers */
{ "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
- { "g", "kgdbts_break_test", 0, check_single_step },
+ { "g", "kgdbts_break_test", NULL, check_single_step },
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", }, /* Continue */
- { "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs }, /* Write registers */
{ "D", "OK" }, /* Remove all breakpoints and continues */
{ "", "" },
@@ -520,14 +521,14 @@ static struct test_struct do_fork_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", }, /* Continue */
- { "g", "do_fork", 0, check_and_rewind_pc }, /* check location */
+ { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
{ "write", "OK", write_regs }, /* Write registers */
{ "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
- { "g", "do_fork", 0, check_single_step },
+ { "g", "do_fork", NULL, check_single_step },
{ "do_fork", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
- { "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */
+ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
{ "", "" },
};
@@ -538,14 +539,14 @@ static struct test_struct sys_open_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", }, /* Continue */
- { "g", "sys_open", 0, check_and_rewind_pc }, /* check location */
+ { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
{ "write", "OK", write_regs }, /* Write registers */
{ "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
- { "g", "sys_open", 0, check_single_step },
+ { "g", "sys_open", NULL, check_single_step },
{ "sys_open", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
- { "D", "OK", 0, final_ack_set }, /* detach and unregister I/O */
+ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
{ "", "" },
};
@@ -556,11 +557,11 @@ static struct test_struct hw_breakpoint_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */
{ "c", "T0*", }, /* Continue */
- { "g", "kgdbts_break_test", 0, check_and_rewind_pc },
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
- { "D", "OK", 0, got_break }, /* If the test worked we made it here */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
@@ -570,12 +571,12 @@ static struct test_struct hw_breakpoint_test[] = {
static struct test_struct hw_write_break_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */
- { "c", "T0*", 0, got_break }, /* Continue */
- { "g", "silent", 0, check_and_rewind_pc },
+ { "c", "T0*", NULL, got_break }, /* Continue */
+ { "g", "silent", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
- { "D", "OK", 0, got_break }, /* If the test worked we made it here */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
@@ -585,12 +586,12 @@ static struct test_struct hw_write_break_test[] = {
static struct test_struct hw_access_break_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */
- { "c", "T0*", 0, got_break }, /* Continue */
- { "g", "silent", 0, check_and_rewind_pc },
+ { "c", "T0*", NULL, got_break }, /* Continue */
+ { "g", "silent", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
- { "D", "OK", 0, got_break }, /* If the test worked we made it here */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
@@ -599,9 +600,9 @@ static struct test_struct hw_access_break_test[] = {
*/
static struct test_struct nmi_sleep_test[] = {
{ "?", "S0*" }, /* Clear break points */
- { "c", "T0*", 0, got_break }, /* Continue */
+ { "c", "T0*", NULL, got_break }, /* Continue */
{ "D", "OK" }, /* Detach */
- { "D", "OK", 0, got_break }, /* If the test worked we made it here */
+ { "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
@@ -874,18 +875,23 @@ static void kgdbts_run_tests(void)
{
char *ptr;
int fork_test = 0;
- int sys_open_test = 0;
+ int do_sys_open_test = 0;
+ int sstep_test = 1000;
int nmi_sleep = 0;
+ int i;
ptr = strstr(config, "F");
if (ptr)
- fork_test = simple_strtol(ptr+1, NULL, 10);
+ fork_test = simple_strtol(ptr + 1, NULL, 10);
ptr = strstr(config, "S");
if (ptr)
- sys_open_test = simple_strtol(ptr+1, NULL, 10);
+ do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
ptr = strstr(config, "N");
if (ptr)
nmi_sleep = simple_strtol(ptr+1, NULL, 10);
+ ptr = strstr(config, "I");
+ if (ptr)
+ sstep_test = simple_strtol(ptr+1, NULL, 10);
/* required internal KGDB tests */
v1printk("kgdbts:RUN plant and detach test\n");
@@ -894,8 +900,13 @@ static void kgdbts_run_tests(void)
run_breakpoint_test(0);
v1printk("kgdbts:RUN bad memory access test\n");
run_bad_read_test();
- v1printk("kgdbts:RUN singlestep breakpoint test\n");
- run_singlestep_break_test();
+ v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test);
+ for (i = 0; i < sstep_test; i++) {
+ run_singlestep_break_test();
+ if (i % 100 == 0)
+ v1printk("kgdbts:RUN singlestep [%i/%i]\n",
+ i, sstep_test);
+ }
/* ===Optional tests=== */
@@ -922,7 +933,7 @@ static void kgdbts_run_tests(void)
repeat_test = fork_test;
printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n",
repeat_test);
- kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg");
+ kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
run_do_fork_test();
return;
}
@@ -931,11 +942,11 @@ static void kgdbts_run_tests(void)
* executed because a kernel thread will be spawned at the very
* end to unregister the debug hooks.
*/
- if (sys_open_test) {
- repeat_test = sys_open_test;
+ if (do_sys_open_test) {
+ repeat_test = do_sys_open_test;
printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n",
repeat_test);
- kthread_run(kgdbts_unreg_thread, 0, "kgdbts_unreg");
+ kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
run_sys_open_test();
return;
}
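kgdbts reads its options from a flat config string of letter-prefixed counts ("F100", "S50", "N5", and now "I500" for the single-step iterations), located with strstr() and converted with simple_strtol(). A hedged sketch of that parsing with invented variable and function names:

#include <linux/kernel.h>
#include <linux/string.h>

/* Sketch: pull "<letter><number>" options out of a config string such as
 * "V1F100I500".  Defaults mirror the ones used above. */
static void example_parse_config(const char *config, int *fork_test,
				 int *sys_open_test, int *nmi_sleep,
				 int *sstep_test)
{
	const char *ptr;

	*fork_test = 0;
	*sys_open_test = 0;
	*nmi_sleep = 0;
	*sstep_test = 1000;		/* default single-step iteration count */

	ptr = strstr(config, "F");
	if (ptr)
		*fork_test = simple_strtol(ptr + 1, NULL, 10);
	ptr = strstr(config, "I");
	if (ptr)
		*sstep_test = simple_strtol(ptr + 1, NULL, 10);
	/* "S" and "N" are handled the same way */
}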
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 5515234be86..03a87a307e3 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -157,215 +157,136 @@ struct xpc_msg {
/*
* Define the return values and values passed to user's callout functions.
* (It is important to add new value codes at the end just preceding
- * xpcUnknownReason, which must have the highest numerical value.)
+ * xpUnknownReason, which must have the highest numerical value.)
*/
-enum xpc_retval {
- xpcSuccess = 0,
+enum xp_retval {
+ xpSuccess = 0,
- xpcNotConnected, /* 1: channel is not connected */
- xpcConnected, /* 2: channel connected (opened) */
- xpcRETIRED1, /* 3: (formerly xpcDisconnected) */
+ xpNotConnected, /* 1: channel is not connected */
+ xpConnected, /* 2: channel connected (opened) */
+ xpRETIRED1, /* 3: (formerly xpDisconnected) */
- xpcMsgReceived, /* 4: message received */
- xpcMsgDelivered, /* 5: message delivered and acknowledged */
+ xpMsgReceived, /* 4: message received */
+ xpMsgDelivered, /* 5: message delivered and acknowledged */
- xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */
+ xpRETIRED2, /* 6: (formerly xpTransferFailed) */
- xpcNoWait, /* 7: operation would require wait */
- xpcRetry, /* 8: retry operation */
- xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
- xpcInterrupted, /* 10: interrupted wait */
+ xpNoWait, /* 7: operation would require wait */
+ xpRetry, /* 8: retry operation */
+ xpTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
+ xpInterrupted, /* 10: interrupted wait */
- xpcUnequalMsgSizes, /* 11: message size disparity between sides */
- xpcInvalidAddress, /* 12: invalid address */
+ xpUnequalMsgSizes, /* 11: message size disparity between sides */
+ xpInvalidAddress, /* 12: invalid address */
- xpcNoMemory, /* 13: no memory available for XPC structures */
- xpcLackOfResources, /* 14: insufficient resources for operation */
- xpcUnregistered, /* 15: channel is not registered */
- xpcAlreadyRegistered, /* 16: channel is already registered */
+ xpNoMemory, /* 13: no memory available for XPC structures */
+ xpLackOfResources, /* 14: insufficient resources for operation */
+ xpUnregistered, /* 15: channel is not registered */
+ xpAlreadyRegistered, /* 16: channel is already registered */
- xpcPartitionDown, /* 17: remote partition is down */
- xpcNotLoaded, /* 18: XPC module is not loaded */
- xpcUnloading, /* 19: this side is unloading XPC module */
+ xpPartitionDown, /* 17: remote partition is down */
+ xpNotLoaded, /* 18: XPC module is not loaded */
+ xpUnloading, /* 19: this side is unloading XPC module */
- xpcBadMagic, /* 20: XPC MAGIC string not found */
+ xpBadMagic, /* 20: XPC MAGIC string not found */
- xpcReactivating, /* 21: remote partition was reactivated */
+ xpReactivating, /* 21: remote partition was reactivated */
- xpcUnregistering, /* 22: this side is unregistering channel */
- xpcOtherUnregistering, /* 23: other side is unregistering channel */
+ xpUnregistering, /* 22: this side is unregistering channel */
+ xpOtherUnregistering, /* 23: other side is unregistering channel */
- xpcCloneKThread, /* 24: cloning kernel thread */
- xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */
+ xpCloneKThread, /* 24: cloning kernel thread */
+ xpCloneKThreadFailed, /* 25: cloning kernel thread failed */
- xpcNoHeartbeat, /* 26: remote partition has no heartbeat */
+ xpNoHeartbeat, /* 26: remote partition has no heartbeat */
- xpcPioReadError, /* 27: PIO read error */
- xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */
+ xpPioReadError, /* 27: PIO read error */
+ xpPhysAddrRegFailed, /* 28: registration of phys addr range failed */
- xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */
- xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */
- xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */
- xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */
- xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */
- xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */
- xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */
- xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */
- xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */
- xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */
+ xpRETIRED3, /* 29: (formerly xpBteDirectoryError) */
+ xpRETIRED4, /* 30: (formerly xpBtePoisonError) */
+ xpRETIRED5, /* 31: (formerly xpBteWriteError) */
+ xpRETIRED6, /* 32: (formerly xpBteAccessError) */
+ xpRETIRED7, /* 33: (formerly xpBtePWriteError) */
+ xpRETIRED8, /* 34: (formerly xpBtePReadError) */
+ xpRETIRED9, /* 35: (formerly xpBteTimeOutError) */
+ xpRETIRED10, /* 36: (formerly xpBteXtalkError) */
+ xpRETIRED11, /* 37: (formerly xpBteNotAvailable) */
+ xpRETIRED12, /* 38: (formerly xpBteUnmappedError) */
- xpcBadVersion, /* 39: bad version number */
- xpcVarsNotSet, /* 40: the XPC variables are not set up */
- xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
- xpcInvalidPartid, /* 42: invalid partition ID */
- xpcLocalPartid, /* 43: local partition ID */
+ xpBadVersion, /* 39: bad version number */
+ xpVarsNotSet, /* 40: the XPC variables are not set up */
+ xpNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
+ xpInvalidPartid, /* 42: invalid partition ID */
+ xpLocalPartid, /* 43: local partition ID */
- xpcOtherGoingDown, /* 44: other side going down, reason unknown */
- xpcSystemGoingDown, /* 45: system is going down, reason unknown */
- xpcSystemHalt, /* 46: system is being halted */
- xpcSystemReboot, /* 47: system is being rebooted */
- xpcSystemPoweroff, /* 48: system is being powered off */
+ xpOtherGoingDown, /* 44: other side going down, reason unknown */
+ xpSystemGoingDown, /* 45: system is going down, reason unknown */
+ xpSystemHalt, /* 46: system is being halted */
+ xpSystemReboot, /* 47: system is being rebooted */
+ xpSystemPoweroff, /* 48: system is being powered off */
- xpcDisconnecting, /* 49: channel disconnecting (closing) */
+ xpDisconnecting, /* 49: channel disconnecting (closing) */
- xpcOpenCloseError, /* 50: channel open/close protocol error */
+ xpOpenCloseError, /* 50: channel open/close protocol error */
- xpcDisconnected, /* 51: channel disconnected (closed) */
+ xpDisconnected, /* 51: channel disconnected (closed) */
- xpcBteSh2Start, /* 52: BTE CRB timeout */
+ xpBteCopyError, /* 52: bte_copy() returned error */
- /* 53: 0x1 BTE Error Response Short */
- xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT,
-
- /* 54: 0x2 BTE Error Response Long */
- xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG,
-
- /* 56: 0x4 BTE Error Response DSB */
- xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP,
-
- /* 60: 0x8 BTE Error Response Access */
- xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS,
-
- /* 68: 0x10 BTE Error CRB timeout */
- xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO,
-
- /* 84: 0x20 BTE Error NACK limit */
- xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT,
-
- /* 115: BTE end */
- xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL,
-
- xpcUnknownReason /* 116: unknown reason - must be last in enum */
+ xpUnknownReason /* 53: unknown reason - must be last in enum */
};
/*
- * Define the callout function types used by XPC to update the user on
- * connection activity and state changes (via the user function registered by
- * xpc_connect()) and to notify them of messages received and delivered (via
- * the user function registered by xpc_send_notify()).
- *
- * The two function types are xpc_channel_func and xpc_notify_func and
- * both share the following arguments, with the exception of "data", which
- * only xpc_channel_func has.
+ * Define the callout function type used by XPC to update the user on
+ * connection activity and state changes via the user function registered
+ * by xpc_connect().
*
* Arguments:
*
- * reason - reason code. (See following table.)
+ * reason - reason code.
* partid - partition ID associated with condition.
* ch_number - channel # associated with condition.
- * data - pointer to optional data. (See following table.)
+ * data - pointer to optional data.
* key - pointer to optional user-defined value provided as the "key"
- * argument to xpc_connect() or xpc_send_notify().
+ * argument to xpc_connect().
*
- * In the following table the "Optional Data" column applies to callouts made
- * to functions registered by xpc_connect(). A "NA" in that column indicates
- * that this reason code can be passed to functions registered by
- * xpc_send_notify() (i.e. they don't have data arguments).
+ * A reason code of xpConnected indicates that a connection has been
+ * established to the specified partition on the specified channel. The data
+ * argument indicates the max number of entries allowed in the message queue.
*
- * Also, the first three reason codes in the following table indicate
- * success, whereas the others indicate failure. When a failure reason code
- * is received, one can assume that the channel is not connected.
+ * A reason code of xpMsgReceived indicates that an XPC message arrived from
+ * the specified partition on the specified channel. The data argument
+ * specifies the address of the message's payload. The user must call
+ * xpc_received() when finished with the payload.
*
- *
- * Reason Code | Cause | Optional Data
- * =====================+================================+=====================
- * xpcConnected | connection has been established| max #of entries
- * | to the specified partition on | allowed in message
- * | the specified channel | queue
- * ---------------------+--------------------------------+---------------------
- * xpcMsgReceived | an XPC message arrived from | address of payload
- * | the specified partition on the |
- * | specified channel | [the user must call
- * | | xpc_received() when
- * | | finished with the
- * | | payload]
- * ---------------------+--------------------------------+---------------------
- * xpcMsgDelivered | notification that the message | NA
- * | was delivered to the intended |
- * | recipient and that they have |
- * | acknowledged its receipt by |
- * | calling xpc_received() |
- * =====================+================================+=====================
- * xpcUnequalMsgSizes | can't connect to the specified | NULL
- * | partition on the specified |
- * | channel because of mismatched |
- * | message sizes |
- * ---------------------+--------------------------------+---------------------
- * xpcNoMemory | insufficient memory avaiable | NULL
- * | to allocate message queue |
- * ---------------------+--------------------------------+---------------------
- * xpcLackOfResources | lack of resources to create | NULL
- * | the necessary kthreads to |
- * | support the channel |
- * ---------------------+--------------------------------+---------------------
- * xpcUnregistering | this side's user has | NULL or NA
- * | unregistered by calling |
- * | xpc_disconnect() |
- * ---------------------+--------------------------------+---------------------
- * xpcOtherUnregistering| the other side's user has | NULL or NA
- * | unregistered by calling |
- * | xpc_disconnect() |
- * ---------------------+--------------------------------+---------------------
- * xpcNoHeartbeat | the other side's XPC is no | NULL or NA
- * | longer heartbeating |
- * | |
- * ---------------------+--------------------------------+---------------------
- * xpcUnloading | this side's XPC module is | NULL or NA
- * | being unloaded |
- * | |
- * ---------------------+--------------------------------+---------------------
- * xpcOtherUnloading | the other side's XPC module is | NULL or NA
- * | is being unloaded |
- * | |
- * ---------------------+--------------------------------+---------------------
- * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA
- * | error while sending an IPI |
- * | |
- * ---------------------+--------------------------------+---------------------
- * xpcInvalidAddress | the address either received or | NULL or NA
- * | sent by the specified partition|
- * | is invalid |
- * ---------------------+--------------------------------+---------------------
- * xpcBteNotAvailable | attempt to pull data from the | NULL or NA
- * xpcBtePoisonError | specified partition over the |
- * xpcBteWriteError | specified channel via a |
- * xpcBteAccessError | bte_copy() failed |
- * xpcBteTimeOutError | |
- * xpcBteXtalkError | |
- * xpcBteDirectoryError | |
- * xpcBteGenericError | |
- * xpcBteUnmappedError | |
- * ---------------------+--------------------------------+---------------------
- * xpcUnknownReason | the specified channel to the | NULL or NA
- * | specified partition was |
- * | unavailable for unknown reasons|
- * =====================+================================+=====================
+ * All other reason codes indicate failure. The data argument is NULL.
+ * When a failure reason code is received, one can assume that the channel
+ * is not connected.
*/
-
-typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid,
+typedef void (*xpc_channel_func) (enum xp_retval reason, short partid,
int ch_number, void *data, void *key);
-typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid,
+/*
+ * Define the callout function type used by XPC to notify the user of
+ * messages received and delivered via the user function registered by
+ * xpc_send_notify().
+ *
+ * Arguments:
+ *
+ * reason - reason code.
+ * partid - partition ID associated with condition.
+ * ch_number - channel # associated with condition.
+ * key - pointer to optional user-defined value provided as the "key"
+ * argument to xpc_send_notify().
+ *
+ * A reason code of xpMsgDelivered indicates that the message was delivered
+ * to the intended recipient and that they have acknowledged its receipt by
+ * calling xpc_received().
+ *
+ * All other reason codes indicate failure.
+ */
+typedef void (*xpc_notify_func) (enum xp_retval reason, short partid,
int ch_number, void *key);
/*
@@ -401,57 +322,57 @@ struct xpc_registration {
struct xpc_interface {
void (*connect) (int);
void (*disconnect) (int);
- enum xpc_retval (*allocate) (partid_t, int, u32, void **);
- enum xpc_retval (*send) (partid_t, int, void *);
- enum xpc_retval (*send_notify) (partid_t, int, void *,
+ enum xp_retval (*allocate) (short, int, u32, void **);
+ enum xp_retval (*send) (short, int, void *);
+ enum xp_retval (*send_notify) (short, int, void *,
xpc_notify_func, void *);
- void (*received) (partid_t, int, void *);
- enum xpc_retval (*partid_to_nasids) (partid_t, void *);
+ void (*received) (short, int, void *);
+ enum xp_retval (*partid_to_nasids) (short, void *);
};
extern struct xpc_interface xpc_interface;
extern void xpc_set_interface(void (*)(int),
void (*)(int),
- enum xpc_retval (*)(partid_t, int, u32, void **),
- enum xpc_retval (*)(partid_t, int, void *),
- enum xpc_retval (*)(partid_t, int, void *,
+ enum xp_retval (*)(short, int, u32, void **),
+ enum xp_retval (*)(short, int, void *),
+ enum xp_retval (*)(short, int, void *,
xpc_notify_func, void *),
- void (*)(partid_t, int, void *),
- enum xpc_retval (*)(partid_t, void *));
+ void (*)(short, int, void *),
+ enum xp_retval (*)(short, void *));
extern void xpc_clear_interface(void);
-extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
+extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16,
u16, u32, u32);
extern void xpc_disconnect(int);
-static inline enum xpc_retval
-xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
+static inline enum xp_retval
+xpc_allocate(short partid, int ch_number, u32 flags, void **payload)
{
return xpc_interface.allocate(partid, ch_number, flags, payload);
}
-static inline enum xpc_retval
-xpc_send(partid_t partid, int ch_number, void *payload)
+static inline enum xp_retval
+xpc_send(short partid, int ch_number, void *payload)
{
return xpc_interface.send(partid, ch_number, payload);
}
-static inline enum xpc_retval
-xpc_send_notify(partid_t partid, int ch_number, void *payload,
+static inline enum xp_retval
+xpc_send_notify(short partid, int ch_number, void *payload,
xpc_notify_func func, void *key)
{
return xpc_interface.send_notify(partid, ch_number, payload, func, key);
}
static inline void
-xpc_received(partid_t partid, int ch_number, void *payload)
+xpc_received(short partid, int ch_number, void *payload)
{
return xpc_interface.received(partid, ch_number, payload);
}
-static inline enum xpc_retval
-xpc_partid_to_nasids(partid_t partid, void *nasids)
+static inline enum xp_retval
+xpc_partid_to_nasids(short partid, void *nasids)
{
return xpc_interface.partid_to_nasids(partid, nasids);
}
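The xp.h rework renames the return codes from xpc* to xp* (enum xpc_retval becomes xp_retval), replaces partid_t with a plain short, retires the per-flavor BTE error codes in favor of xpBteCopyError, and splits the callout documentation into channel and notify variants. A sketch of a channel callout written against the new types (everything except the typedefs and enum values is invented, and the local "xp.h" include path is assumed):

#include <linux/kernel.h>
#include "xp.h"		/* enum xp_retval, xpc_channel_func */

void example_channel_callout(enum xp_retval reason, short partid,
			     int ch_number, void *data, void *key)
{
	switch (reason) {
	case xpConnected:
		/* data carries the max number of message-queue entries */
		printk(KERN_INFO "channel %d to partition %d connected\n",
		       ch_number, partid);
		break;
	case xpMsgReceived:
		/* data points at the payload; call xpc_received() when done */
		break;
	default:
		/* any other reason code means the channel is not connected */
		printk(KERN_INFO "channel %d to partition %d: reason %d\n",
		       ch_number, partid, reason);
		break;
	}
}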
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 1fbf99bae96..196480b691a 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -42,21 +42,21 @@ EXPORT_SYMBOL_GPL(xpc_registrations);
/*
* Initialize the XPC interface to indicate that XPC isn't loaded.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_notloaded(void)
{
- return xpcNotLoaded;
+ return xpNotLoaded;
}
struct xpc_interface xpc_interface = {
(void (*)(int))xpc_notloaded,
(void (*)(int))xpc_notloaded,
- (enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded,
- (enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded,
- (enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *))
+ (enum xp_retval(*)(short, int, u32, void **))xpc_notloaded,
+ (enum xp_retval(*)(short, int, void *))xpc_notloaded,
+ (enum xp_retval(*)(short, int, void *, xpc_notify_func, void *))
xpc_notloaded,
- (void (*)(partid_t, int, void *))xpc_notloaded,
- (enum xpc_retval(*)(partid_t, void *))xpc_notloaded
+ (void (*)(short, int, void *))xpc_notloaded,
+ (enum xp_retval(*)(short, void *))xpc_notloaded
};
EXPORT_SYMBOL_GPL(xpc_interface);
@@ -66,12 +66,12 @@ EXPORT_SYMBOL_GPL(xpc_interface);
void
xpc_set_interface(void (*connect) (int),
void (*disconnect) (int),
- enum xpc_retval (*allocate) (partid_t, int, u32, void **),
- enum xpc_retval (*send) (partid_t, int, void *),
- enum xpc_retval (*send_notify) (partid_t, int, void *,
+ enum xp_retval (*allocate) (short, int, u32, void **),
+ enum xp_retval (*send) (short, int, void *),
+ enum xp_retval (*send_notify) (short, int, void *,
xpc_notify_func, void *),
- void (*received) (partid_t, int, void *),
- enum xpc_retval (*partid_to_nasids) (partid_t, void *))
+ void (*received) (short, int, void *),
+ enum xp_retval (*partid_to_nasids) (short, void *))
{
xpc_interface.connect = connect;
xpc_interface.disconnect = disconnect;
@@ -91,16 +91,16 @@ xpc_clear_interface(void)
{
xpc_interface.connect = (void (*)(int))xpc_notloaded;
xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
- xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32,
+ xpc_interface.allocate = (enum xp_retval(*)(short, int, u32,
void **))xpc_notloaded;
- xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *))
+ xpc_interface.send = (enum xp_retval(*)(short, int, void *))
xpc_notloaded;
- xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *,
+ xpc_interface.send_notify = (enum xp_retval(*)(short, int, void *,
xpc_notify_func,
void *))xpc_notloaded;
- xpc_interface.received = (void (*)(partid_t, int, void *))
+ xpc_interface.received = (void (*)(short, int, void *))
xpc_notloaded;
- xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *))
+ xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
xpc_notloaded;
}
EXPORT_SYMBOL_GPL(xpc_clear_interface);
@@ -123,13 +123,13 @@ EXPORT_SYMBOL_GPL(xpc_clear_interface);
* nentries - max #of XPC message entries a message queue can contain.
* The actual number, which is determined when a connection
* is established and may be less then requested, will be
- * passed to the user via the xpcConnected callout.
+ * passed to the user via the xpConnected callout.
* assigned_limit - max number of kthreads allowed to be processing
* messages (per connection) at any given instant.
* idle_limit - max number of kthreads allowed to be idle at any given
* instant.
*/
-enum xpc_retval
+enum xp_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
u16 nentries, u32 assigned_limit, u32 idle_limit)
{
@@ -143,12 +143,12 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
registration = &xpc_registrations[ch_number];
if (mutex_lock_interruptible(&registration->mutex) != 0)
- return xpcInterrupted;
+ return xpInterrupted;
/* if XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func != NULL) {
mutex_unlock(&registration->mutex);
- return xpcAlreadyRegistered;
+ return xpAlreadyRegistered;
}
/* register the channel for connection */
@@ -163,7 +163,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
xpc_interface.connect(ch_number);
- return xpcSuccess;
+ return xpSuccess;
}
EXPORT_SYMBOL_GPL(xpc_connect);
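xpc_connect() keeps its argument list but now returns the renamed enum. A hedged usage sketch, registering a channel with a callout like the one sketched after the xp.h diff (channel number, payload size, queue depth and kthread limits are made up):

#include <linux/errno.h>
#include "xp.h"		/* xpc_connect(), enum xp_retval */

/* Callout from the earlier sketch; declared here only to keep this
 * fragment self-contained. */
void example_channel_callout(enum xp_retval reason, short partid,
			     int ch_number, void *data, void *key);

static int example_register_channel(void)
{
	enum xp_retval ret;

	ret = xpc_connect(3,			/* ch_number */
			  example_channel_callout,
			  NULL,			/* key */
			  128,			/* payload_size */
			  64,			/* nentries */
			  4,			/* assigned_limit */
			  2);			/* idle_limit */
	if (ret != xpSuccess)
		return -EBUSY;
	return 0;
}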
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 9eb6d4a3269..11ac267ed68 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -172,13 +172,13 @@ struct xpc_vars {
(_version >= _XPC_VERSION(3, 1))
static inline int
-xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
+xpc_hb_allowed(short partid, struct xpc_vars *vars)
{
return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
}
static inline void
-xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
+xpc_allow_hb(short partid, struct xpc_vars *vars)
{
u64 old_mask, new_mask;
@@ -190,7 +190,7 @@ xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
}
static inline void
-xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
+xpc_disallow_hb(short partid, struct xpc_vars *vars)
{
u64 old_mask, new_mask;
@@ -408,11 +408,11 @@ struct xpc_notify {
* messages.
*/
struct xpc_channel {
- partid_t partid; /* ID of remote partition connected */
+ short partid; /* ID of remote partition connected */
spinlock_t lock; /* lock for updating this structure */
u32 flags; /* general flags */
- enum xpc_retval reason; /* reason why channel is disconnect'g */
+ enum xp_retval reason; /* reason why channel is disconnect'g */
int reason_line; /* line# disconnect initiated from */
u16 number; /* channel # */
@@ -522,7 +522,7 @@ struct xpc_partition {
spinlock_t act_lock; /* protect updating of act_state */
u8 act_state; /* from XPC HB viewpoint */
u8 remote_vars_version; /* version# of partition's vars */
- enum xpc_retval reason; /* reason partition is deactivating */
+ enum xp_retval reason; /* reason partition is deactivating */
int reason_line; /* line# deactivation initiated from */
int reactivate_nasid; /* nasid in partition to reactivate */
@@ -615,7 +615,7 @@ struct xpc_partition {
/* interval in seconds to print 'waiting disengagement' messages */
#define XPC_DISENGAGE_PRINTMSG_INTERVAL 10
-#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
+#define XPC_PARTID(_p) ((short)((_p) - &xpc_partitions[0]))
/* found in xp_main.c */
extern struct xpc_registration xpc_registrations[];
@@ -646,31 +646,31 @@ extern void xpc_allow_IPI_ops(void);
extern void xpc_restrict_IPI_ops(void);
extern int xpc_identify_act_IRQ_sender(void);
extern int xpc_partition_disengaged(struct xpc_partition *);
-extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
+extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
extern void xpc_mark_partition_inactive(struct xpc_partition *);
extern void xpc_discovery(void);
extern void xpc_check_remote_hb(void);
extern void xpc_deactivate_partition(const int, struct xpc_partition *,
- enum xpc_retval);
-extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
+ enum xp_retval);
+extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
/* found in xpc_channel.c */
extern void xpc_initiate_connect(int);
extern void xpc_initiate_disconnect(int);
-extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
-extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
-extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
- xpc_notify_func, void *);
-extern void xpc_initiate_received(partid_t, int, void *);
-extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
-extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
+extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **);
+extern enum xp_retval xpc_initiate_send(short, int, void *);
+extern enum xp_retval xpc_initiate_send_notify(short, int, void *,
+ xpc_notify_func, void *);
+extern void xpc_initiate_received(short, int, void *);
+extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *);
+extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *);
extern void xpc_process_channel_activity(struct xpc_partition *);
extern void xpc_connected_callout(struct xpc_channel *);
extern void xpc_deliver_msg(struct xpc_channel *);
extern void xpc_disconnect_channel(const int, struct xpc_channel *,
- enum xpc_retval, unsigned long *);
-extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
-extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
+ enum xp_retval, unsigned long *);
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval);
+extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval);
extern void xpc_teardown_infrastructure(struct xpc_partition *);
static inline void
@@ -901,7 +901,7 @@ xpc_IPI_receive(AMO_t *amo)
return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
}
-static inline enum xpc_retval
+static inline enum xp_retval
xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
{
int ret = 0;
@@ -923,7 +923,7 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
local_irq_restore(irq_flags);
- return ((ret == 0) ? xpcSuccess : xpcPioReadError);
+ return ((ret == 0) ? xpSuccess : xpPioReadError);
}
/*
@@ -992,7 +992,7 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
unsigned long *irq_flags)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
- enum xpc_retval ret;
+ enum xp_retval ret;
if (likely(part->act_state != XPC_P_DEACTIVATING)) {
ret = xpc_IPI_send(part->remote_IPI_amo_va,
@@ -1001,7 +1001,7 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY);
dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
ipi_flag_string, ch->partid, ch->number, ret);
- if (unlikely(ret != xpcSuccess)) {
+ if (unlikely(ret != xpSuccess)) {
if (irq_flags != NULL)
spin_unlock_irqrestore(&ch->lock, *irq_flags);
XPC_DEACTIVATE_PARTITION(part, ret);
@@ -1123,41 +1123,10 @@ xpc_IPI_init(int index)
return amo;
}
-static inline enum xpc_retval
+static inline enum xp_retval
xpc_map_bte_errors(bte_result_t error)
{
- if (error == BTE_SUCCESS)
- return xpcSuccess;
-
- if (is_shub2()) {
- if (BTE_VALID_SH2_ERROR(error))
- return xpcBteSh2Start + error;
- return xpcBteUnmappedError;
- }
- switch (error) {
- case BTE_SUCCESS:
- return xpcSuccess;
- case BTEFAIL_DIR:
- return xpcBteDirectoryError;
- case BTEFAIL_POISON:
- return xpcBtePoisonError;
- case BTEFAIL_WERR:
- return xpcBteWriteError;
- case BTEFAIL_ACCESS:
- return xpcBteAccessError;
- case BTEFAIL_PWERR:
- return xpcBtePWriteError;
- case BTEFAIL_PRERR:
- return xpcBtePReadError;
- case BTEFAIL_TOUT:
- return xpcBteTimeOutError;
- case BTEFAIL_XTERR:
- return xpcBteXtalkError;
- case BTEFAIL_NOTAVAIL:
- return xpcBteNotAvailable;
- default:
- return xpcBteUnmappedError;
- }
+ return ((error == BTE_SUCCESS) ? xpSuccess : xpBteCopyError);
}
/*
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index bfcb9ea968e..9c90c2d55c0 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -53,7 +53,7 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
* Set up the initial values for the XPartition Communication channels.
*/
static void
-xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
+xpc_initialize_channels(struct xpc_partition *part, short partid)
{
int ch_number;
struct xpc_channel *ch;
@@ -90,12 +90,12 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
* Setup the infrastructure necessary to support XPartition Communication
* between the specified remote partition and the local one.
*/
-enum xpc_retval
+enum xp_retval
xpc_setup_infrastructure(struct xpc_partition *part)
{
int ret, cpuid;
struct timer_list *timer;
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
/*
* Zero out MOST of the entry for this partition. Only the fields
@@ -114,7 +114,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
GFP_KERNEL);
if (part->channels == NULL) {
dev_err(xpc_chan, "can't get memory for channels\n");
- return xpcNoMemory;
+ return xpNoMemory;
}
part->nchannels = XPC_NCHANNELS;
@@ -129,7 +129,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
part->channels = NULL;
dev_err(xpc_chan, "can't get memory for local get/put "
"values\n");
- return xpcNoMemory;
+ return xpNoMemory;
}
part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
@@ -143,7 +143,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcNoMemory;
+ return xpNoMemory;
}
/* allocate all the required open and close args */
@@ -159,7 +159,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcNoMemory;
+ return xpNoMemory;
}
part->remote_openclose_args =
@@ -175,7 +175,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcNoMemory;
+ return xpNoMemory;
}
xpc_initialize_channels(part, partid);
@@ -209,7 +209,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
part->local_GPs = NULL;
kfree(part->channels);
part->channels = NULL;
- return xpcLackOfResources;
+ return xpLackOfResources;
}
/* Setup a timer to check for dropped IPIs */
@@ -243,7 +243,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
xpc_vars_part[partid].nchannels = part->nchannels;
xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
- return xpcSuccess;
+ return xpSuccess;
}
/*
@@ -254,7 +254,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
* dst must be a cacheline aligned virtual address on this partition.
* cnt must be an cacheline sized
*/
-static enum xpc_retval
+static enum xp_retval
xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
const void *src, size_t cnt)
{
@@ -270,7 +270,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
(BTE_NORMAL | BTE_WACQUIRE), NULL);
if (bte_ret == BTE_SUCCESS)
- return xpcSuccess;
+ return xpSuccess;
dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
XPC_PARTID(part), bte_ret);
@@ -282,7 +282,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
* Pull the remote per partition specific variables from the specified
* partition.
*/
-enum xpc_retval
+enum xp_retval
xpc_pull_remote_vars_part(struct xpc_partition *part)
{
u8 buffer[L1_CACHE_BYTES * 2];
@@ -290,8 +290,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
(struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
struct xpc_vars_part *pulled_entry;
u64 remote_entry_cacheline_pa, remote_entry_pa;
- partid_t partid = XPC_PARTID(part);
- enum xpc_retval ret;
+ short partid = XPC_PARTID(part);
+ enum xp_retval ret;
/* pull the cacheline that contains the variables we're interested in */
@@ -311,7 +311,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
(void *)remote_entry_cacheline_pa,
L1_CACHE_BYTES);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
"partition %d, ret=%d\n", partid, ret);
return ret;
@@ -326,11 +326,11 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
"partition %d has bad magic value (=0x%lx)\n",
partid, sn_partition_id, pulled_entry->magic);
- return xpcBadMagic;
+ return xpBadMagic;
}
/* they've not been initialized yet */
- return xpcRetry;
+ return xpRetry;
}
if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
@@ -344,7 +344,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
dev_err(xpc_chan, "partition %d's XPC vars_part for "
"partition %d are not valid\n", partid,
sn_partition_id);
- return xpcInvalidAddress;
+ return xpInvalidAddress;
}
/* the variables we imported look to be valid */
@@ -366,9 +366,9 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
}
if (pulled_entry->magic == XPC_VP_MAGIC1)
- return xpcRetry;
+ return xpRetry;
- return xpcSuccess;
+ return xpSuccess;
}
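
The hunk above is the pull side of a two-stage handshake: a zeroed magic word means the remote entry has not been published yet, XPC_VP_MAGIC1 means it is published but the remote has not yet seen ours, XPC_VP_MAGIC2 means both sides are done, and anything else is rejected as xpBadMagic. A compact sketch of that decision, with stand-in magic values (the driver's real constants differ):

#include <stdint.h>

enum xp_retval { xpSuccess, xpRetry, xpBadMagic };

/* Stand-ins for XPC_VP_MAGIC1/2; the driver uses distinctive 64-bit values. */
#define VP_MAGIC1 1ULL
#define VP_MAGIC2 2ULL

/* The classification made in xpc_pull_remote_vars_part(): zero and MAGIC1
 * both mean "come back later" (xpRetry), MAGIC2 means the handshake is
 * complete, and any other value is treated as corruption. */
static enum xp_retval classify_magic(uint64_t magic)
{
        if (magic == VP_MAGIC2)
                return xpSuccess;
        if (magic == VP_MAGIC1 || magic == 0)
                return xpRetry;
        return xpBadMagic;
}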
/*
@@ -379,7 +379,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
{
unsigned long irq_flags;
u64 IPI_amo;
- enum xpc_retval ret;
+ enum xp_retval ret;
/*
* See if there are any IPI flags to be handled.
@@ -398,7 +398,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
(void *)part->
remote_openclose_args_pa,
XPC_OPENCLOSE_ARGS_SIZE);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
dev_dbg(xpc_chan, "failed to pull openclose args from "
@@ -414,7 +414,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
(void *)part->remote_GPs_pa,
XPC_GP_SIZE);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
XPC_DEACTIVATE_PARTITION(part, ret);
dev_dbg(xpc_chan, "failed to pull GPs from partition "
@@ -431,7 +431,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
/*
* Allocate the local message queue and the notify queue.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_local_msgqueue(struct xpc_channel *ch)
{
unsigned long irq_flags;
@@ -464,18 +464,18 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
ch->local_nentries = nentries;
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpcNoMemory;
+ return xpNoMemory;
}
/*
* Allocate the cached remote message queue.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
{
unsigned long irq_flags;
@@ -502,12 +502,12 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
ch->remote_nentries = nentries;
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
- return xpcNoMemory;
+ return xpNoMemory;
}
/*
@@ -515,20 +515,20 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
*
* Note: Assumes all of the channel sizes are filled in.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_msgqueues(struct xpc_channel *ch)
{
unsigned long irq_flags;
- enum xpc_retval ret;
+ enum xp_retval ret;
DBUG_ON(ch->flags & XPC_C_SETUP);
ret = xpc_allocate_local_msgqueue(ch);
- if (ret != xpcSuccess)
+ if (ret != xpSuccess)
return ret;
ret = xpc_allocate_remote_msgqueue(ch);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
kfree(ch->local_msgqueue_base);
ch->local_msgqueue = NULL;
kfree(ch->notify_queue);
@@ -540,7 +540,7 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
ch->flags |= XPC_C_SETUP;
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
/*
@@ -552,7 +552,7 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
DBUG_ON(!spin_is_locked(&ch->lock));
@@ -568,7 +568,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
ret = xpc_allocate_msgqueues(ch);
spin_lock_irqsave(&ch->lock, *irq_flags);
- if (ret != xpcSuccess)
+ if (ret != xpSuccess)
XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
@@ -603,7 +603,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
* Notify those who wanted to be notified upon delivery of their message.
*/
static void
-xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
+xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
struct xpc_notify *notify;
u8 notify_type;
@@ -748,7 +748,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
spin_unlock_irqrestore(&ch->lock, *irq_flags);
- xpc_disconnect_callout(ch, xpcDisconnected);
+ xpc_disconnect_callout(ch, xpDisconnected);
spin_lock_irqsave(&ch->lock, *irq_flags);
}
@@ -791,7 +791,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
struct xpc_openclose_args *args =
&part->remote_openclose_args[ch_number];
struct xpc_channel *ch = &part->channels[ch_number];
- enum xpc_retval reason;
+ enum xp_retval reason;
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -871,10 +871,10 @@ again:
if (!(ch->flags & XPC_C_DISCONNECTING)) {
reason = args->reason;
- if (reason <= xpcSuccess || reason > xpcUnknownReason)
- reason = xpcUnknownReason;
- else if (reason == xpcUnregistering)
- reason = xpcOtherUnregistering;
+ if (reason <= xpSuccess || reason > xpUnknownReason)
+ reason = xpUnknownReason;
+ else if (reason == xpUnregistering)
+ reason = xpOtherUnregistering;
XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
@@ -961,7 +961,7 @@ again:
if (ch->flags & XPC_C_OPENREQUEST) {
if (args->msg_size != ch->msg_size) {
- XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+ XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
@@ -991,7 +991,7 @@ again:
return;
}
if (!(ch->flags & XPC_C_OPENREQUEST)) {
- XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
+ XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
@@ -1042,18 +1042,18 @@ again:
/*
* Attempt to establish a channel connection to a remote partition.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
unsigned long irq_flags;
struct xpc_registration *registration = &xpc_registrations[ch->number];
if (mutex_trylock(&registration->mutex) == 0)
- return xpcRetry;
+ return xpRetry;
if (!XPC_CHANNEL_REGISTERED(ch->number)) {
mutex_unlock(&registration->mutex);
- return xpcUnregistered;
+ return xpUnregistered;
}
spin_lock_irqsave(&ch->lock, irq_flags);
@@ -1095,10 +1095,10 @@ xpc_connect_channel(struct xpc_channel *ch)
* the channel lock as needed.
*/
mutex_unlock(&registration->mutex);
- XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+ XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcUnequalMsgSizes;
+ return xpUnequalMsgSizes;
}
} else {
ch->msg_size = registration->msg_size;
@@ -1120,7 +1120,7 @@ xpc_connect_channel(struct xpc_channel *ch)
spin_unlock_irqrestore(&ch->lock, irq_flags);
- return xpcSuccess;
+ return xpSuccess;
}
/*
@@ -1203,7 +1203,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
* Notify senders that messages sent have been
* received and delivered by the other side.
*/
- xpc_notify_senders(ch, xpcMsgDelivered,
+ xpc_notify_senders(ch, xpMsgDelivered,
ch->remote_GP.get);
}
@@ -1335,7 +1335,7 @@ xpc_process_channel_activity(struct xpc_partition *part)
* at the same time.
*/
void
-xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
+xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
unsigned long irq_flags;
int ch_number;
@@ -1375,7 +1375,7 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
void
xpc_teardown_infrastructure(struct xpc_partition *part)
{
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
/*
* We start off by making this partition inaccessible to local
@@ -1428,7 +1428,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
void
xpc_initiate_connect(int ch_number)
{
- partid_t partid;
+ short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
@@ -1456,13 +1456,13 @@ xpc_connected_callout(struct xpc_channel *ch)
/* let the registerer know that a connection has been established */
if (ch->func != NULL) {
- dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
+ dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
- ch->func(xpcConnected, ch->partid, ch->number,
+ ch->func(xpConnected, ch->partid, ch->number,
(void *)(u64)ch->local_nentries, ch->key);
- dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
+ dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
"partid=%d, channel=%d\n", ch->partid, ch->number);
}
}
@@ -1484,7 +1484,7 @@ void
xpc_initiate_disconnect(int ch_number)
{
unsigned long irq_flags;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
@@ -1503,7 +1503,7 @@ xpc_initiate_disconnect(int ch_number)
if (!(ch->flags & XPC_C_DISCONNECTED)) {
ch->flags |= XPC_C_WDISCONNECT;
- XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
+ XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
&irq_flags);
}
@@ -1528,7 +1528,7 @@ xpc_initiate_disconnect(int ch_number)
*/
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
- enum xpc_retval reason, unsigned long *irq_flags)
+ enum xp_retval reason, unsigned long *irq_flags)
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
@@ -1563,7 +1563,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
- /* start a kthread that will do the xpcDisconnecting callout */
+ /* start a kthread that will do the xpDisconnecting callout */
xpc_create_kthreads(ch, 1, 1);
}
@@ -1575,7 +1575,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
}
void
-xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
+xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
/*
* Let the channel's registerer know that the channel is being
@@ -1598,13 +1598,13 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
* Wait for a message entry to become available for the specified channel,
* but don't wait any longer than 1 jiffy.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
if (ch->flags & XPC_C_DISCONNECTING) {
- DBUG_ON(ch->reason == xpcInterrupted);
+ DBUG_ON(ch->reason == xpInterrupted);
return ch->reason;
}
@@ -1614,11 +1614,11 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
- DBUG_ON(ch->reason == xpcInterrupted);
+ DBUG_ON(ch->reason == xpInterrupted);
} else if (ret == 0) {
- ret = xpcTimeout;
+ ret = xpTimeout;
} else {
- ret = xpcInterrupted;
+ ret = xpInterrupted;
}
return ret;
@@ -1628,12 +1628,12 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
* Allocate an entry for a message from the message queue associated with the
* specified channel.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
struct xpc_msg **address_of_msg)
{
struct xpc_msg *msg;
- enum xpc_retval ret;
+ enum xp_retval ret;
s64 put;
/* this reference will be dropped in xpc_send_msg() */
@@ -1645,7 +1645,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
}
if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch);
- return xpcNotConnected;
+ return xpNotConnected;
}
/*
@@ -1653,7 +1653,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
* If none are available, we'll make sure that we grab the latest
* GP values.
*/
- ret = xpcTimeout;
+ ret = xpTimeout;
while (1) {
@@ -1683,16 +1683,16 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
* that will cause the IPI handler to fetch the latest
* GP values as if an IPI was sent by the other side.
*/
- if (ret == xpcTimeout)
+ if (ret == xpTimeout)
xpc_IPI_send_local_msgrequest(ch);
if (flags & XPC_NOWAIT) {
xpc_msgqueue_deref(ch);
- return xpcNoWait;
+ return xpNoWait;
}
ret = xpc_allocate_msg_wait(ch);
- if (ret != xpcInterrupted && ret != xpcTimeout) {
+ if (ret != xpInterrupted && ret != xpTimeout) {
xpc_msgqueue_deref(ch);
return ret;
}
@@ -1711,7 +1711,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
*address_of_msg = msg;
- return xpcSuccess;
+ return xpSuccess;
}
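
The xpc_allocate_msg() hunks above form a retry loop: start from a presumed timeout, nudge the remote side once per timeout-style miss, bail out with xpNoWait when XPC_NOWAIT is set, and otherwise keep waiting unless something other than a timeout or an interruption comes back. A skeleton of that loop with hypothetical try_get_slot(), kick_remote() and wait_for_slot() callbacks standing in for the driver's helpers:

#include <stdbool.h>

enum xp_retval { xpSuccess, xpTimeout, xpInterrupted, xpNoWait };

#define ALLOC_NOWAIT 0x1        /* stand-in for XPC_NOWAIT */

static enum xp_retval alloc_slot(unsigned int flags,
                                 bool (*try_get_slot)(void),
                                 void (*kick_remote)(void),
                                 enum xp_retval (*wait_for_slot)(void))
{
        enum xp_retval ret = xpTimeout;

        for (;;) {
                if (try_get_slot())
                        return xpSuccess;

                /* only nudge the remote side after a timeout-style miss */
                if (ret == xpTimeout)
                        kick_remote();

                if (flags & ALLOC_NOWAIT)
                        return xpNoWait;

                ret = wait_for_slot();
                if (ret != xpInterrupted && ret != xpTimeout)
                        return ret;     /* e.g. the channel is disconnecting */
        }
}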
/*
@@ -1727,11 +1727,11 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
* payload - address of the allocated payload area pointer (filled in on
* return) in which the user-defined message is constructed.
*/
-enum xpc_retval
-xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
+enum xp_retval
+xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
- enum xpc_retval ret = xpcUnknownReason;
+ enum xp_retval ret = xpUnknownReason;
struct xpc_msg *msg = NULL;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -1814,11 +1814,11 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
* local message queue's Put value and sends an IPI to the partition the
* message is being sent to.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
xpc_notify_func func, void *key)
{
- enum xpc_retval ret = xpcSuccess;
+ enum xp_retval ret = xpSuccess;
struct xpc_notify *notify = notify;
s64 put, msg_number = msg->number;
@@ -1908,12 +1908,12 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
* payload - pointer to the payload area allocated via
* xpc_initiate_allocate().
*/
-enum xpc_retval
-xpc_initiate_send(partid_t partid, int ch_number, void *payload)
+enum xp_retval
+xpc_initiate_send(short partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
- enum xpc_retval ret;
+ enum xp_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
partid, ch_number);
@@ -1957,13 +1957,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
* receipt. THIS FUNCTION MUST BE NON-BLOCKING.
* key - user-defined key to be passed to the function when it's called.
*/
-enum xpc_retval
-xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
+enum xp_retval
+xpc_initiate_send_notify(short partid, int ch_number, void *payload,
xpc_notify_func func, void *key)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
- enum xpc_retval ret;
+ enum xp_retval ret;
dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
partid, ch_number);
@@ -1985,7 +1985,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
struct xpc_msg *remote_msg, *msg;
u32 msg_index, nmsgs;
u64 msg_offset;
- enum xpc_retval ret;
+ enum xp_retval ret;
if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
/* we were interrupted by a signal */
@@ -2012,7 +2012,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
nmsgs * ch->msg_size);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
" msg %ld from partition %d, channel=%d, "
@@ -2112,7 +2112,7 @@ xpc_deliver_msg(struct xpc_channel *ch)
ch->number);
/* deliver the message to its intended recipient */
- ch->func(xpcMsgReceived, ch->partid, ch->number,
+ ch->func(xpMsgReceived, ch->partid, ch->number,
&msg->payload, ch->key);
dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
@@ -2203,7 +2203,7 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
* xpc_initiate_allocate().
*/
void
-xpc_initiate_received(partid_t partid, int ch_number, void *payload)
+xpc_initiate_received(short partid, int ch_number, void *payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index f673ba90eb0..08256ed0d9a 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -315,13 +315,13 @@ xpc_initiate_discovery(void *ignore)
* the XPC per partition variables from the remote partition and waiting for
* the remote partition to pull ours.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_make_first_contact(struct xpc_partition *part)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
- while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
- if (ret != xpcRetry) {
+ while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
+ if (ret != xpRetry) {
XPC_DEACTIVATE_PARTITION(part, ret);
return ret;
}
@@ -406,7 +406,7 @@ xpc_partition_up(struct xpc_partition *part)
dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
- if (xpc_setup_infrastructure(part) != xpcSuccess)
+ if (xpc_setup_infrastructure(part) != xpSuccess)
return;
/*
@@ -418,7 +418,7 @@ xpc_partition_up(struct xpc_partition *part)
(void)xpc_part_ref(part); /* this will always succeed */
- if (xpc_make_first_contact(part) == xpcSuccess)
+ if (xpc_make_first_contact(part) == xpSuccess)
xpc_channel_mgr(part);
xpc_part_deref(part);
@@ -429,7 +429,7 @@ xpc_partition_up(struct xpc_partition *part)
static int
xpc_activating(void *__partid)
{
- partid_t partid = (u64)__partid;
+ short partid = (u64)__partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
@@ -470,7 +470,7 @@ xpc_activating(void *__partid)
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE;
- XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
+ XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
return 0;
@@ -488,7 +488,7 @@ xpc_activating(void *__partid)
xpc_disallow_hb(partid, xpc_vars);
xpc_mark_partition_inactive(part);
- if (part->reason == xpcReactivating) {
+ if (part->reason == xpReactivating) {
/* interrupting ourselves results in activating partition */
xpc_IPI_send_reactivate(part);
}
@@ -499,7 +499,7 @@ xpc_activating(void *__partid)
void
xpc_activate_partition(struct xpc_partition *part)
{
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
unsigned long irq_flags;
struct task_struct *kthread;
@@ -508,7 +508,7 @@ xpc_activate_partition(struct xpc_partition *part)
DBUG_ON(part->act_state != XPC_P_INACTIVE);
part->act_state = XPC_P_ACTIVATION_REQ;
- XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
+ XPC_SET_REASON(part, xpCloneKThread, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -517,7 +517,7 @@ xpc_activate_partition(struct xpc_partition *part)
if (IS_ERR(kthread)) {
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE;
- XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
+ XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
}
}
@@ -541,7 +541,7 @@ xpc_activate_partition(struct xpc_partition *part)
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
- partid_t partid = (partid_t) (u64)dev_id;
+ short partid = (short)(u64)dev_id;
struct xpc_partition *part = &xpc_partitions[partid];
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -643,7 +643,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
static int
xpc_kthread_start(void *args)
{
- partid_t partid = XPC_UNPACK_ARG1(args);
+ short partid = XPC_UNPACK_ARG1(args);
u16 ch_number = XPC_UNPACK_ARG2(args);
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_channel *ch;
@@ -696,7 +696,7 @@ xpc_kthread_start(void *args)
ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
spin_unlock_irqrestore(&ch->lock, irq_flags);
- xpc_disconnect_callout(ch, xpcDisconnecting);
+ xpc_disconnect_callout(ch, xpDisconnecting);
spin_lock_irqsave(&ch->lock, irq_flags);
ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
@@ -776,7 +776,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
* then we'll deadlock if all other kthreads assigned
* to this channel are blocked in the channel's
* registerer, because the only thing that will unblock
- * them is the xpcDisconnecting callout that this
+ * them is the xpDisconnecting callout that this
* failed kthread_run() would have made.
*/
@@ -796,7 +796,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
* to function.
*/
spin_lock_irqsave(&ch->lock, irq_flags);
- XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
+ XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
}
@@ -809,7 +809,7 @@ void
xpc_disconnect_wait(int ch_number)
{
unsigned long irq_flags;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
struct xpc_channel *ch;
int wakeup_channel_mgr;
@@ -857,9 +857,9 @@ xpc_disconnect_wait(int ch_number)
}
static void
-xpc_do_exit(enum xpc_retval reason)
+xpc_do_exit(enum xp_retval reason)
{
- partid_t partid;
+ short partid;
int active_part_count, printed_waiting_msg = 0;
struct xpc_partition *part;
unsigned long printmsg_time, disengage_request_timeout = 0;
@@ -955,7 +955,7 @@ xpc_do_exit(enum xpc_retval reason)
del_timer_sync(&xpc_hb_timer);
DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
- if (reason == xpcUnloading) {
+ if (reason == xpUnloading) {
/* take ourselves off of the reboot_notifier_list */
(void)unregister_reboot_notifier(&xpc_reboot_notifier);
@@ -981,20 +981,20 @@ xpc_do_exit(enum xpc_retval reason)
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
- enum xpc_retval reason;
+ enum xp_retval reason;
switch (event) {
case SYS_RESTART:
- reason = xpcSystemReboot;
+ reason = xpSystemReboot;
break;
case SYS_HALT:
- reason = xpcSystemHalt;
+ reason = xpSystemHalt;
break;
case SYS_POWER_OFF:
- reason = xpcSystemPoweroff;
+ reason = xpSystemPoweroff;
break;
default:
- reason = xpcSystemGoingDown;
+ reason = xpSystemGoingDown;
}
xpc_do_exit(reason);
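
xpc_system_reboot() above is a standard reboot notifier: it maps SYS_RESTART, SYS_HALT and SYS_POWER_OFF onto driver-private reasons and then tears the driver down. A minimal kernel-module sketch of the same hook, assuming only the stock notifier API (all example_* names are illustrative):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Translate the notifier event into a reason string and log it; a real
 * driver would start its shutdown path here, as xpc_do_exit() does. */
static int example_reboot(struct notifier_block *nb, unsigned long event,
                          void *unused)
{
        const char *reason;

        switch (event) {
        case SYS_RESTART:
                reason = "reboot";
                break;
        case SYS_HALT:
                reason = "halt";
                break;
        case SYS_POWER_OFF:
                reason = "poweroff";
                break;
        default:
                reason = "going down";
        }
        pr_info("example: system %s\n", reason);
        return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
        .notifier_call = example_reboot,
};

static int __init example_init(void)
{
        return register_reboot_notifier(&example_reboot_nb);
}

static void __exit example_exit(void)
{
        unregister_reboot_notifier(&example_reboot_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");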
@@ -1008,7 +1008,7 @@ static void
xpc_die_disengage(void)
{
struct xpc_partition *part;
- partid_t partid;
+ short partid;
unsigned long engaged;
long time, printmsg_time, disengage_request_timeout;
@@ -1124,7 +1124,7 @@ int __init
xpc_init(void)
{
int ret;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
struct task_struct *kthread;
size_t buf_size;
@@ -1279,7 +1279,7 @@ xpc_init(void)
/* mark this new thread as a non-starter */
complete(&xpc_discovery_exited);
- xpc_do_exit(xpcUnloading);
+ xpc_do_exit(xpUnloading);
return -EBUSY;
}
@@ -1297,7 +1297,7 @@ module_init(xpc_init);
void __exit
xpc_exit(void)
{
- xpc_do_exit(xpcUnloading);
+ xpc_do_exit(xpUnloading);
}
module_exit(xpc_exit);
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index acd3fd4285d..7dd4b5812c4 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -403,7 +403,7 @@ xpc_check_remote_hb(void)
{
struct xpc_vars *remote_vars;
struct xpc_partition *part;
- partid_t partid;
+ short partid;
bte_result_t bres;
remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
@@ -444,7 +444,7 @@ xpc_check_remote_hb(void)
(remote_vars->heartbeat_offline == 0)) ||
!xpc_hb_allowed(sn_partition_id, remote_vars)) {
- XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
+ XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
continue;
}
@@ -459,7 +459,7 @@ xpc_check_remote_hb(void)
* is large enough to contain a copy of their reserved page header and
* part_nasids mask.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
{
@@ -469,7 +469,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
if (*remote_rp_pa == 0)
- return xpcNoRsvdPageAddr;
+ return xpNoRsvdPageAddr;
/* pull over the reserved page header and part_nasids mask */
bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp,
@@ -489,18 +489,18 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
if (remote_rp->partid < 1 ||
remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
- return xpcInvalidPartid;
+ return xpInvalidPartid;
}
if (remote_rp->partid == sn_partition_id)
- return xpcLocalPartid;
+ return xpLocalPartid;
if (XPC_VERSION_MAJOR(remote_rp->version) !=
XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
- return xpcBadVersion;
+ return xpBadVersion;
}
- return xpcSuccess;
+ return xpSuccess;
}
/*
@@ -509,13 +509,13 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
* remote_vars points to a buffer that is cacheline aligned for BTE copies and
* assumed to be of size XPC_RP_VARS_SIZE.
*/
-static enum xpc_retval
+static enum xp_retval
xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
int bres;
if (remote_vars_pa == 0)
- return xpcVarsNotSet;
+ return xpVarsNotSet;
/* pull over the cross partition variables */
bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE,
@@ -525,10 +525,10 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
if (XPC_VERSION_MAJOR(remote_vars->version) !=
XPC_VERSION_MAJOR(XPC_V_VERSION)) {
- return xpcBadVersion;
+ return xpBadVersion;
}
- return xpcSuccess;
+ return xpSuccess;
}
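
Both xpc_get_remote_rp() and xpc_get_remote_vars() above reject a peer only when the major part of its version disagrees (xpBadVersion); minor revisions are expected to interoperate. A small user-space sketch of that rule with an illustrative 4-bit major/minor packing (not a quote of the driver's macros):

#include <stdio.h>
#include <stdint.h>

#define VER(maj, min)   ((uint8_t)(((maj) << 4) | ((min) & 0xf)))
#define VER_MAJOR(v)    ((v) >> 4)
#define VER_MINOR(v)    ((v) & 0xf)

/* Only a major-number mismatch is fatal. */
static int versions_compatible(uint8_t mine, uint8_t theirs)
{
        return VER_MAJOR(mine) == VER_MAJOR(theirs);
}

int main(void)
{
        uint8_t local = VER(3, 1), remote = VER(3, 0), old = VER(2, 7);

        printf("3.1 vs 3.0: %s\n",
               versions_compatible(local, remote) ? "ok" : "bad version");
        printf("3.1 vs 2.7: %s\n",
               versions_compatible(local, old) ? "ok" : "bad version");
        return 0;
}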
/*
@@ -604,16 +604,16 @@ xpc_identify_act_IRQ_req(int nasid)
int reactivate = 0;
int stamp_diff;
struct timespec remote_rp_stamp = { 0, 0 };
- partid_t partid;
+ short partid;
struct xpc_partition *part;
- enum xpc_retval ret;
+ enum xp_retval ret;
/* pull over the reserved page structure */
remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;
ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret);
return;
@@ -632,7 +632,7 @@ xpc_identify_act_IRQ_req(int nasid)
remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer;
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
"which sent interrupt, reason=%d\n", nasid, ret);
@@ -699,7 +699,7 @@ xpc_identify_act_IRQ_req(int nasid)
&remote_rp_stamp, remote_rp_pa,
remote_vars_pa, remote_vars);
part->reactivate_nasid = nasid;
- XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
+ XPC_DEACTIVATE_PARTITION(part, xpReactivating);
return;
}
@@ -754,11 +754,11 @@ xpc_identify_act_IRQ_req(int nasid)
if (reactivate) {
part->reactivate_nasid = nasid;
- XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
+ XPC_DEACTIVATE_PARTITION(part, xpReactivating);
} else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
xpc_partition_disengage_requested(1UL << partid)) {
- XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
+ XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}
}
@@ -825,7 +825,7 @@ xpc_identify_act_IRQ_sender(void)
int
xpc_partition_disengaged(struct xpc_partition *part)
{
- partid_t partid = XPC_PARTID(part);
+ short partid = XPC_PARTID(part);
int disengaged;
disengaged = (xpc_partition_engaged(1UL << partid) == 0);
@@ -870,20 +870,20 @@ xpc_partition_disengaged(struct xpc_partition *part)
/*
* Mark specified partition as active.
*/
-enum xpc_retval
+enum xp_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
unsigned long irq_flags;
- enum xpc_retval ret;
+ enum xp_retval ret;
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_ACTIVATING) {
part->act_state = XPC_P_ACTIVE;
- ret = xpcSuccess;
+ ret = xpSuccess;
} else {
- DBUG_ON(part->reason == xpcSuccess);
+ DBUG_ON(part->reason == xpSuccess);
ret = part->reason;
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -896,7 +896,7 @@ xpc_mark_partition_active(struct xpc_partition *part)
*/
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
- enum xpc_retval reason)
+ enum xp_retval reason)
{
unsigned long irq_flags;
@@ -905,15 +905,15 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
if (part->act_state == XPC_P_INACTIVE) {
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
- if (reason == xpcReactivating) {
+ if (reason == xpReactivating) {
/* we interrupt ourselves to reactivate partition */
xpc_IPI_send_reactivate(part);
}
return;
}
if (part->act_state == XPC_P_DEACTIVATING) {
- if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
- reason == xpcReactivating) {
+ if ((part->reason == xpUnloading && reason != xpUnloading) ||
+ reason == xpReactivating) {
XPC_SET_REASON(part, reason, line);
}
spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -982,10 +982,10 @@ xpc_discovery(void)
int max_regions;
int nasid;
struct xpc_rsvd_page *rp;
- partid_t partid;
+ short partid;
struct xpc_partition *part;
u64 *discovered_nasids;
- enum xpc_retval ret;
+ enum xp_retval ret;
remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
xp_nasid_mask_bytes,
@@ -1063,12 +1063,12 @@ xpc_discovery(void)
ret = xpc_get_remote_rp(nasid, discovered_nasids,
remote_rp, &remote_rp_pa);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_dbg(xpc_part, "unable to get reserved page "
"from nasid %d, reason=%d\n", nasid,
ret);
- if (ret == xpcLocalPartid)
+ if (ret == xpLocalPartid)
break;
continue;
@@ -1082,7 +1082,7 @@ xpc_discovery(void)
/* pull over the cross partition variables */
ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_dbg(xpc_part, "unable to get XPC variables "
"from nasid %d, reason=%d\n", nasid,
ret);
@@ -1116,7 +1116,7 @@ xpc_discovery(void)
"register xp_addr region 0x%016lx\n",
partid, remote_vars->amos_page_pa);
- XPC_SET_REASON(part, xpcPhysAddrRegFailed,
+ XPC_SET_REASON(part, xpPhysAddrRegFailed,
__LINE__);
break;
}
@@ -1151,8 +1151,8 @@ xpc_discovery(void)
* Given a partid, get the nasids owned by that partition from the
* remote partition's reserved page.
*/
-enum xpc_retval
-xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
+enum xp_retval
+xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
struct xpc_partition *part;
u64 part_nasid_pa;
@@ -1160,7 +1160,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
part = &xpc_partitions[partid];
if (part->remote_rp_pa == 0)
- return xpcPartitionDown;
+ return xpPartitionDown;
memset(nasid_mask, 0, XP_NASID_MASK_BYTES);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index a9543c65814..822dc8e8d7f 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -166,7 +166,7 @@ struct device *xpnet = &xpnet_dbg_subname;
* Packet was received by XPC and forwarded to us.
*/
static void
-xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
+xpnet_receive(short partid, int channel, struct xpnet_message *msg)
{
struct sk_buff *skb;
bte_result_t bret;
@@ -282,7 +282,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
* state or message reception on a connection.
*/
static void
-xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
+xpnet_connection_activity(enum xp_retval reason, short partid, int channel,
void *data, void *key)
{
long bp;
@@ -291,13 +291,13 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
DBUG_ON(channel != XPC_NET_CHANNEL);
switch (reason) {
- case xpcMsgReceived: /* message received */
+ case xpMsgReceived: /* message received */
DBUG_ON(data == NULL);
xpnet_receive(partid, channel, (struct xpnet_message *)data);
break;
- case xpcConnected: /* connection completed to a partition */
+ case xpConnected: /* connection completed to a partition */
spin_lock_bh(&xpnet_broadcast_lock);
xpnet_broadcast_partitions |= 1UL << (partid - 1);
bp = xpnet_broadcast_partitions;
@@ -330,7 +330,7 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
static int
xpnet_dev_open(struct net_device *dev)
{
- enum xpc_retval ret;
+ enum xp_retval ret;
dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, "
"%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
@@ -340,7 +340,7 @@ xpnet_dev_open(struct net_device *dev)
ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
- if (ret != xpcSuccess) {
+ if (ret != xpSuccess) {
dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
"ret=%d\n", dev->name, ret);
@@ -407,7 +407,7 @@ xpnet_dev_get_stats(struct net_device *dev)
* release the skb and then release our pending message structure.
*/
static void
-xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
+xpnet_send_completed(enum xp_retval reason, short partid, int channel,
void *__qm)
{
struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm;
@@ -439,12 +439,12 @@ static int
xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xpnet_pending_msg *queued_msg;
- enum xpc_retval ret;
+ enum xp_retval ret;
struct xpnet_message *msg;
u64 start_addr, end_addr;
long dp;
u8 second_mac_octet;
- partid_t dest_partid;
+ short dest_partid;
struct xpnet_dev_private *priv;
u16 embedded_bytes;
@@ -528,7 +528,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
XPC_NOWAIT, (void **)&msg);
- if (unlikely(ret != xpcSuccess))
+ if (unlikely(ret != xpSuccess))
continue;
msg->embedded_bytes = embedded_bytes;
@@ -557,7 +557,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
xpnet_send_completed, queued_msg);
- if (unlikely(ret != xpcSuccess)) {
+ if (unlikely(ret != xpSuccess)) {
atomic_dec(&queued_msg->use_count);
continue;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 626ac083f4e..da5fecad74d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -425,7 +425,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cclk = host->mclk;
} else {
clk = host->mclk / (2 * ios->clock) - 1;
- if (clk > 256)
+ if (clk >= 256)
clk = 255;
host->cclk = host->mclk / (2 * (clk + 1));
}
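
The one-character change above (> 256 becomes >= 256) matters because the MMCI bus clock is derived as cclk = mclk / (2 * (div + 1)) and the divider field written to the controller is only 8 bits wide (hence the clamp to 255): a computed value of exactly 256 used to slip through and be truncated when written. A user-space sketch of the calculation, assuming 0 < wanted <= mclk / 2 (the driver handles the bypass case separately; the rates below are made up):

#include <stdio.h>

/* Compute the 8-bit MMCI divider for a requested card clock:
 * cclk = mclk / (2 * (div + 1)), so div = mclk / (2 * wanted) - 1,
 * clamped to the 0..255 range the register can hold. */
static unsigned int mmci_divider(unsigned int mclk, unsigned int wanted)
{
        unsigned int div = mclk / (2 * wanted) - 1;

        if (div >= 256)         /* ">= 256", not "> 256": 256 itself overflows */
                div = 255;
        return div;
}

int main(void)
{
        unsigned int mclk = 48000000;   /* illustrative controller clock */
        unsigned int div = mmci_divider(mclk, 400000);

        printf("div=%u -> cclk=%u Hz\n", div, mclk / (2 * (div + 1)));
        return 0;
}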
@@ -512,6 +512,18 @@ static int mmci_probe(struct amba_device *dev, void *id)
host->plat = plat;
host->mclk = clk_get_rate(host->clk);
+ /*
+ * According to the spec, mclk is max 100 MHz,
+ * so we try to adjust the clock down to this,
+ * (if possible).
+ */
+ if (host->mclk > 100000000) {
+ ret = clk_set_rate(host->clk, 100000000);
+ if (ret < 0)
+ goto clk_disable;
+ host->mclk = clk_get_rate(host->clk);
+ DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
+ }
host->mmc = mmc;
host->base = ioremap(dev->res.start, SZ_4K);
if (!host->base) {
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7fb02e177a3..299118de893 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -187,7 +187,7 @@ struct sdhci_host {
struct mmc_request *mrq; /* Current request */
struct mmc_command *cmd; /* Current command */
struct mmc_data *data; /* Current data request */
- int data_early:1; /* Data finished before cmd */
+ unsigned int data_early:1; /* Data finished before cmd */
struct scatterlist *cur_sg; /* We're working on this */
int num_sg; /* Entries left */
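
The sdhci.h hunk turns data_early into an unsigned bitfield. With GCC, a plain int bitfield of width 1 is signed and can therefore only hold 0 and -1, so code that assigns 1 and later compares against 1 silently misbehaves; unsigned int x:1 holds 0 and 1 as intended. A small user-space demonstration of the difference:

#include <stdio.h>

struct flags {
        int signed_early:1;             /* can only represent 0 and -1 (GCC) */
        unsigned int unsigned_early:1;  /* represents 0 and 1 */
};

int main(void)
{
        struct flags f = { 0, 0 };

        f.signed_early = 1;
        f.unsigned_early = 1;

        /* prints "-1 1": the signed bitfield never compares equal to 1 */
        printf("%d %d\n", (int)f.signed_early, (int)f.unsigned_early);
        printf("signed == 1? %s\n", f.signed_early == 1 ? "yes" : "no");
        return 0;
}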
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 1bd69aa9e22..17bc87a43ff 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -374,7 +374,7 @@ config MTD_REDWOOD
config MTD_SOLUTIONENGINE
tristate "CFI Flash device mapped on Hitachi SolutionEngine"
- depends on SUPERH && MTD_CFI && MTD_REDBOOT_PARTS
+ depends on SUPERH && SOLUTION_ENGINE && MTD_CFI && MTD_REDBOOT_PARTS
help
This enables access to the flash chips on the Hitachi SolutionEngine and
similar boards. Say 'Y' if you are building a kernel for such a board.
@@ -480,13 +480,6 @@ config MTD_H720X
This enables access to the flash chips on the Hynix evaluation boards.
If you have such a board, say 'Y'.
-config MTD_MPC1211
- tristate "CFI Flash device mapped on Interface MPC-1211"
- depends on SH_MPC1211 && MTD_CFI
- help
- This enables access to the flash chips on the Interface MPC-1211(CTP/PCI/MPC-SH02).
- If you have such a board, say 'Y'.
-
config MTD_OMAP_NOR
tristate "TI OMAP board mappings"
depends on MTD_CFI && ARCH_OMAP
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index a9cbe80f99a..957fb5f70f5 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,7 +58,6 @@ obj-$(CONFIG_MTD_WALNUT) += walnut.o
obj-$(CONFIG_MTD_H720X) += h720x-flash.o
obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o
-obj-$(CONFIG_MTD_MPC1211) += mpc1211.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
diff --git a/drivers/mtd/maps/mpc1211.c b/drivers/mtd/maps/mpc1211.c
deleted file mode 100644
index 45a00fac88a..00000000000
--- a/drivers/mtd/maps/mpc1211.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Flash on MPC-1211
- *
- * $Id: mpc1211.c,v 1.4 2004/09/16 23:27:13 gleixner Exp $
- *
- * (C) 2002 Interface, Saito.K & Jeanne
- *
- * GPL'd
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-static struct mtd_info *flash_mtd;
-static struct mtd_partition *parsed_parts;
-
-struct map_info mpc1211_flash_map = {
- .name = "MPC-1211 FLASH",
- .size = 0x80000,
- .bankwidth = 1,
-};
-
-static struct mtd_partition mpc1211_partitions[] = {
- {
- .name = "IPL & ETH-BOOT",
- .offset = 0x00000000,
- .size = 0x10000,
- },
- {
- .name = "Flash FS",
- .offset = 0x00010000,
- .size = MTDPART_SIZ_FULL,
- }
-};
-
-static int __init init_mpc1211_maps(void)
-{
- int nr_parts;
-
- mpc1211_flash_map.phys = 0;
- mpc1211_flash_map.virt = (void __iomem *)P2SEGADDR(0);
-
- simple_map_init(&mpc1211_flash_map);
-
- printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
- flash_mtd = do_map_probe("jedec_probe", &mpc1211_flash_map);
- if (!flash_mtd) {
- printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
- return -ENXIO;
- }
- printk(KERN_NOTICE "MPC-1211: Flash at 0x%08lx\n", mpc1211_flash_map.virt & 0x1fffffff);
- flash_mtd->module = THIS_MODULE;
-
- parsed_parts = mpc1211_partitions;
- nr_parts = ARRAY_SIZE(mpc1211_partitions);
-
- add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
- return 0;
-}
-
-static void __exit cleanup_mpc1211_maps(void)
-{
- if (parsed_parts)
- del_mtd_partitions(flash_mtd);
- else
- del_mtd_device(flash_mtd);
- map_destroy(flash_mtd);
-}
-
-module_init(init_mpc1211_maps);
-module_exit(cleanup_mpc1211_maps);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Saito.K & Jeanne <ksaito@interface.co.jp>");
-MODULE_DESCRIPTION("MTD map driver for MPC-1211 boards. Interface");
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 6f8e7d4cf74..2edda8cc7f9 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -319,7 +319,7 @@ static struct vortex_chip_info {
{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
{"3c980 Cyclone",
- PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+ PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
{"3c980C Python-T",
PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
@@ -600,7 +600,6 @@ struct vortex_private {
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
- struct net_device_stats stats; /* Generic stats */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -1875,7 +1874,7 @@ static void vortex_tx_timeout(struct net_device *dev)
issue_and_wait(dev, TxReset);
- vp->stats.tx_errors++;
+ dev->stats.tx_errors++;
if (vp->full_bus_master_tx) {
printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
@@ -1887,7 +1886,7 @@ static void vortex_tx_timeout(struct net_device *dev)
iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
} else {
- vp->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
netif_wake_queue(dev);
}
@@ -1928,8 +1927,8 @@ vortex_error(struct net_device *dev, int status)
}
dump_tx_ring(dev);
}
- if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
- if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x14) dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
if (tx_status & 0x08) vp->xstats.tx_max_collisions++;
iowrite8(0, ioaddr + TxStatus);
if (tx_status & 0x30) { /* txJabber or txUnderrun */
@@ -2051,8 +2050,8 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
dev->name, tx_status);
- if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
- if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+ if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
if (tx_status & 0x30) {
issue_and_wait(dev, TxReset);
}
@@ -2350,7 +2349,7 @@ boomerang_interrupt(int irq, void *dev_id)
} else {
printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
}
- /* vp->stats.tx_packets++; Counted below. */
+ /* dev->stats.tx_packets++; Counted below. */
dirty_tx++;
}
vp->dirty_tx = dirty_tx;
@@ -2409,12 +2408,12 @@ static int vortex_rx(struct net_device *dev)
unsigned char rx_error = ioread8(ioaddr + RxErrors);
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
- vp->stats.rx_errors++;
- if (rx_error & 0x01) vp->stats.rx_over_errors++;
- if (rx_error & 0x02) vp->stats.rx_length_errors++;
- if (rx_error & 0x04) vp->stats.rx_frame_errors++;
- if (rx_error & 0x08) vp->stats.rx_crc_errors++;
- if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01) dev->stats.rx_over_errors++;
+ if (rx_error & 0x02) dev->stats.rx_length_errors++;
+ if (rx_error & 0x04) dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08) dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10) dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K! */
int pkt_len = rx_status & 0x1fff;
@@ -2446,7 +2445,7 @@ static int vortex_rx(struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
- vp->stats.rx_packets++;
+ dev->stats.rx_packets++;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -2455,7 +2454,7 @@ static int vortex_rx(struct net_device *dev)
} else if (vortex_debug > 0)
printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
"size %d.\n", dev->name, pkt_len);
- vp->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
issue_and_wait(dev, RxDiscard);
}
@@ -2482,12 +2481,12 @@ boomerang_rx(struct net_device *dev)
unsigned char rx_error = rx_status >> 16;
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
- vp->stats.rx_errors++;
- if (rx_error & 0x01) vp->stats.rx_over_errors++;
- if (rx_error & 0x02) vp->stats.rx_length_errors++;
- if (rx_error & 0x04) vp->stats.rx_frame_errors++;
- if (rx_error & 0x08) vp->stats.rx_crc_errors++;
- if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ if (rx_error & 0x01) dev->stats.rx_over_errors++;
+ if (rx_error & 0x02) dev->stats.rx_length_errors++;
+ if (rx_error & 0x04) dev->stats.rx_frame_errors++;
+ if (rx_error & 0x08) dev->stats.rx_crc_errors++;
+ if (rx_error & 0x10) dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K! */
int pkt_len = rx_status & 0x1fff;
@@ -2529,7 +2528,7 @@ boomerang_rx(struct net_device *dev)
}
netif_rx(skb);
dev->last_rx = jiffies;
- vp->stats.rx_packets++;
+ dev->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
@@ -2591,7 +2590,7 @@ vortex_down(struct net_device *dev, int final_down)
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
- /* Turn off statistics ASAP. We update vp->stats below. */
+ /* Turn off statistics ASAP. We update dev->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
/* Disable the receiver and transmitter. */
@@ -2728,7 +2727,7 @@ static struct net_device_stats *vortex_get_stats(struct net_device *dev)
update_stats(ioaddr, dev);
spin_unlock_irqrestore (&vp->lock, flags);
}
- return &vp->stats;
+ return &dev->stats;
}
/* Update statistics.
@@ -2748,18 +2747,18 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
- vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
- vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
- vp->stats.tx_window_errors += ioread8(ioaddr + 4);
- vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
- vp->stats.tx_packets += ioread8(ioaddr + 6);
- vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
+ dev->stats.tx_carrier_errors += ioread8(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
+ dev->stats.tx_window_errors += ioread8(ioaddr + 4);
+ dev->stats.rx_fifo_errors += ioread8(ioaddr + 5);
+ dev->stats.tx_packets += ioread8(ioaddr + 6);
+ dev->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
/* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
- vp->stats.rx_bytes += ioread16(ioaddr + 10);
- vp->stats.tx_bytes += ioread16(ioaddr + 12);
+ dev->stats.rx_bytes += ioread16(ioaddr + 10);
+ dev->stats.tx_bytes += ioread16(ioaddr + 12);
/* Extra stats for get_ethtool_stats() */
vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
vp->xstats.tx_single_collisions += ioread8(ioaddr + 3);
@@ -2767,14 +2766,14 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
EL3WINDOW(4);
vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
- vp->stats.collisions = vp->xstats.tx_multiple_collisions
+ dev->stats.collisions = vp->xstats.tx_multiple_collisions
+ vp->xstats.tx_single_collisions
+ vp->xstats.tx_max_collisions;
{
u8 up = ioread8(ioaddr + 13);
- vp->stats.rx_bytes += (up & 0x0f) << 16;
- vp->stats.tx_bytes += (up & 0xf0) << 12;
+ dev->stats.rx_bytes += (up & 0x0f) << 16;
+ dev->stats.tx_bytes += (up & 0xf0) << 12;
}
EL3WINDOW(old_window >> 13);
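
The 3c59x hunks above drop the driver-private struct net_device_stats copy and count directly into the statistics block embedded in struct net_device, so the get_stats hook can simply hand back &dev->stats. A minimal kernel-style sketch of that shape (the example_* functions are hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Count a successfully received frame directly in the net_device's own
 * statistics block instead of a private copy. */
static void example_rx_ok(struct net_device *dev, struct sk_buff *skb)
{
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += skb->len;
}

/* With the counters living in dev->stats, the get_stats hook only has to
 * return a pointer to them (after folding in any hardware counters). */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
        return &dev->stats;
}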
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index af46341827f..9f6cc8a5607 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1273,20 +1273,6 @@ config PCNET32
To compile this driver as a module, choose M here. The module
will be called pcnet32.
-config PCNET32_NAPI
- bool "Use RX polling (NAPI)"
- depends on PCNET32
- help
- NAPI is a new driver API designed to reduce CPU and interrupt load
- when the driver is receiving lots of packets from the card. It is
- still somewhat experimental and thus not yet enabled by default.
-
- If your estimated Rx load is 10kpps or more, or if the card will be
- deployed on potentially unfriendly networks (e.g. in a firewall),
- then say Y here.
-
- If in doubt, say N.
-
config AMD8111_ETH
tristate "AMD 8111 (new PCI lance) support"
depends on NET_PCI && PCI
@@ -2440,7 +2426,7 @@ config CHELSIO_T3
config EHEA
tristate "eHEA Ethernet support"
- depends on IBMEBUS && INET && SPARSEMEM
+ depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG
select INET_LRO
---help---
This driver supports the IBM pSeries eHEA ethernet adapter.
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 82e9a5bd0dd..a0b4c851607 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -499,19 +499,13 @@ static void cops_reset(struct net_device *dev, int sleep)
{
outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */
inb(ioaddr+DAYNA_RESET); /* Clear the reset */
- if(sleep)
- {
- long snap=jiffies;
-
- /* Let card finish initializing, about 1/3 second */
- while (time_before(jiffies, snap + HZ/3))
- schedule();
- }
- else
- mdelay(333);
+ if (sleep)
+ msleep(333);
+ else
+ mdelay(333);
}
+
netif_wake_queue(dev);
- return;
}
static void cops_load (struct net_device *dev)
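
The cops_reset() change above replaces an open-coded schedule() polling loop with msleep() for the roughly 1/3 second settling time, keeping mdelay() only for callers that cannot sleep. A minimal sketch of that choice (may_sleep is an illustrative flag):

#include <linux/delay.h>

/* Wait about a third of a second for the card to settle: sleep when the
 * caller is allowed to, spin otherwise (e.g. in atomic context). */
static void example_settle(int may_sleep)
{
        if (may_sleep)
                msleep(333);    /* yields the CPU while waiting */
        else
                mdelay(333);    /* busy-waits; only for atomic paths */
}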
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0afe522b8f7..9c2394d4942 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1,7 +1,7 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
@@ -36,7 +36,6 @@
* A very incomplete list of things that need to be dealt with:
*
* TODO:
- * Wake on LAN.
* Add more ethtool functions.
* Fix abstruse irq enable/disable condition described here:
* http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
@@ -638,21 +637,18 @@ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
}
/*
- *TODO: do something or get rid of this
+ * Force the PHY into power saving mode using vendor magic.
*/
#ifdef CONFIG_PM
-static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
+static void atl1_phy_enter_power_saving(struct atl1_hw *hw)
{
-/* s32 ret_val;
- * u16 phy_data;
- */
+ atl1_write_phy_reg(hw, MII_DBG_ADDR, 0);
+ atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E);
+ atl1_write_phy_reg(hw, MII_DBG_ADDR, 2);
+ atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
+ atl1_write_phy_reg(hw, MII_DBG_ADDR, 3);
+ atl1_write_phy_reg(hw, MII_DBG_DATA, 0);
-/*
- ret_val = atl1_write_phy_reg(hw, ...);
- ret_val = atl1_write_phy_reg(hw, ...);
- ....
-*/
- return 0;
}
#endif
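
The rewritten atl1_phy_enter_power_saving() above drives vendor debug registers indirectly: the debug register index goes through MII_DBG_ADDR (0x1D) and the payload through MII_DBG_DATA (0x1E), both defined later in atlx.h in this same patch. A hedged sketch of that indirect-write pattern, with write_phy() as a hypothetical callback standing in for atl1_write_phy_reg():

#include <linux/types.h>

#define MII_DBG_ADDR    0x1D    /* debug-register index port (from atlx.h) */
#define MII_DBG_DATA    0x1E    /* debug-register data port */

/* Select the debug register via MII_DBG_ADDR, then push the value via
 * MII_DBG_DATA, the two-step sequence used for the power-saving magic. */
static void phy_dbg_write(void *hw,
                          void (*write_phy)(void *hw, u32 reg, u16 val),
                          u16 dbg_reg, u16 val)
{
        write_phy(hw, MII_DBG_ADDR, dbg_reg);
        write_phy(hw, MII_DBG_DATA, val);
}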
@@ -2784,64 +2780,93 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
u32 wufc = adapter->wol;
+ u32 val;
+ int retval;
+ u16 speed;
+ u16 duplex;
netif_device_detach(netdev);
if (netif_running(netdev))
atl1_down(adapter);
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
- if (ctrl & BMSR_LSTATUS)
+ val = ctrl & BMSR_LSTATUS;
+ if (val)
wufc &= ~ATLX_WUFC_LNKC;
- /* reduce speed to 10/100M */
- if (wufc) {
- atl1_phy_enter_power_saving(hw);
- /* if resume, let driver to re- setup link */
- hw->phy_configured = false;
- atl1_set_mac_addr(hw);
- atlx_set_multi(netdev);
+ if (val && wufc) {
+ val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+ if (val) {
+ if (netif_msg_ifdown(adapter))
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "error getting speed/duplex\n");
+ goto disable_wol;
+ }
ctrl = 0;
- /* turn on magic packet wol */
- if (wufc & ATLX_WUFC_MAG)
- ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
- /* turn on Link change WOL */
- if (wufc & ATLX_WUFC_LNKC)
- ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+ /* enable magic packet WOL */
+ if (wufc & ATLX_WUFC_MAG)
+ ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
-
- /* turn on all-multi mode if wake on multicast is enabled */
- ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
- ctrl &= ~MAC_CTRL_DBG;
- ctrl &= ~MAC_CTRL_PROMIS_EN;
- if (wufc & ATLX_WUFC_MC)
- ctrl |= MAC_CTRL_MC_ALL_EN;
- else
- ctrl &= ~MAC_CTRL_MC_ALL_EN;
-
- /* turn on broadcast mode if wake on-BC is enabled */
- if (wufc & ATLX_WUFC_BC)
+ ioread32(hw->hw_addr + REG_WOL_CTRL);
+
+ /* configure the mac */
+ ctrl = MAC_CTRL_RX_EN;
+ ctrl |= ((u32)((speed == SPEED_1000) ? MAC_CTRL_SPEED_1000 :
+ MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT);
+ if (duplex == FULL_DUPLEX)
+ ctrl |= MAC_CTRL_DUPLX;
+ ctrl |= (((u32)adapter->hw.preamble_len &
+ MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+ if (adapter->vlgrp)
+ ctrl |= MAC_CTRL_RMV_VLAN;
+ if (wufc & ATLX_WUFC_MAG)
ctrl |= MAC_CTRL_BC_EN;
- else
- ctrl &= ~MAC_CTRL_BC_EN;
-
- /* enable RX */
- ctrl |= MAC_CTRL_RX_EN;
iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- pci_enable_wake(pdev, PCI_D3cold, 1);
- } else {
- iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ ioread32(hw->hw_addr + REG_MAC_CTRL);
+
+ /* poke the PHY */
+ ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+ ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ goto exit;
}
- pci_save_state(pdev);
+ if (!val && wufc) {
+ ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+ iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+ ioread32(hw->hw_addr + REG_WOL_CTRL);
+ iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
+ ioread32(hw->hw_addr + REG_MAC_CTRL);
+ hw->phy_configured = false;
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ goto exit;
+ }
+
+disable_wol:
+ iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+ ioread32(hw->hw_addr + REG_WOL_CTRL);
+ ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+ ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+ iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
+ ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
+ atl1_phy_enter_power_saving(hw);
+ hw->phy_configured = false;
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+exit:
+ if (netif_running(netdev))
+ pci_disable_msi(adapter->pdev);
pci_disable_device(pdev);
-
- pci_set_power_state(pdev, PCI_D3hot);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
@@ -2855,20 +2880,26 @@ static int atl1_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- /* FIXME: check and handle */
err = pci_enable_device(pdev);
+ if (err) {
+ if (netif_msg_ifup(adapter))
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "error enabling pci device\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+ iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
- atl1_reset(adapter);
+ atl1_reset_hw(&adapter->hw);
+ adapter->cmb.cmb->int_stats = 0;
if (netif_running(netdev))
atl1_up(adapter);
netif_device_attach(netdev);
- atl1_via_workaround(adapter);
-
return 0;
}
#else
@@ -2876,6 +2907,13 @@ static int atl1_resume(struct pci_dev *pdev)
#define atl1_resume NULL
#endif
+static void atl1_shutdown(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PM
+ atl1_suspend(pdev, PMSG_SUSPEND);
+#endif
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void atl1_poll_controller(struct net_device *netdev)
{
@@ -3122,7 +3160,8 @@ static struct pci_driver atl1_driver = {
.probe = atl1_probe,
.remove = __devexit_p(atl1_remove),
.suspend = atl1_suspend,
- .resume = atl1_resume
+ .resume = atl1_resume,
+ .shutdown = atl1_shutdown
};
/*
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 51893d66eae..a5015b14a42 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -1,7 +1,7 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f06b854e250..b3e7fcf0f6e 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -2,7 +2,7 @@
*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
index 3be7c09734d..297a03da6b7 100644
--- a/drivers/net/atlx/atlx.h
+++ b/drivers/net/atlx/atlx.h
@@ -2,7 +2,7 @@
*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
@@ -29,7 +29,7 @@
#include <linux/module.h>
#include <linux/types.h>
-#define ATLX_DRIVER_VERSION "2.1.1"
+#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");
@@ -460,6 +460,9 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */
#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+#define MII_DBG_ADDR 0x1D
+#define MII_DBG_DATA 0x1E
+
/* PCI Command Register Bit Definitions */
#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
#define CMD_IO_SPACE 0x0001
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6425603bc37..50a40e43315 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1425,13 +1425,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = netdev_set_master(slave_dev, bond_dev);
if (res) {
dprintk("Error %d calling netdev_set_master\n", res);
- goto err_close;
+ goto err_restore_mac;
}
/* open the slave since the application closed it */
res = dev_open(slave_dev);
if (res) {
dprintk("Openning slave %s failed\n", slave_dev->name);
- goto err_restore_mac;
+ goto err_unset_master;
}
new_slave->dev = slave_dev;
@@ -1444,7 +1444,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
res = bond_alb_init_slave(bond, new_slave);
if (res) {
- goto err_unset_master;
+ goto err_close;
}
}
@@ -1619,7 +1619,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = bond_create_slave_symlinks(bond_dev, slave_dev);
if (res)
- goto err_unset_master;
+ goto err_close;
printk(KERN_INFO DRV_NAME
": %s: enslaving %s as a%s interface with a%s link.\n",
@@ -1631,12 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;
/* Undo stages on error */
-err_unset_master:
- netdev_set_master(slave_dev, NULL);
-
err_close:
dev_close(slave_dev);
+err_unset_master:
+ netdev_set_master(slave_dev, NULL);
+
err_restore_mac:
if (!bond->params.fail_over_mac) {
memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
@@ -4936,7 +4936,9 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
if (res < 0) {
rtnl_lock();
down_write(&bonding_rwsem);
- goto out_bond;
+ bond_deinit(bond_dev);
+ unregister_netdevice(bond_dev);
+ goto out_rtnl;
}
return 0;
@@ -4990,9 +4992,10 @@ err:
destroy_workqueue(bond->wq);
}
+ bond_destroy_sysfs();
+
rtnl_lock();
bond_free_all();
- bond_destroy_sysfs();
rtnl_unlock();
out:
return res;
@@ -5004,9 +5007,10 @@ static void __exit bonding_exit(void)
unregister_netdevice_notifier(&bond_netdev_notifier);
unregister_inetaddr_notifier(&bond_inetaddr_notifier);
+ bond_destroy_sysfs();
+
rtnl_lock();
bond_free_all();
- bond_destroy_sysfs();
rtnl_unlock();
}
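The relabelled error paths in bond_enslave() follow the usual kernel unwind idiom: tear down in the exact reverse order of setup, one goto label per completed step, so a failure after dev_open() closes the slave before the master pointer is cleared. A generic sketch of that idiom (the step_*/undo_* helpers are hypothetical):

    static int setup_all(void)
    {
            int err;

            err = step_a();
            if (err)
                    goto out;
            err = step_b();
            if (err)
                    goto undo_a;
            err = step_c();
            if (err)
                    goto undo_b;
            return 0;

    undo_b:
            undo_step_b();
    undo_a:
            undo_step_a();
    out:
            return err;
    }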
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 979c2d05ff9..08f3d396bcd 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -146,29 +146,29 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
": Unable remove bond %s due to open references.\n",
ifname);
res = -EPERM;
- goto out;
+ goto out_unlock;
}
printk(KERN_INFO DRV_NAME
": %s is being deleted...\n",
bond->dev->name);
bond_destroy(bond);
- up_write(&bonding_rwsem);
- rtnl_unlock();
- goto out;
+ goto out_unlock;
}
printk(KERN_ERR DRV_NAME
": unable to delete non-existent bond %s\n", ifname);
res = -ENODEV;
- up_write(&bonding_rwsem);
- rtnl_unlock();
- goto out;
+ goto out_unlock;
}
err_no_cmd:
printk(KERN_ERR DRV_NAME
": no command found in bonding_masters. Use +ifname or -ifname.\n");
- res = -EPERM;
+ return -EPERM;
+
+out_unlock:
+ up_write(&bonding_rwsem);
+ rtnl_unlock();
/* Always return either count or an error. If you return 0, you'll
* get called forever, which is bad.
@@ -1437,8 +1437,16 @@ int bond_create_sysfs(void)
* configure multiple bonding devices.
*/
if (ret == -EEXIST) {
- netdev_class = NULL;
- return 0;
+ /* Is someone being kinky and naming a device bonding_master? */
+ if (__dev_get_by_name(&init_net,
+ class_attr_bonding_masters.attr.name))
+ printk(KERN_ERR
+ "network device named %s already exists in sysfs",
+ class_attr_bonding_masters.attr.name);
+ else {
+ netdev_class = NULL;
+ return 0;
+ }
}
return ret;
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 4fdb13f8447..acebe431d06 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,6 +71,7 @@ enum { /* adapter flags */
USING_MSIX = (1 << 2),
QUEUES_BOUND = (1 << 3),
TP_PARITY_INIT = (1 << 4),
+ NAPI_INIT = (1 << 5),
};
struct fl_pg_chunk {
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 91ee7277b81..579bee42a5c 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -698,6 +698,7 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
int reset);
+int t3_replay_prep_adapter(struct adapter *adapter);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 05e5f59e87f..3a312721679 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -421,6 +421,13 @@ static void init_napi(struct adapter *adap)
netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
64);
}
+
+ /*
+ * netif_napi_add() can be called only once per napi_struct because it
+ * adds each new napi_struct to a list. Be careful not to call it a
+ * second time, e.g., during EEH recovery, by making a note of it.
+ */
+ adap->flags |= NAPI_INIT;
}
/*
@@ -896,7 +903,8 @@ static int cxgb_up(struct adapter *adap)
goto out;
setup_rss(adap);
- init_napi(adap);
+ if (!(adap->flags & NAPI_INIT))
+ init_napi(adap);
adap->flags |= FULL_INIT_DONE;
}
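Because netif_napi_add() may be called only once per napi_struct, cxgb_up() now guards init_napi() with the new NAPI_INIT adapter flag. The initialize-once idiom in isolation (a sketch reusing the flag from the hunk above; the wrapper name is hypothetical):

    static void cxgb_napi_init_once(struct adapter *adap)
    {
            /* init_napi() sets NAPI_INIT itself, so this survives later
             * down/up cycles, e.g. EEH recovery */
            if (!(adap->flags & NAPI_INIT))
                    init_napi(adap);
    }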
@@ -999,7 +1007,7 @@ static int offload_open(struct net_device *dev)
return 0;
if (!adap_up && (err = cxgb_up(adapter)) < 0)
- return err;
+ goto out;
t3_tp_set_offload_mode(adapter, 1);
tdev->lldev = adapter->port[0];
@@ -1061,10 +1069,8 @@ static int cxgb_open(struct net_device *dev)
int other_ports = adapter->open_device_map & PORT_MASK;
int err;
- if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
- quiesce_rx(adapter);
+ if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
return err;
- }
set_bit(pi->port_id, &adapter->open_device_map);
if (is_offload(adapter) && !ofld_disable) {
@@ -1894,11 +1900,11 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
u8 *fw_data;
struct ch_mem_range t;
- if (!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
-
+ /* Should t.len be sanity checked here? */
fw_data = kmalloc(t.len, GFP_KERNEL);
if (!fw_data)
return -ENOMEM;
@@ -2424,14 +2430,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
offload_close(&adapter->tdev);
- /* Free sge resources */
- t3_free_sge_resources(adapter);
-
adapter->flags &= ~FULL_INIT_DONE;
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -2448,13 +2451,20 @@ static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
+ goto err;
}
pci_set_master(pdev);
+ pci_restore_state(pdev);
- t3_prep_adapter(adapter, adapter->params.info, 1);
+ /* Free sge resources */
+ t3_free_sge_resources(adapter);
+
+ if (t3_replay_prep_adapter(adapter))
+ goto err;
return PCI_ERS_RESULT_RECOVERED;
+err:
+ return PCI_ERS_RESULT_DISCONNECT;
}
/**
@@ -2483,13 +2493,6 @@ static void t3_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
}
-
- if (is_offload(adapter)) {
- __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
- if (offload_open(adapter->port[0]))
- printk(KERN_WARNING
- "Could not bring back offload capabilities\n");
- }
}
static struct pci_error_handlers t3_err_handler = {
@@ -2608,6 +2611,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
pci_set_master(pdev);
+ pci_save_state(pdev);
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 02dbbb30092..56717887934 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -444,6 +444,14 @@
#define A_PCIE_CFG 0x88
+#define S_ENABLELINKDWNDRST 21
+#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U)
+
+#define S_ENABLELINKDOWNRST 20
+#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U)
+
#define S_PCIE_CLIDECEN 16
#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 98a6bbd11d4..796eb305cdc 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -539,6 +539,31 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
}
/**
+ * t3_reset_qset - reset a sge qset
+ * @q: the queue set
+ *
+ * Reset the qset structure.
+ * the NAPI structure is preserved in the event of
+ * the qset's reincarnation, for example during EEH recovery.
+ */
+static void t3_reset_qset(struct sge_qset *q)
+{
+ if (q->adap &&
+ !(q->adap->flags & NAPI_INIT)) {
+ memset(q, 0, sizeof(*q));
+ return;
+ }
+
+ q->adap = NULL;
+ memset(&q->rspq, 0, sizeof(q->rspq));
+ memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
+ memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
+ q->txq_stopped = 0;
+ memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+}
+
+
+/**
* free_qset - free the resources of an SGE queue set
* @adapter: the adapter owning the queue set
* @q: the queue set
@@ -594,7 +619,7 @@ static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
q->rspq.desc, q->rspq.phys_addr);
}
- memset(q, 0, sizeof(*q));
+ t3_reset_qset(q);
}
/**
@@ -1365,7 +1390,7 @@ static void restart_ctrlq(unsigned long data)
*/
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
- int ret;
+ int ret;
local_bh_disable();
ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
local_bh_enable();
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index a99496a431c..d405a932c73 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -3264,6 +3264,7 @@ static void config_pcie(struct adapter *adap)
t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
t3_set_reg_field(adap, A_PCIE_CFG, 0,
+ F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
@@ -3655,3 +3656,30 @@ void t3_led_ready(struct adapter *adapter)
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
F_GPIO0_OUT_VAL);
}
+
+int t3_replay_prep_adapter(struct adapter *adapter)
+{
+ const struct adapter_info *ai = adapter->params.info;
+ unsigned int i, j = 0;
+ int ret;
+
+ early_hw_init(adapter, ai);
+ ret = init_parity(adapter);
+ if (ret)
+ return ret;
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+ while (!adapter->params.vpd.port_type[j])
+ ++j;
+
+ p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
+ ai->mdio_ops);
+
+ p->phy.ops->power_down(&p->phy, 1);
+ ++j;
+ }
+
+ return 0;
+}
+
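t3_replay_prep_adapter() is what lets the slot-reset handler rebuild the chip state without a full re-probe: early hardware init, parity init, then a per-port phy_prep()/power_down() pass. For orientation, the EEH callbacks it plugs into are wired up as follows (condensed from this driver, not new code):

    static struct pci_error_handlers t3_err_handler = {
            .error_detected = t3_io_error_detected, /* quiesce, request slot reset */
            .slot_reset     = t3_io_slot_reset,     /* re-enable, restore state, replay prep */
            .resume         = t3_io_resume,         /* bring the ports back up */
    };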
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 12b4626102e..32a9a922f15 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -117,6 +117,9 @@ typedef struct board_info {
struct mutex addr_lock; /* phy and eeprom access lock */
+ struct delayed_work phy_poll;
+ struct net_device *ndev;
+
spinlock_t lock;
struct mii_if_info mii;
@@ -297,6 +300,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
}
}
+static void dm9000_schedule_poll(board_info_t *db)
+{
+ schedule_delayed_work(&db->phy_poll, HZ * 2);
+}
/* Our watchdog timed out. Called by the networking layer */
static void dm9000_timeout(struct net_device *dev)
@@ -465,6 +472,17 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.set_eeprom = dm9000_set_eeprom,
};
+static void
+dm9000_poll_work(struct work_struct *w)
+{
+ struct delayed_work *dw = container_of(w, struct delayed_work, work);
+ board_info_t *db = container_of(dw, board_info_t, phy_poll);
+
+ mii_check_media(&db->mii, netif_msg_link(db), 0);
+
+ if (netif_running(db->ndev))
+ dm9000_schedule_poll(db);
+}
/* dm9000_release_board
*
@@ -503,7 +521,7 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
/*
* Search DM9000 board, allocate space and register it
*/
-static int
+static int __devinit
dm9000_probe(struct platform_device *pdev)
{
struct dm9000_plat_data *pdata = pdev->dev.platform_data;
@@ -525,17 +543,21 @@ dm9000_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
- dev_dbg(&pdev->dev, "dm9000_probe()");
+ dev_dbg(&pdev->dev, "dm9000_probe()\n");
/* setup board info structure */
db = (struct board_info *) ndev->priv;
memset(db, 0, sizeof (*db));
db->dev = &pdev->dev;
+ db->ndev = ndev;
spin_lock_init(&db->lock);
mutex_init(&db->addr_lock);
+ INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
+
+
if (pdev->num_resources < 2) {
ret = -ENODEV;
goto out;
@@ -761,6 +783,8 @@ dm9000_open(struct net_device *dev)
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
+
+ dm9000_schedule_poll(db);
return 0;
}
@@ -879,6 +903,8 @@ dm9000_stop(struct net_device *ndev)
if (netif_msg_ifdown(db))
dev_dbg(db->dev, "shutting down %s\n", ndev->name);
+ cancel_delayed_work(&db->phy_poll);
+
netif_stop_queue(ndev);
netif_carrier_off(ndev);
@@ -1288,6 +1314,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
spin_unlock_irqrestore(&db->lock,flags);
mutex_unlock(&db->addr_lock);
+
+ dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
return ret;
}
@@ -1301,6 +1329,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
unsigned long flags;
unsigned long reg_save;
+ dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock,flags);
@@ -1372,7 +1401,7 @@ dm9000_drv_resume(struct platform_device *dev)
return 0;
}
-static int
+static int __devexit
dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1393,7 +1422,7 @@ static struct platform_driver dm9000_driver = {
.owner = THIS_MODULE,
},
.probe = dm9000_probe,
- .remove = dm9000_drv_remove,
+ .remove = __devexit_p(dm9000_drv_remove),
.suspend = dm9000_drv_suspend,
.resume = dm9000_drv_resume,
};
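The new PHY link poll is a standard delayed-work loop: initialise it at probe, kick it from open, let the handler re-arm itself, and cancel it in stop. Condensed from the calls added above (db is the board_info private data):

    INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);   /* probe */
    schedule_delayed_work(&db->phy_poll, HZ * 2);         /* open, and each poll pass */
    cancel_delayed_work(&db->phy_poll);                   /* stop */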
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 2a53875cddb..f823b8ba578 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -648,6 +648,8 @@
#define IFE_E_PHY_ID 0x02A80330
#define IFE_PLUS_E_PHY_ID 0x02A80320
#define IFE_C_E_PHY_ID 0x02A80310
+#define BME1000_E_PHY_ID 0x01410CB0
+#define BME1000_E_PHY_ID_R2 0x01410CB1
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -701,6 +703,14 @@
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
+
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+
/*
* Bits...
* 15-5: page
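PHY_REG() simply packs the page number above the 5-bit register field. As a worked example, the BM wakeup-enable register (page 769, register 17) used later in phy.c becomes, assuming MAX_PHY_REG_ADDRESS is the usual 0x1F mask:

    PHY_REG(769, 17) == (769 << 5) | (17 & 0x1F) == 0x6020 | 0x11 == 0x6031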
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 38bfd0d261f..d3bc6f8101f 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -127,7 +127,7 @@ struct e1000_buffer {
/* arrays of page information for packet split */
struct e1000_ps_page *ps_pages;
};
-
+ struct page *page;
};
struct e1000_ring {
@@ -304,6 +304,7 @@ struct e1000_info {
#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
+#define FLAG_IS_ICH (1 << 9)
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
#define FLAG_IS_QUAD_PORT (1 << 13)
@@ -386,6 +387,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -443,6 +445,9 @@ extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index ce045acce63..a14561f40db 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -494,8 +494,12 @@ static int e1000_get_eeprom(struct net_device *netdev,
for (i = 0; i < last_word - first_word + 1; i++) {
ret_val = e1000_read_nvm(hw, first_word + i, 1,
&eeprom_buff[i]);
- if (ret_val)
+ if (ret_val) {
+ /* a read error occurred, throw away the
+ * result */
+ memset(eeprom_buff, 0xff, sizeof(eeprom_buff));
break;
+ }
}
}
@@ -803,8 +807,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
/* restore previous status */
ew32(STATUS, before);
- if ((mac->type != e1000_ich8lan) &&
- (mac->type != e1000_ich9lan)) {
+ if (!(adapter->flags & FLAG_IS_ICH)) {
REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
@@ -824,15 +827,13 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
- before = (((mac->type == e1000_ich8lan) ||
- (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
+ before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
- if ((mac->type != e1000_ich8lan) &&
- (mac->type != e1000_ich9lan))
+ if (!(adapter->flags & FLAG_IS_ICH))
REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
@@ -911,9 +912,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Test each interrupt */
for (i = 0; i < 10; i++) {
-
- if (((adapter->hw.mac.type == e1000_ich8lan) ||
- (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
+ if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
continue;
/* Interrupt to test */
@@ -1184,6 +1183,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
u32 stat_reg = 0;
+ u16 phy_reg = 0;
hw->mac.autoneg = 0;
@@ -1211,6 +1211,28 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_SPD_100 |/* Force Speed to 100 */
E1000_CTRL_FD); /* Force Duplex to FULL */
break;
+ case e1000_phy_bm:
+ /* Set Default MAC Interface speed to 1GB */
+ e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
+ phy_reg &= ~0x0007;
+ phy_reg |= 0x006;
+ e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
+ /* Assert SW reset for above settings to take effect */
+ e1000e_commit_phy(hw);
+ mdelay(1);
+ /* Force Full Duplex */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
+ /* Set Link Up (in force link) */
+ e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
+ /* Force Link */
+ e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
+ /* Set Early Link Enable */
+ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+ e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
+ /* fall through */
default:
/* force 1000, set loopback */
e1e_wphy(hw, PHY_CONTROL, 0x4140);
@@ -1224,8 +1246,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
E1000_CTRL_FD); /* Force Duplex to FULL */
- if ((adapter->hw.mac.type == e1000_ich8lan) ||
- (adapter->hw.mac.type == e1000_ich9lan))
+ if (adapter->flags & FLAG_IS_ICH)
ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index a930e6d9cf0..74f263acb17 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -216,6 +216,21 @@ enum e1e_registers {
#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE 769
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
@@ -331,10 +346,16 @@ enum e1e_registers {
#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
#define E1000_DEV_ID_ICH8_IGP_M 0x104D
#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
+#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
#define E1000_DEV_ID_ICH9_IGP_C 0x294C
#define E1000_DEV_ID_ICH9_IFE 0x10C0
#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3
#define E1000_DEV_ID_ICH9_IFE_G 0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_FUNC_1 1
@@ -378,6 +399,7 @@ enum e1000_phy_type {
e1000_phy_gg82563,
e1000_phy_igp_3,
e1000_phy_ife,
+ e1000_phy_bm,
};
enum e1000_bus_width {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 768485dbb2c..9e38452a738 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -38,6 +38,12 @@
* 82566DM Gigabit Network Connection
* 82566MC Gigabit Network Connection
* 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82562GT-3 10/100 Network Connection
*/
#include <linux/netdevice.h>
@@ -198,6 +204,19 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->addr = 1;
phy->reset_delay_us = 100;
+ /*
+ * We may need to do this twice - once for IGP and if that fails,
+ * we'll set BM func pointers and try again
+ */
+ ret_val = e1000e_determine_phy_address(hw);
+ if (ret_val) {
+ hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
+ hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
+ ret_val = e1000e_determine_phy_address(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
phy->id = 0;
while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
(i++ < 100)) {
@@ -219,6 +238,13 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->type = e1000_phy_ife;
phy->autoneg_mask = E1000_ALL_NOT_GIG;
break;
+ case BME1000_E_PHY_ID:
+ phy->type = e1000_phy_bm;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
+ hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
+ hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
+ break;
default:
return -E1000_ERR_PHY;
break;
@@ -664,6 +690,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
return e1000_get_phy_info_ife_ich8lan(hw);
break;
case e1000_phy_igp_3:
+ case e1000_phy_bm:
return e1000e_get_phy_info_igp(hw);
break;
default:
@@ -728,7 +755,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
s32 ret_val = 0;
u16 data;
- if (phy->type != e1000_phy_igp_3)
+ if (phy->type == e1000_phy_ife)
return ret_val;
phy_ctrl = er32(PHY_CTRL);
@@ -1918,8 +1945,35 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_copper_link_setup_igp(hw);
if (ret_val)
return ret_val;
+ } else if (hw->phy.type == e1000_phy_bm) {
+ ret_val = e1000e_copper_link_setup_m88(hw);
+ if (ret_val)
+ return ret_val;
}
+ if (hw->phy.type == e1000_phy_ife) {
+ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+ switch (hw->phy.mdix) {
+ case 1:
+ reg_data &= ~IFE_PMC_FORCE_MDIX;
+ break;
+ case 2:
+ reg_data |= IFE_PMC_FORCE_MDIX;
+ break;
+ case 0:
+ default:
+ reg_data |= IFE_PMC_AUTO_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+ if (ret_val)
+ return ret_val;
+ }
return e1000e_setup_copper_link(hw);
}
@@ -2127,6 +2181,31 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000e_disable_gig_wol_ich8lan - disable gig during WoL
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
+ * to a lower speed.
+ *
+ * Should only be called for ICH9 devices.
+ **/
+void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
+{
+ u32 phy_ctrl;
+
+ if (hw->mac.type == e1000_ich9lan) {
+ phy_ctrl = er32(PHY_CTRL);
+ phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
+ E1000_PHY_CTRL_GBE_DISABLE;
+ ew32(PHY_CTRL, phy_ctrl);
+ }
+
+ return;
+}
+
+/**
* e1000_cleanup_led_ich8lan - Restore the default LED operation
* @hw: pointer to the HW structure
*
@@ -2247,6 +2326,7 @@ static struct e1000_nvm_operations ich8_nvm_ops = {
struct e1000_info e1000_ich8_info = {
.mac = e1000_ich8lan,
.flags = FLAG_HAS_WOL
+ | FLAG_IS_ICH
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
@@ -2262,6 +2342,7 @@ struct e1000_info e1000_ich8_info = {
struct e1000_info e1000_ich9_info = {
.mac = e1000_ich9lan,
.flags = FLAG_HAS_JUMBO_FRAMES
+ | FLAG_IS_ICH
| FLAG_HAS_WOL
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8991ab8911e..8cbb40f3a50 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -43,10 +43,11 @@
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
+#include <linux/pm_qos_params.h>
#include "e1000.h"
-#define DRV_VERSION "0.2.1"
+#define DRV_VERSION "0.3.3.3-k2"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -341,6 +342,89 @@ no_buffers:
}
/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+ int cleaned_count)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_rx_desc *rx_desc;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_buffer *buffer_info;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = 256 -
+ 16 /* for skb_reserve */ -
+ NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (cleaned_count--) {
+ skb = buffer_info->skb;
+ if (skb) {
+ skb_trim(skb, 0);
+ goto check_page;
+ }
+
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+
+ /* Make buffer alignment 2 beyond a 16 byte boundary;
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ buffer_info->skb = skb;
+check_page:
+ /* allocate a new page if necessary */
+ if (!buffer_info->page) {
+ buffer_info->page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!buffer_info->page)) {
+ adapter->alloc_rx_buff_failed++;
+ break;
+ }
+ }
+
+ if (!buffer_info->dma)
+ buffer_info->dma = pci_map_page(pdev,
+ buffer_info->page, 0,
+ PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
+ buffer_info = &rx_ring->buffer_info[i];
+ }
+
+ if (likely(rx_ring->next_to_use != i)) {
+ rx_ring->next_to_use = i;
+ if (unlikely(i-- == 0))
+ i = (rx_ring->count - 1);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64). */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
+}
+
+/**
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
*
@@ -783,6 +867,186 @@ next_desc:
}
/**
+ * e1000_consume_page - helper function
+ **/
+static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
+ u16 length)
+{
+ bi->page = NULL;
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+ int *work_done, int work_to_do)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ struct e1000_rx_desc *rx_desc, *next_rxd;
+ struct e1000_buffer *buffer_info, *next_buffer;
+ u32 length;
+ unsigned int i;
+ int cleaned_count = 0;
+ bool cleaned = false;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+
+ while (rx_desc->status & E1000_RXD_STAT_DD) {
+ struct sk_buff *skb;
+ u8 status;
+
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ status = rx_desc->status;
+ skb = buffer_info->skb;
+ buffer_info->skb = NULL;
+
+ ++i;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC(*rx_ring, i);
+ prefetch(next_rxd);
+
+ next_buffer = &rx_ring->buffer_info[i];
+
+ cleaned = true;
+ cleaned_count++;
+ pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
+
+ length = le16_to_cpu(rx_desc->length);
+
+ /* errors is only valid for DD + EOP descriptors */
+ if (unlikely((status & E1000_RXD_STAT_EOP) &&
+ (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+ /* recycle both page and skb */
+ buffer_info->skb = skb;
+ /* an error means any chain goes out the window
+ * too */
+ if (rx_ring->rx_skb_top)
+ dev_kfree_skb(rx_ring->rx_skb_top);
+ rx_ring->rx_skb_top = NULL;
+ goto next_desc;
+ }
+
+#define rxtop rx_ring->rx_skb_top
+ if (!(status & E1000_RXD_STAT_EOP)) {
+ /* this descriptor is only the beginning (or middle) */
+ if (!rxtop) {
+ /* this is the beginning of a chain */
+ rxtop = skb;
+ skb_fill_page_desc(rxtop, 0, buffer_info->page,
+ 0, length);
+ } else {
+ /* this is the middle of a chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the skb, only consumed the page */
+ buffer_info->skb = skb;
+ }
+ e1000_consume_page(buffer_info, rxtop, length);
+ goto next_desc;
+ } else {
+ if (rxtop) {
+ /* end of the chain */
+ skb_fill_page_desc(rxtop,
+ skb_shinfo(rxtop)->nr_frags,
+ buffer_info->page, 0, length);
+ /* re-use the current skb, we only consumed the
+ * page */
+ buffer_info->skb = skb;
+ skb = rxtop;
+ rxtop = NULL;
+ e1000_consume_page(buffer_info, skb, length);
+ } else {
+ /* no chain, got EOP: this buf is the packet;
+ * use copybreak to save the put_page/alloc_page */
+ if (length <= copybreak &&
+ skb_tailroom(skb) >= length) {
+ u8 *vaddr;
+ vaddr = kmap_atomic(buffer_info->page,
+ KM_SKB_DATA_SOFTIRQ);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
+ kunmap_atomic(vaddr,
+ KM_SKB_DATA_SOFTIRQ);
+ /* re-use the page, so don't erase
+ * buffer_info->page */
+ skb_put(skb, length);
+ } else {
+ skb_fill_page_desc(skb, 0,
+ buffer_info->page, 0,
+ length);
+ e1000_consume_page(buffer_info, skb,
+ length);
+ }
+ }
+ }
+
+ /* Receive Checksum Offload XXX recompute due to CRC strip? */
+ e1000_rx_checksum(adapter,
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /* eth type trans needs skb->data to point to something */
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ ndev_err(netdev, "pskb_may_pull failed.\n");
+ dev_kfree_skb(skb);
+ goto next_desc;
+ }
+
+ e1000_receive_skb(adapter, netdev, skb, status,
+ rx_desc->special);
+
+next_desc:
+ rx_desc->status = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+ adapter->alloc_rx_buf(adapter, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ buffer_info = next_buffer;
+ }
+ rx_ring->next_to_clean = i;
+
+ cleaned_count = e1000_desc_unused(rx_ring);
+ if (cleaned_count)
+ adapter->alloc_rx_buf(adapter, cleaned_count);
+
+ adapter->total_rx_bytes += total_rx_bytes;
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+ return cleaned;
+}
+
+/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
**/
@@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
+ else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
+ pci_unmap_page(pdev, buffer_info->dma,
+ PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_ps_bsize0,
@@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info->dma = 0;
}
+ if (buffer_info->page) {
+ put_page(buffer_info->page);
+ buffer_info->page = NULL;
+ }
+
if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
@@ -1755,10 +2028,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
* a lot of memory, since we allocate 3 pages at all times
* per packet.
*/
- adapter->rx_ps_pages = 0;
pages = PAGE_USE_COUNT(adapter->netdev->mtu);
- if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
+ if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) &&
+ (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
adapter->rx_ps_pages = pages;
+ else
+ adapter->rx_ps_pages = 0;
if (adapter->rx_ps_pages) {
/* Configure extra packet-split registers */
@@ -1819,9 +2094,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+ } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
+ adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+ adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
} else {
- rdlen = rx_ring->count *
- sizeof(struct e1000_rx_desc);
+ rdlen = rx_ring->count * sizeof(struct e1000_rx_desc);
adapter->clean_rx = e1000_clean_rx_irq;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
}
@@ -1885,8 +2163,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* units), e.g. using jumbo frames when setting to E1000_ERT_2048
*/
if ((adapter->flags & FLAG_HAS_ERT) &&
- (adapter->netdev->mtu > ETH_DATA_LEN))
- ew32(ERT, E1000_ERT_2048);
+ (adapter->netdev->mtu > ETH_DATA_LEN)) {
+ u32 rxdctl = er32(RXDCTL(0));
+ ew32(RXDCTL(0), rxdctl | 0x3);
+ ew32(ERT, E1000_ERT_2048 | (1 << 13));
+ /*
+ * With jumbo frames and early-receive enabled, excessive
+ * C4->C2 latencies result in dropped transactions.
+ */
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+ e1000e_driver_name, 55);
+ } else {
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+ e1000e_driver_name,
+ PM_QOS_DEFAULT_VALUE);
+ }
/* Enable Receives */
ew32(RCTL, rctl);
@@ -2155,6 +2446,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
+
+ /*
+ * For parts with AMT enabled, let the firmware know
+ * that the network interface is in control
+ */
+ if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw))
+ e1000_get_hw_control(adapter);
+
ew32(WUC, 0);
if (mac->ops.init_hw(hw))
@@ -3469,6 +3768,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
+ * However with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
*/
if (max_frame <= 256)
@@ -3626,6 +3927,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
ew32(CTRL_EXT, ctrl_ext);
}
+ if (adapter->flags & FLAG_IS_ICH)
+ e1000e_disable_gig_wol_ich8lan(&adapter->hw);
+
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
@@ -4292,6 +4596,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
{ } /* terminate list */
};
@@ -4326,7 +4637,9 @@ static int __init e1000_init_module(void)
printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
e1000e_driver_name);
ret = pci_register_driver(&e1000_driver);
-
+ pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
+ PM_QOS_DEFAULT_VALUE);
+
return ret;
}
module_init(e1000_init_module);
@@ -4340,6 +4653,7 @@ module_init(e1000_init_module);
static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
+ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
}
module_exit(e1000_exit_module);
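The pm_qos calls bracket the driver's lifetime: a default CPU DMA latency requirement is registered at module init, tightened to 55 while jumbo frames with early-receive are active (see e1000_configure_rx() above), and removed again at module exit. In isolation:

    pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name,
                           PM_QOS_DEFAULT_VALUE);              /* module init */
    pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
                              e1000e_driver_name, 55);         /* jumbo + ERT on */
    pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
                              e1000e_driver_name);             /* module exit */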
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index e102332a6be..b133dcf0e95 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -34,6 +34,9 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] =
@@ -465,6 +468,10 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if (phy->disable_polarity_correction == 1)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+ /* Enable downshift on BM (disabled by default) */
+ if (phy->type == e1000_phy_bm)
+ phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+
ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
return ret_val;
@@ -1776,6 +1783,10 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
case IFE_C_E_PHY_ID:
phy_type = e1000_phy_ife;
break;
+ case BME1000_E_PHY_ID:
+ case BME1000_E_PHY_ID_R2:
+ phy_type = e1000_phy_bm;
+ break;
default:
phy_type = e1000_phy_unknown;
break;
@@ -1784,6 +1795,273 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
}
/**
+ * e1000e_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000e_determine_phy_address(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_PHY_TYPE;
+ u32 phy_addr = 0;
+ u32 i = 0;
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ do {
+ for (phy_addr = 0; phy_addr < 4; phy_addr++) {
+ hw->phy.addr = phy_addr;
+ e1000e_get_phy_id(hw);
+ phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
+
+ /*
+ * If phy_type is valid, break - we found our
+ * PHY address
+ */
+ if (phy_type != e1000_phy_unknown) {
+ ret_val = 0;
+ break;
+ }
+ }
+ i++;
+ } while ((ret_val != 0) && (i < 100));
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ * @page: page to access
+ *
+ * Returns the phy address for the page requested.
+ **/
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+ u32 phy_addr = 2;
+
+ if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000e_write_phy_reg_bm - Write BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ s32 ret_val;
+ u32 page_select = 0;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+ u32 page_shift = 0;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /*
+ * Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val) {
+ hw->phy.ops.release_phy(hw);
+ goto out;
+ }
+ }
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release_phy(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000e_read_phy_reg_bm - Read BM PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ s32 ret_val;
+ u32 page_select = 0;
+ u32 page = offset >> IGP_PAGE_SHIFT;
+ u32 page_shift = 0;
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val)
+ goto out;
+
+ hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ /*
+ * Page select is register 31 for phy address 1 and 22 for
+ * phy address 2 and 3. Page select is shifted only for
+ * phy address 1.
+ */
+ if (hw->phy.addr == 1) {
+ page_shift = IGP_PAGE_SHIFT;
+ page_select = IGP01E1000_PHY_PAGE_SELECT;
+ } else {
+ page_shift = 0;
+ page_select = BM_PHY_PAGE_SELECT;
+ }
+
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+ (page << page_shift));
+ if (ret_val) {
+ hw->phy.ops.release_phy(hw);
+ goto out;
+ }
+ }
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+ hw->phy.ops.release_phy(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to read or write
+ * @read: determines if operation is read or write
+ *
+ * Acquires the semaphore, if necessary, then reads or writes the PHY
+ * register at offset, storing any retrieved information in data. Release
+ * any acquired semaphores before exiting. Note that the procedure to access
+ * the wakeup registers is different. It works as follows:
+ * 1) Set page 769, register 17, bit 2 = 1
+ * 2) Set page to 800 for host (801 if we were manageability)
+ * 3) Write the address using the address opcode (0x11)
+ * 4) Read or write the data using the data opcode (0x12)
+ * 5) Restore 769_17.2 to its original value
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u16 reg = ((u16)offset) & PHY_REG_MASK;
+ u16 phy_reg = 0;
+ u8 phy_acquired = 1;
+
+
+ ret_val = hw->phy.ops.acquire_phy(hw);
+ if (ret_val) {
+ phy_acquired = 0;
+ goto out;
+ }
+
+ /* All operations in this function are phy address 1 */
+ hw->phy.addr = 1;
+
+ /* Set page 769 */
+ e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ /* First clear bit 4 to avoid a power state change */
+ phy_reg &= ~(BM_WUC_HOST_WU_BIT);
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ if (ret_val)
+ goto out;
+
+ /* Write bit 2 = 1, and clear bit 4 to 769_17 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG,
+ phy_reg | BM_WUC_ENABLE_BIT);
+ if (ret_val)
+ goto out;
+
+ /* Select page 800 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+
+ /* Write the page 800 offset value using opcode 0x11 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+ if (ret_val)
+ goto out;
+
+ if (read) {
+ /* Read the page 800 value using opcode 0x12 */
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ data);
+ } else {
+ /* Write the page 800 value using opcode 0x12 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ *data);
+ }
+
+ if (ret_val)
+ goto out;
+
+ /*
+ * Restore 769_17.2 to its original value
+ * Set page 769
+ */
+ e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
+
+ /* Clear 769_17.2 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+
+out:
+ if (phy_acquired == 1)
+ hw->phy.ops.release_phy(hw);
+ return ret_val;
+}
+
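With these helpers in place a wakeup-register access looks like any other PHY read; the page-800 sequence above is selected automatically when the offset decodes to BM_WUC_PAGE. A hypothetical usage sketch:

    u16 wufc;
    s32 ret_val;

    /* BM_WUFC is PHY_REG(BM_WUC_PAGE, 2); page 800 is routed to
     * e1000_access_phy_wakeup_reg_bm() internally */
    ret_val = e1000e_read_phy_reg_bm(hw, BM_WUFC, &wufc);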
+/**
* e1000e_commit_phy - Soft PHY reset
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 2eb82aba4a8..795c594a4b7 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -202,7 +202,7 @@ static unsigned short start_code[] = {
0x0000,Cmd_MCast,
0x0076, /* link to next command */
#define CONF_NR_MULTICAST 0x44
- 0x0000, /* number of multicast addresses */
+ 0x0000, /* number of bytes in multicast address(es) */
#define CONF_MULTICAST 0x46
0x0000, 0x0000, 0x0000, /* some addresses */
0x0000, 0x0000, 0x0000,
@@ -1569,7 +1569,7 @@ static void eexp_hw_init586(struct net_device *dev)
static void eexp_setup_filter(struct net_device *dev)
{
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
unsigned short ioaddr = dev->base_addr;
int count = dev->mc_count;
int i;
@@ -1580,9 +1580,9 @@ static void eexp_setup_filter(struct net_device *dev)
}
outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
- outw(count, ioaddr+SHADOW(CONF_NR_MULTICAST));
- for (i = 0; i < count; i++) {
- unsigned short *data = (unsigned short *)dmi->dmi_addr;
+ outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
+ for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) {
+ unsigned short *data;
if (!dmi) {
printk(KERN_INFO "%s: too few multicast addresses\n", dev->name);
break;
@@ -1591,6 +1591,7 @@ static void eexp_setup_filter(struct net_device *dev)
printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
continue;
}
+ data = (unsigned short *)dmi->dmi_addr;
outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
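The i82586 configuration block expects a byte count here, not an address count, which is why the store becomes 6*count. Restating the changed line as a worked example: with three multicast addresses the controller is told 18 bytes (3 * ETH_ALEN), matching the 6-byte strides written into CONF_MULTICAST below.

    outw(6 * count, ioaddr + SHADOW(CONF_NR_MULTICAST)); /* e.g. 3 addresses -> 18 bytes */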
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f5dacceab95..fe872fbd671 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0090"
+#define DRV_VERSION "EHEA_0091"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
@@ -118,6 +118,13 @@
#define EHEA_MR_ACC_CTRL 0x00800000
#define EHEA_BUSMAP_START 0x8000000000000000ULL
+#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
+#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
+#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
+#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
+#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */
+#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)
+
#define EHEA_WATCH_DOG_TIMEOUT 10*HZ
@@ -192,10 +199,20 @@ struct h_epas {
set to 0 if unused */
};
-struct ehea_busmap {
- unsigned int entries; /* total number of entries */
- unsigned int valid_sections; /* number of valid sections */
- u64 *vaddr;
+/*
+ * Memory map data structures
+ */
+struct ehea_dir_bmap {
+ u64 ent[EHEA_MAP_ENTRIES];
+};
+struct ehea_top_bmap {
+ struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
+};
+struct ehea_bmap {
+ struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
+};
struct ehea_qp;
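The flat per-section vaddr array is replaced by a two-level radix of 8K-entry blocks, so only populated ranges allocate directory blocks. A section index decomposes as below, mirroring ehea_calc_index()/ehea_map_vaddr() in ehea_qmr.c (with EHEA_DIR_INDEX_SHIFT = 13, EHEA_TOP_INDEX_SHIFT = 26, EHEA_INDEX_MASK = 0x1fff):

    top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;  /* bits 26 and up */
    dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;  /* bits 13..25    */
    idx =  index                          & EHEA_INDEX_MASK;  /* bits 0..12     */
    vaddr = ehea_bmap->top[top]->dir[dir]->ent[idx]
            | ((unsigned long)caddr & (EHEA_SECTSIZE - 1));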
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f9bc21c74b5..d1b6d4e7495 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
+#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
@@ -3503,6 +3504,24 @@ void ehea_crash_handler(void)
0, H_DEREG_BCMC);
}
+static int ehea_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ switch (action) {
+ case MEM_OFFLINE:
+ ehea_info("memory has been removed");
+ ehea_rereg_mrs(NULL);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ehea_mem_nb = {
+ .notifier_call = ehea_mem_notifier,
+};
+
static int ehea_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
@@ -3581,6 +3600,10 @@ int __init ehea_module_init(void)
if (ret)
ehea_info("failed registering reboot notifier");
+ ret = register_memory_notifier(&ehea_mem_nb);
+ if (ret)
+ ehea_info("failed registering memory remove notifier");
+
ret = crash_shutdown_register(&ehea_crash_handler);
if (ret)
ehea_info("failed registering crash handler");
@@ -3604,6 +3627,7 @@ int __init ehea_module_init(void)
out3:
ibmebus_unregister_driver(&ehea_driver);
out2:
+ unregister_memory_notifier(&ehea_mem_nb);
unregister_reboot_notifier(&ehea_reboot_nb);
crash_shutdown_unregister(&ehea_crash_handler);
out:
@@ -3621,6 +3645,7 @@ static void __exit ehea_module_exit(void)
ret = crash_shutdown_unregister(&ehea_crash_handler);
if (ret)
ehea_info("failed unregistering crash handler");
+ unregister_memory_notifier(&ehea_mem_nb);
kfree(ehea_fw_handles.arr);
kfree(ehea_bcmc_regs.arr);
ehea_destroy_busmap();
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index d522e905f46..140f05baafd 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,8 +31,8 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"
+struct ehea_bmap *ehea_bmap = NULL;
-struct ehea_busmap ehea_bmap = { 0, 0, NULL };
static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -559,125 +559,253 @@ int ehea_destroy_qp(struct ehea_qp *qp)
return 0;
}
-int ehea_create_busmap(void)
+static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
- u64 vaddr = EHEA_BUSMAP_START;
- unsigned long high_section_index = 0;
- int i;
+ return (i >> s) & EHEA_INDEX_MASK;
+}
- /*
- * Sections are not in ascending order -> Loop over all sections and
- * find the highest PFN to compute the required map size.
- */
- ehea_bmap.valid_sections = 0;
+static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
+ int dir)
+{
+ if (!ehea_top_bmap->dir[dir]) {
+ ehea_top_bmap->dir[dir] =
+ kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
+ if (!ehea_top_bmap->dir[dir])
+ return -ENOMEM;
+ }
+ return 0;
+}
- for (i = 0; i < NR_MEM_SECTIONS; i++)
- if (valid_section_nr(i))
- high_section_index = i;
+static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
+{
+ if (!ehea_bmap->top[top]) {
+ ehea_bmap->top[top] =
+ kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
+ if (!ehea_bmap->top[top])
+ return -ENOMEM;
+ }
+ return ehea_init_top_bmap(ehea_bmap->top[top], dir);
+}
- ehea_bmap.entries = high_section_index + 1;
- ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+static int ehea_create_busmap_callback(unsigned long pfn,
+ unsigned long nr_pages, void *arg)
+{
+ unsigned long i, mr_len, start_section, end_section;
+ start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
+ end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
+ mr_len = *(unsigned long *)arg;
- if (!ehea_bmap.vaddr)
+ ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+ if (!ehea_bmap)
return -ENOMEM;
- for (i = 0 ; i < ehea_bmap.entries; i++) {
- unsigned long pfn = section_nr_to_pfn(i);
+ for (i = start_section; i < end_section; i++) {
+ int ret;
+ int top, dir, idx;
+ u64 vaddr;
+
+ top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
+ dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+
+ ret = ehea_init_bmap(ehea_bmap, top, dir);
+ if (ret)
+ return ret;
- if (pfn_valid(pfn)) {
- ehea_bmap.vaddr[i] = vaddr;
- vaddr += EHEA_SECTSIZE;
- ehea_bmap.valid_sections++;
- } else
- ehea_bmap.vaddr[i] = 0;
+ idx = i & EHEA_INDEX_MASK;
+ vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;
+
+ ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
}
+ mr_len += nr_pages * PAGE_SIZE;
+ *(unsigned long *)arg = mr_len;
+
return 0;
}
+static unsigned long ehea_mr_len;
+
+static DEFINE_MUTEX(ehea_busmap_mutex);
+
+int ehea_create_busmap(void)
+{
+ int ret;
+ mutex_lock(&ehea_busmap_mutex);
+ ehea_mr_len = 0;
+ ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
+ ehea_create_busmap_callback);
+ mutex_unlock(&ehea_busmap_mutex);
+ return ret;
+}
+
void ehea_destroy_busmap(void)
{
- vfree(ehea_bmap.vaddr);
+ int top, dir;
+ mutex_lock(&ehea_busmap_mutex);
+ if (!ehea_bmap)
+ goto out_destroy;
+
+ for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+ if (!ehea_bmap->top[top])
+ continue;
+
+ for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+ if (!ehea_bmap->top[top]->dir[dir])
+ continue;
+
+ kfree(ehea_bmap->top[top]->dir[dir]);
+ }
+
+ kfree(ehea_bmap->top[top]);
+ }
+
+ kfree(ehea_bmap);
+ ehea_bmap = NULL;
+out_destroy:
+ mutex_unlock(&ehea_busmap_mutex);
}
u64 ehea_map_vaddr(void *caddr)
{
- u64 mapped_addr;
- unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
-
- if (likely(index < ehea_bmap.entries)) {
- mapped_addr = ehea_bmap.vaddr[index];
- if (likely(mapped_addr))
- mapped_addr |= (((unsigned long)caddr)
- & (EHEA_SECTSIZE - 1));
- else
- mapped_addr = -1;
- } else
- mapped_addr = -1;
-
- if (unlikely(mapped_addr == -1))
- if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
- schedule_work(&ehea_rereg_mr_task);
-
- return mapped_addr;
+ int top, dir, idx;
+ unsigned long index, offset;
+
+ if (!ehea_bmap)
+ return EHEA_INVAL_ADDR;
+
+ index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
+ top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
+ if (!ehea_bmap->top[top])
+ return EHEA_INVAL_ADDR;
+
+ dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
+ if (!ehea_bmap->top[top]->dir[dir])
+ return EHEA_INVAL_ADDR;
+
+ idx = index & EHEA_INDEX_MASK;
+ if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+ return EHEA_INVAL_ADDR;
+
+ offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
+ return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
+}
+
+static inline void *ehea_calc_sectbase(int top, int dir, int idx)
+{
+ unsigned long ret = idx;
+ ret |= dir << EHEA_DIR_INDEX_SHIFT;
+ ret |= top << EHEA_TOP_INDEX_SHIFT;
+ return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
+ struct ehea_adapter *adapter,
+ struct ehea_mr *mr)
+{
+ void *pg;
+ u64 j, m, hret;
+ unsigned long k = 0;
+ u64 pt_abs = virt_to_abs(pt);
+
+ void *sectbase = ehea_calc_sectbase(top, dir, idx);
+
+ for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
+
+ for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+ pg = sectbase + ((k++) * EHEA_PAGESIZE);
+ pt[m] = virt_to_abs(pg);
+ }
+ hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
+ 0, pt_abs, EHEA_MAX_RPAGE);
+
+ if ((hret != H_SUCCESS)
+ && (hret != H_PAGE_REGISTERED)) {
+ ehea_h_free_resource(adapter->handle, mr->handle,
+ FORCE_FREE);
+ ehea_error("register_rpage_mr failed");
+ return hret;
+ }
+ }
+ return hret;
+}
+
+static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
+ struct ehea_adapter *adapter,
+ struct ehea_mr *mr)
+{
+ u64 hret = H_SUCCESS;
+ int idx;
+
+ for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+ if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
+ continue;
+
+ hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
+}
+
+static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
+ struct ehea_adapter *adapter,
+ struct ehea_mr *mr)
+{
+ u64 hret = H_SUCCESS;
+ int dir;
+
+ for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+ if (!ehea_bmap->top[top]->dir[dir])
+ continue;
+
+ hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
}
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
int ret;
u64 *pt;
- void *pg;
- u64 hret, pt_abs, i, j, m, mr_len;
+ u64 hret;
u32 acc_ctrl = EHEA_MR_ACC_CTRL;
- mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
+ unsigned long top;
- pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!pt) {
ehea_error("no mem");
ret = -ENOMEM;
goto out;
}
- pt_abs = virt_to_abs(pt);
- hret = ehea_h_alloc_resource_mr(adapter->handle,
- EHEA_BUSMAP_START, mr_len,
- acc_ctrl, adapter->pd,
+ hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
+ ehea_mr_len, acc_ctrl, adapter->pd,
&mr->handle, &mr->lkey);
+
if (hret != H_SUCCESS) {
ehea_error("alloc_resource_mr failed");
ret = -EIO;
goto out;
}
- for (i = 0 ; i < ehea_bmap.entries; i++)
- if (ehea_bmap.vaddr[i]) {
- void *sectbase = __va(i << SECTION_SIZE_BITS);
- unsigned long k = 0;
-
- for (j = 0; j < (EHEA_PAGES_PER_SECTION /
- EHEA_MAX_RPAGE); j++) {
-
- for (m = 0; m < EHEA_MAX_RPAGE; m++) {
- pg = sectbase + ((k++) * EHEA_PAGESIZE);
- pt[m] = virt_to_abs(pg);
- }
-
- hret = ehea_h_register_rpage_mr(adapter->handle,
- mr->handle,
- 0, 0, pt_abs,
- EHEA_MAX_RPAGE);
- if ((hret != H_SUCCESS)
- && (hret != H_PAGE_REGISTERED)) {
- ehea_h_free_resource(adapter->handle,
- mr->handle,
- FORCE_FREE);
- ehea_error("register_rpage_mr failed");
- ret = -EIO;
- goto out;
- }
- }
- }
+ if (!ehea_bmap) {
+ ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+ ehea_error("no busmap available");
+ ret = -EIO;
+ goto out;
+ }
+
+ for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+ if (!ehea_bmap->top[top])
+ continue;
+
+ hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
+ if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+ break;
+ }
if (hret != H_SUCCESS) {
ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
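
The new ehea_bmap replaces the flat per-section array with a three-level (top/dir/idx) radix structure keyed by memory-section number, so sparse memory no longer forces one huge vmalloc'd table. A minimal sketch of the index arithmetic used by ehea_map_vaddr(), with EX_* placeholders standing in for the driver's real EHEA_*_INDEX_SHIFT / EHEA_INDEX_MASK constants:

	/* EX_* constants are placeholders; the real values live in ehea_qmr.h */
	#define EX_INDEX_BITS	9
	#define EX_INDEX_MASK	((1UL << EX_INDEX_BITS) - 1)
	#define EX_DIR_SHIFT	EX_INDEX_BITS
	#define EX_TOP_SHIFT	(2 * EX_INDEX_BITS)

	/* split a memory-section number the way ehea_map_vaddr() does */
	static void ex_split_section(unsigned long section,
				     int *top, int *dir, int *idx)
	{
		*top = (section >> EX_TOP_SHIFT) & EX_INDEX_MASK;
		*dir = (section >> EX_DIR_SHIFT) & EX_INDEX_MASK;
		*idx = section & EX_INDEX_MASK;
	}
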
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index ba75efc9f5b..f0014cfbb27 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -194,7 +194,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
ret = of_address_to_resource(ofdev->node, 0, &res);
if (ret)
- return ret;
+ goto out_res;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
@@ -236,6 +236,7 @@ out_free_irqs:
kfree(new_bus->irq);
out_unmap_regs:
iounmap(fec->fecp);
+out_res:
out_fec:
kfree(fec);
out_mii:
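
The mii-fec change above reroutes the of_address_to_resource() failure through the existing unwind labels instead of returning directly, so nothing set up earlier is leaked. A generic sketch of that goto-unwind idiom (ex_* names are hypothetical, not fs_enet code):

	#include <linux/slab.h>

	static int ex_setup(void *p)		/* stand-in for a real setup step */
	{
		return 0;
	}

	static int ex_probe(void)
	{
		void *a, *b;
		int ret;

		a = kzalloc(16, GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		b = kzalloc(16, GFP_KERNEL);
		if (!b) {
			ret = -ENOMEM;
			goto out_free_a;	/* unwind only what exists so far */
		}

		ret = ex_setup(b);
		if (ret)
			goto out_free_b;

		return 0;

	out_free_b:
		kfree(b);
	out_free_a:
		kfree(a);
		return ret;
	}
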
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 642dc633b44..393a0f17530 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -138,6 +138,7 @@ static int gfar_poll(struct napi_struct *napi, int budget);
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
+static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
@@ -634,6 +635,8 @@ static void free_skb_resources(struct gfar_private *priv)
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
+
+ txbdp++;
}
kfree(priv->tx_skbuff);
@@ -1141,7 +1144,7 @@ static int gfar_close(struct net_device *dev)
}
/* Changes the mac address if the controller is not running. */
-int gfar_set_mac_address(struct net_device *dev)
+static int gfar_set_mac_address(struct net_device *dev)
{
gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
@@ -1260,7 +1263,7 @@ static void gfar_timeout(struct net_device *dev)
}
/* Interrupt Handler for Transmit complete */
-int gfar_clean_tx_ring(struct net_device *dev)
+static int gfar_clean_tx_ring(struct net_device *dev)
{
struct txbd8 *bdp;
struct gfar_private *priv = netdev_priv(dev);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index fd487be3993..27f37c81e52 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -782,5 +782,8 @@ extern void gfar_halt(struct net_device *dev);
extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
int enable, u32 regnum, u32 read);
void gfar_init_sysfs(struct net_device *dev);
+int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
+ int regnum, u16 value);
+int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
#endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 230878b9419..5116f68e01b 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -103,10 +103,10 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
spin_lock_irqsave(&priv->rxlock, flags);
if (length > priv->rx_buffer_size)
- return count;
+ goto out;
if (length == priv->rx_stash_size)
- return count;
+ goto out;
priv->rx_stash_size = length;
@@ -125,6 +125,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
gfar_write(&priv->regs->attr, temp);
+out:
spin_unlock_irqrestore(&priv->rxlock, flags);
return count;
@@ -154,10 +155,10 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
spin_lock_irqsave(&priv->rxlock, flags);
if (index > priv->rx_stash_size)
- return count;
+ goto out;
if (index == priv->rx_stash_index)
- return count;
+ goto out;
priv->rx_stash_index = index;
@@ -166,6 +167,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
temp |= ATTRELI_EI(index);
gfar_write(&priv->regs->attreli, flags);
+out:
spin_unlock_irqrestore(&priv->rxlock, flags);
return count;
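
The two gianfar sysfs store handlers above used to return while still holding priv->rxlock; funneling the early exits through a single out: label guarantees the lock is released on every path. A stripped-down sketch of the idiom, with hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static ssize_t ex_store(spinlock_t *lock, unsigned int val,
				unsigned int limit, unsigned int *target,
				size_t count)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);

		if (val > limit || val == *target)
			goto out;	/* early exits still hit the unlock */

		*target = val;
	out:
		spin_unlock_irqrestore(lock, flags);
		return count;
	}
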
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index a873d2b315c..a7714da7c28 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -100,7 +100,9 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
+#ifdef CONFIG_PNP
static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
+#endif
/* These are the known NSC chips */
static nsc_chip_t chips[] = {
@@ -156,9 +158,11 @@ static const struct pnp_device_id nsc_ircc_pnp_table[] = {
MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);
static struct pnp_driver nsc_ircc_pnp_driver = {
+#ifdef CONFIG_PNP
.name = "nsc-ircc",
.id_table = nsc_ircc_pnp_table,
.probe = nsc_ircc_pnp_probe,
+#endif
};
/* Some prototypes */
@@ -916,6 +920,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
return 0;
}
+#ifdef CONFIG_PNP
/* PNP probing */
static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
@@ -952,6 +957,7 @@ static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *i
return 0;
}
+#endif
/*
* Function nsc_ircc_setup (info)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 1f26da761e9..cfe0194fef7 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -376,6 +376,7 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
static int pnp_driver_registered;
+#ifdef CONFIG_PNP
static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
{
@@ -402,7 +403,9 @@ static struct pnp_driver smsc_ircc_pnp_driver = {
.id_table = smsc_ircc_pnp_table,
.probe = smsc_ircc_pnp_probe,
};
-
+#else /* CONFIG_PNP */
+static struct pnp_driver smsc_ircc_pnp_driver;
+#endif
/*******************************************************************************
*
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2056cfc624d..c36a03ae9bf 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -450,7 +450,7 @@ static void macvlan_dellink(struct net_device *dev)
unregister_netdevice(dev);
if (list_empty(&port->vlans))
- macvlan_port_destroy(dev);
+ macvlan_port_destroy(port->dev);
}
static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index cb46446b269..03a9abcce52 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -551,7 +551,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
u64 mtt_seg;
int err = -ENOMEM;
- if (page_shift < 12 || page_shift >= 32)
+ if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
return -EINVAL;
/* All MTTs must fit in the same page */
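
The new lower bound in mlx4_fmr_alloc() derives the minimum supported page shift from the device's page_size_cap bitmask: ffs() returns the 1-based position of the lowest set bit. A tiny illustration; the capability value shown is made up:

	#include <linux/bitops.h>

	static int ex_min_page_shift(u32 page_size_cap)
	{
		/* e.g. page_size_cap == 0xfffff000: lowest set bit is bit 12,
		 * ffs() returns 13 (1-based), so the minimum shift is 12 (4 KiB) */
		return ffs(page_size_cap) - 1;
	}
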
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 381b36e5f64..b7915cdcc6a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -91,6 +91,11 @@
*/
#define PHY_ADDR_REG 0x0000
#define SMI_REG 0x0004
+#define WINDOW_BASE(i) (0x0200 + ((i) << 3))
+#define WINDOW_SIZE(i) (0x0204 + ((i) << 3))
+#define WINDOW_REMAP_HIGH(i) (0x0280 + ((i) << 2))
+#define WINDOW_BAR_ENABLE 0x0290
+#define WINDOW_PROTECT(i) (0x0294 + ((i) << 4))
/*
* Per-port registers.
@@ -507,9 +512,23 @@ struct mv643xx_mib_counters {
u32 late_collision;
};
+struct mv643xx_shared_private {
+ void __iomem *eth_base;
+
+ /* used to protect SMI_REG, which is shared across ports */
+ spinlock_t phy_lock;
+
+ u32 win_protect;
+
+ unsigned int t_clk;
+};
+
struct mv643xx_private {
+ struct mv643xx_shared_private *shared;
int port_num; /* User Ethernet port number */
+ struct mv643xx_shared_private *shared_smi;
+
u32 rx_sram_addr; /* Base address of rx sram area */
u32 rx_sram_size; /* Size of rx sram area */
u32 tx_sram_addr; /* Base address of tx sram area */
@@ -614,19 +633,14 @@ static const struct ethtool_ops mv643xx_ethtool_ops;
static char mv643xx_driver_name[] = "mv643xx_eth";
static char mv643xx_driver_version[] = "1.0";
-static void __iomem *mv643xx_eth_base;
-
-/* used to protect SMI_REG, which is shared across ports */
-static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
-
static inline u32 rdl(struct mv643xx_private *mp, int offset)
{
- return readl(mv643xx_eth_base + offset);
+ return readl(mp->shared->eth_base + offset);
}
static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
{
- writel(data, mv643xx_eth_base + offset);
+ writel(data, mp->shared->eth_base + offset);
}
/*
@@ -1119,7 +1133,6 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
*
* INPUT:
* struct mv643xx_private *mp Ethernet port
- * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
* unsigned int delay Delay in usec
*
* OUTPUT:
@@ -1130,10 +1143,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
*
*/
static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
- unsigned int t_clk, unsigned int delay)
+ unsigned int delay)
{
unsigned int port_num = mp->port_num;
- unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+ unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
/* Set RX Coalescing mechanism */
wrl(mp, SDMA_CONFIG_REG(port_num),
@@ -1158,7 +1171,6 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
*
* INPUT:
* struct mv643xx_private *mp Ethernet port
- * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
* unsigned int delay Delay in uSeconds
*
* OUTPUT:
@@ -1169,9 +1181,9 @@ static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
*
*/
static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
- unsigned int t_clk, unsigned int delay)
+ unsigned int delay)
{
- unsigned int coal = ((t_clk / 1000000) * delay) / 64;
+ unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
/* Set TX Coalescing mechanism */
wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
@@ -1413,11 +1425,11 @@ static int mv643xx_eth_open(struct net_device *dev)
#ifdef MV643XX_COAL
mp->rx_int_coal =
- eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL);
+ eth_port_set_rx_coal(mp, MV643XX_RX_COAL);
#endif
mp->tx_int_coal =
- eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL);
+ eth_port_set_tx_coal(mp, MV643XX_TX_COAL);
/* Unmask phy and link status changes interrupts */
wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
@@ -1827,6 +1839,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (pd->shared == NULL) {
+ printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
+ return -ENODEV;
+ }
+
dev = alloc_etherdev(sizeof(struct mv643xx_private));
if (!dev)
return -ENOMEM;
@@ -1877,8 +1894,16 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
spin_lock_init(&mp->lock);
+ mp->shared = platform_get_drvdata(pd->shared);
port_num = mp->port_num = pd->port_number;
+ if (mp->shared->win_protect)
+ wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);
+
+ mp->shared_smi = mp->shared;
+ if (pd->shared_smi != NULL)
+ mp->shared_smi = platform_get_drvdata(pd->shared_smi);
+
/* set default config values */
eth_port_uc_addr_get(mp, dev->dev_addr);
mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
@@ -1983,30 +2008,91 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
return 0;
}
+static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp,
+ struct mbus_dram_target_info *dram)
+{
+ void __iomem *base = msp->eth_base;
+ u32 win_enable;
+ u32 win_protect;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ writel(0, base + WINDOW_BASE(i));
+ writel(0, base + WINDOW_SIZE(i));
+ if (i < 4)
+ writel(0, base + WINDOW_REMAP_HIGH(i));
+ }
+
+ win_enable = 0x3f;
+ win_protect = 0;
+
+ for (i = 0; i < dram->num_cs; i++) {
+ struct mbus_dram_window *cs = dram->cs + i;
+
+ writel((cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+ writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+ win_enable &= ~(1 << i);
+ win_protect |= 3 << (2 * i);
+ }
+
+ writel(win_enable, base + WINDOW_BAR_ENABLE);
+ msp->win_protect = win_protect;
+}
+
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
static int mv643xx_version_printed = 0;
+ struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
+ struct mv643xx_shared_private *msp;
struct resource *res;
+ int ret;
if (!mv643xx_version_printed++)
printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
+ ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
- return -ENODEV;
+ goto out;
- mv643xx_eth_base = ioremap(res->start, res->end - res->start + 1);
- if (mv643xx_eth_base == NULL)
- return -ENOMEM;
+ ret = -ENOMEM;
+ msp = kmalloc(sizeof(*msp), GFP_KERNEL);
+ if (msp == NULL)
+ goto out;
+ memset(msp, 0, sizeof(*msp));
+
+ msp->eth_base = ioremap(res->start, res->end - res->start + 1);
+ if (msp->eth_base == NULL)
+ goto out_free;
+
+ spin_lock_init(&msp->phy_lock);
+ msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
+
+ platform_set_drvdata(pdev, msp);
+
+ /*
+ * (Re-)program MBUS remapping windows if we are asked to.
+ */
+ if (pd != NULL && pd->dram != NULL)
+ mv643xx_eth_conf_mbus_windows(msp, pd->dram);
return 0;
+out_free:
+ kfree(msp);
+out:
+ return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
- iounmap(mv643xx_eth_base);
- mv643xx_eth_base = NULL;
+ struct mv643xx_shared_private *msp = platform_get_drvdata(pdev);
+
+ iounmap(msp->eth_base);
+ kfree(msp);
return 0;
}
@@ -2906,15 +2992,16 @@ static void eth_port_reset(struct mv643xx_private *mp)
static void eth_port_read_smi_reg(struct mv643xx_private *mp,
unsigned int phy_reg, unsigned int *value)
{
+ void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
int phy_addr = ethernet_phy_get(mp);
unsigned long flags;
int i;
/* the SMI register is a shared resource */
- spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
+ spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
/* wait for the SMI register to become available */
- for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
+ for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("%s: PHY busy timeout\n", mp->dev->name);
goto out;
@@ -2922,11 +3009,11 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
udelay(PHY_WAIT_MICRO_SECONDS);
}
- wrl(mp, SMI_REG,
- (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
+ writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ,
+ smi_reg);
/* now wait for the data to be valid */
- for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
+ for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("%s: PHY read timeout\n", mp->dev->name);
goto out;
@@ -2934,9 +3021,9 @@ static void eth_port_read_smi_reg(struct mv643xx_private *mp,
udelay(PHY_WAIT_MICRO_SECONDS);
}
- *value = rdl(mp, SMI_REG) & 0xffff;
+ *value = readl(smi_reg) & 0xffff;
out:
- spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
+ spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
/*
@@ -2962,17 +3049,16 @@ out:
static void eth_port_write_smi_reg(struct mv643xx_private *mp,
unsigned int phy_reg, unsigned int value)
{
- int phy_addr;
- int i;
+ void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG;
+ int phy_addr = ethernet_phy_get(mp);
unsigned long flags;
-
- phy_addr = ethernet_phy_get(mp);
+ int i;
/* the SMI register is a shared resource */
- spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
+ spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
/* wait for the SMI register to become available */
- for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
+ for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) {
if (i == PHY_WAIT_ITERATIONS) {
printk("%s: PHY busy timeout\n", mp->dev->name);
goto out;
@@ -2980,10 +3066,10 @@ static void eth_port_write_smi_reg(struct mv643xx_private *mp,
udelay(PHY_WAIT_MICRO_SECONDS);
}
- wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
- ETH_SMI_OPCODE_WRITE | (value & 0xffff));
+ writel((phy_addr << 16) | (phy_reg << 21) |
+ ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
out:
- spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
+ spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
/*
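
The mv643xx_eth rework above moves the shared SMI (PHY management) register behind a per-controller mv643xx_shared_private, so every port on the same silicon block serializes on one phy_lock and polls one register. A minimal sketch of that access pattern; the ex_* names and the EX_SMI_BUSY value are placeholders, not the driver's definitions:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/spinlock.h>

	#define EX_SMI_BUSY	(1 << 28)	/* placeholder for the real busy flag */

	struct ex_shared {
		void __iomem *smi_reg;
		spinlock_t phy_lock;	/* serializes SMI access across ports */
	};

	static int ex_smi_write(struct ex_shared *shared, u32 cmd, int max_polls)
	{
		unsigned long flags;
		int i, ret = 0;

		spin_lock_irqsave(&shared->phy_lock, flags);

		/* wait for any in-flight SMI transaction to finish */
		for (i = 0; readl(shared->smi_reg) & EX_SMI_BUSY; i++) {
			if (i == max_polls) {
				ret = -ETIMEDOUT;
				goto out;
			}
			udelay(10);
		}

		writel(cmd, shared->smi_reg);
	out:
		spin_unlock_irqrestore(&shared->phy_lock, flags);
		return ret;
	}
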
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ef63c8d2bd7..c91b12ea26a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -144,11 +144,13 @@ struct myri10ge_tx_buf {
char *req_bytes;
struct myri10ge_tx_buffer_state *info;
int mask; /* number of transmit slots -1 */
- int boundary; /* boundary transmits cannot cross */
int req ____cacheline_aligned; /* transmit slots submitted */
int pkt_start; /* packets started */
+ int stop_queue;
+ int linearized;
int done ____cacheline_aligned; /* transmit slots completed */
int pkt_done; /* packets completed */
+ int wake_queue;
};
struct myri10ge_rx_done {
@@ -160,29 +162,50 @@ struct myri10ge_rx_done {
struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
};
-struct myri10ge_priv {
- int running; /* running? */
- int csum_flag; /* rx_csums? */
+struct myri10ge_slice_netstats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+};
+
+struct myri10ge_slice_state {
struct myri10ge_tx_buf tx; /* transmit ring */
struct myri10ge_rx_buf rx_small;
struct myri10ge_rx_buf rx_big;
struct myri10ge_rx_done rx_done;
+ struct net_device *dev;
+ struct napi_struct napi;
+ struct myri10ge_priv *mgp;
+ struct myri10ge_slice_netstats stats;
+ __be32 __iomem *irq_claim;
+ struct mcp_irq_data *fw_stats;
+ dma_addr_t fw_stats_bus;
+ int watchdog_tx_done;
+ int watchdog_tx_req;
+};
+
+struct myri10ge_priv {
+ struct myri10ge_slice_state ss;
+ int tx_boundary; /* boundary transmits cannot cross */
+ int running; /* running? */
+ int csum_flag; /* rx_csums? */
int small_bytes;
int big_bytes;
+ int max_intr_slots;
struct net_device *dev;
- struct napi_struct napi;
struct net_device_stats stats;
+ spinlock_t stats_lock;
u8 __iomem *sram;
int sram_size;
unsigned long board_span;
unsigned long iomem_base;
- __be32 __iomem *irq_claim;
__be32 __iomem *irq_deassert;
char *mac_addr_string;
struct mcp_cmd_response *cmd;
dma_addr_t cmd_bus;
- struct mcp_irq_data *fw_stats;
- dma_addr_t fw_stats_bus;
struct pci_dev *pdev;
int msi_enabled;
u32 link_state;
@@ -191,20 +214,16 @@ struct myri10ge_priv {
__be32 __iomem *intr_coal_delay_ptr;
int mtrr;
int wc_enabled;
- int wake_queue;
- int stop_queue;
int down_cnt;
wait_queue_head_t down_wq;
struct work_struct watchdog_work;
struct timer_list watchdog_timer;
- int watchdog_tx_done;
- int watchdog_tx_req;
- int watchdog_pause;
int watchdog_resets;
- int tx_linearized;
+ int watchdog_pause;
int pause;
char *fw_name;
char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
+ char *product_code_string;
char fw_version[128];
int fw_ver_major;
int fw_ver_minor;
@@ -228,58 +247,54 @@ static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n");
+MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n");
-
-static int myri10ge_max_intr_slots = 1024;
-module_param(myri10ge_max_intr_slots, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n");
+MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
static int myri10ge_small_bytes = -1; /* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n");
+MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
static int myri10ge_msi = 1; /* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n");
+MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n");
+MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n");
+MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
- "Wait when deasserting legacy interrupts\n");
+ "Wait when deasserting legacy interrupts");
static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
- "Force firmware to assume aligned completions\n");
+ "Force firmware to assume aligned completions");
static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
+MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n");
+MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n");
+MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
- "Set stuck legacy IRQ detection threshold\n");
+ "Set stuck legacy IRQ detection threshold");
#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
@@ -289,21 +304,22 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
static int myri10ge_lro = 1;
module_param(myri10ge_lro, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n");
+MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload");
static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n");
+MODULE_PARM_DESC(myri10ge_lro_max_pkts,
+ "Number of LRO packets to be aggregated");
static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
+MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
static int myri10ge_reset_recover = 1;
static int myri10ge_wcfifo = 0;
module_param(myri10ge_wcfifo, int, S_IRUGO);
-MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n");
+MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled");
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
@@ -359,8 +375,10 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
for (sleep_total = 0;
sleep_total < 1000
&& response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
- sleep_total += 10)
+ sleep_total += 10) {
udelay(10);
+ mb();
+ }
} else {
/* use msleep for most command */
for (sleep_total = 0;
@@ -420,6 +438,10 @@ static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
ptr += 1;
}
}
+ if (memcmp(ptr, "PC=", 3) == 0) {
+ ptr += 3;
+ mgp->product_code_string = ptr;
+ }
if (memcmp((const void *)ptr, "SN=", 3) == 0) {
ptr += 3;
mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
@@ -442,7 +464,7 @@ abort:
static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
char __iomem *submit;
- __be32 buf[16];
+ __be32 buf[16] __attribute__ ((__aligned__(8)));
u32 dma_low, dma_high;
int i;
@@ -609,13 +631,38 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
return status;
}
+int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
+{
+ struct myri10ge_cmd cmd;
+ int status;
+
+ /* probe for IPv6 TSO support */
+ mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
+ &cmd, 0);
+ if (status == 0) {
+ mgp->max_tso6 = cmd.data0;
+ mgp->features |= NETIF_F_TSO6;
+ }
+
+ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+ if (status != 0) {
+ dev_err(&mgp->pdev->dev,
+ "failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
+ return -ENXIO;
+ }
+
+ mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
+
+ return 0;
+}
+
static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
{
char __iomem *submit;
- __be32 buf[16];
+ __be32 buf[16] __attribute__ ((__aligned__(8)));
u32 dma_low, dma_high, size;
int status, i;
- struct myri10ge_cmd cmd;
size = 0;
status = myri10ge_load_hotplug_firmware(mgp, &size);
@@ -635,7 +682,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
}
dev_info(&mgp->pdev->dev,
"Successfully adopted running firmware\n");
- if (mgp->tx.boundary == 4096) {
+ if (mgp->tx_boundary == 4096) {
dev_warn(&mgp->pdev->dev,
"Using firmware currently running on NIC"
". For optimal\n");
@@ -646,7 +693,9 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
}
mgp->fw_name = "adopted";
- mgp->tx.boundary = 2048;
+ mgp->tx_boundary = 2048;
+ myri10ge_dummy_rdma(mgp, 1);
+ status = myri10ge_get_firmware_capabilities(mgp);
return status;
}
@@ -681,26 +730,18 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
msleep(1);
mb();
i = 0;
- while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) {
- msleep(1);
+ while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
+ msleep(1 << i);
i++;
}
if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
dev_err(&mgp->pdev->dev, "handoff failed\n");
return -ENXIO;
}
- dev_info(&mgp->pdev->dev, "handoff confirmed\n");
myri10ge_dummy_rdma(mgp, 1);
+ status = myri10ge_get_firmware_capabilities(mgp);
- /* probe for IPv6 TSO support */
- mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
- status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
- &cmd, 0);
- if (status == 0) {
- mgp->max_tso6 = cmd.data0;
- mgp->features |= NETIF_F_TSO6;
- }
- return 0;
+ return status;
}
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
@@ -772,7 +813,7 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
* transfers took to complete.
*/
- len = mgp->tx.boundary;
+ len = mgp->tx_boundary;
cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
@@ -834,17 +875,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
/* Now exchange information about interrupts */
- bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
- memset(mgp->rx_done.entry, 0, bytes);
+ bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
+ memset(mgp->ss.rx_done.entry, 0, bytes);
cmd.data0 = (u32) bytes;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
- cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
- cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus);
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
- mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
+ mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
&cmd, 0);
mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
@@ -858,17 +899,17 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
}
put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
- memset(mgp->rx_done.entry, 0, bytes);
+ memset(mgp->ss.rx_done.entry, 0, bytes);
/* reset mcp/driver shared state back to 0 */
- mgp->tx.req = 0;
- mgp->tx.done = 0;
- mgp->tx.pkt_start = 0;
- mgp->tx.pkt_done = 0;
- mgp->rx_big.cnt = 0;
- mgp->rx_small.cnt = 0;
- mgp->rx_done.idx = 0;
- mgp->rx_done.cnt = 0;
+ mgp->ss.tx.req = 0;
+ mgp->ss.tx.done = 0;
+ mgp->ss.tx.pkt_start = 0;
+ mgp->ss.tx.pkt_done = 0;
+ mgp->ss.rx_big.cnt = 0;
+ mgp->ss.rx_small.cnt = 0;
+ mgp->ss.rx_done.idx = 0;
+ mgp->ss.rx_done.cnt = 0;
mgp->link_changes = 0;
status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
myri10ge_change_pause(mgp, mgp->pause);
@@ -1020,9 +1061,10 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
* page into an skb */
static inline int
-myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
int bytes, int len, __wsum csum)
{
+ struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
int i, idx, hlen, remainder;
@@ -1052,11 +1094,10 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
rx_frags[0].page_offset += MXGEFW_PAD;
rx_frags[0].size -= MXGEFW_PAD;
len -= MXGEFW_PAD;
- lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags,
+ lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
len, len,
- /* opaque, will come back in get_frag_header */
- (void *)(__force unsigned long)csum,
- csum);
+ /* opaque, will come back in get_frag_header */
+ (void *)(__force unsigned long)csum, csum);
return 1;
}
@@ -1096,10 +1137,11 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
return 1;
}
-static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
+static inline void
+myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
- struct pci_dev *pdev = mgp->pdev;
- struct myri10ge_tx_buf *tx = &mgp->tx;
+ struct pci_dev *pdev = ss->mgp->pdev;
+ struct myri10ge_tx_buf *tx = &ss->tx;
struct sk_buff *skb;
int idx, len;
@@ -1117,8 +1159,8 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
len = pci_unmap_len(&tx->info[idx], len);
pci_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
- mgp->stats.tx_bytes += skb->len;
- mgp->stats.tx_packets++;
+ ss->stats.tx_bytes += skb->len;
+ ss->stats.tx_packets++;
dev_kfree_skb_irq(skb);
if (len)
pci_unmap_single(pdev,
@@ -1134,16 +1176,18 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
}
}
/* start the queue if we've stopped it */
- if (netif_queue_stopped(mgp->dev)
+ if (netif_queue_stopped(ss->dev)
&& tx->req - tx->done < (tx->mask >> 1)) {
- mgp->wake_queue++;
- netif_wake_queue(mgp->dev);
+ tx->wake_queue++;
+ netif_wake_queue(ss->dev);
}
}
-static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget)
+static inline int
+myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
- struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+ struct myri10ge_rx_done *rx_done = &ss->rx_done;
+ struct myri10ge_priv *mgp = ss->mgp;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long rx_ok;
@@ -1159,40 +1203,40 @@ static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget)
rx_done->entry[idx].length = 0;
checksum = csum_unfold(rx_done->entry[idx].checksum);
if (length <= mgp->small_bytes)
- rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
+ rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
mgp->small_bytes,
length, checksum);
else
- rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
+ rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
mgp->big_bytes,
length, checksum);
rx_packets += rx_ok;
rx_bytes += rx_ok * (unsigned long)length;
cnt++;
- idx = cnt & (myri10ge_max_intr_slots - 1);
+ idx = cnt & (mgp->max_intr_slots - 1);
work_done++;
}
rx_done->idx = idx;
rx_done->cnt = cnt;
- mgp->stats.rx_packets += rx_packets;
- mgp->stats.rx_bytes += rx_bytes;
+ ss->stats.rx_packets += rx_packets;
+ ss->stats.rx_bytes += rx_bytes;
if (myri10ge_lro)
lro_flush_all(&rx_done->lro_mgr);
/* restock receive rings if needed */
- if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh)
- myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+ if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD, 0);
- if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh)
- myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+ if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
return work_done;
}
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
- struct mcp_irq_data *stats = mgp->fw_stats;
+ struct mcp_irq_data *stats = mgp->ss.fw_stats;
if (unlikely(stats->stats_updated)) {
unsigned link_up = ntohl(stats->link_up);
@@ -1219,9 +1263,9 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
}
}
if (mgp->rdma_tags_available !=
- ntohl(mgp->fw_stats->rdma_tags_available)) {
+ ntohl(stats->rdma_tags_available)) {
mgp->rdma_tags_available =
- ntohl(mgp->fw_stats->rdma_tags_available);
+ ntohl(stats->rdma_tags_available);
printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
"%d tags left\n", mgp->dev->name,
mgp->rdma_tags_available);
@@ -1234,26 +1278,27 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
- struct myri10ge_priv *mgp =
- container_of(napi, struct myri10ge_priv, napi);
- struct net_device *netdev = mgp->dev;
+ struct myri10ge_slice_state *ss =
+ container_of(napi, struct myri10ge_slice_state, napi);
+ struct net_device *netdev = ss->mgp->dev;
int work_done;
/* process as many rx events as NAPI will allow */
- work_done = myri10ge_clean_rx_done(mgp, budget);
+ work_done = myri10ge_clean_rx_done(ss, budget);
if (work_done < budget) {
netif_rx_complete(netdev, napi);
- put_be32(htonl(3), mgp->irq_claim);
+ put_be32(htonl(3), ss->irq_claim);
}
return work_done;
}
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
- struct myri10ge_priv *mgp = arg;
- struct mcp_irq_data *stats = mgp->fw_stats;
- struct myri10ge_tx_buf *tx = &mgp->tx;
+ struct myri10ge_slice_state *ss = arg;
+ struct myri10ge_priv *mgp = ss->mgp;
+ struct mcp_irq_data *stats = ss->fw_stats;
+ struct myri10ge_tx_buf *tx = &ss->tx;
u32 send_done_count;
int i;
@@ -1264,7 +1309,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
/* low bit indicates receives are present, so schedule
* napi poll handler */
if (stats->valid & 1)
- netif_rx_schedule(mgp->dev, &mgp->napi);
+ netif_rx_schedule(ss->dev, &ss->napi);
if (!mgp->msi_enabled) {
put_be32(0, mgp->irq_deassert);
@@ -1281,7 +1326,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
/* check for transmit completes and receives */
send_done_count = ntohl(stats->send_done_count);
if (send_done_count != tx->pkt_done)
- myri10ge_tx_done(mgp, (int)send_done_count);
+ myri10ge_tx_done(ss, (int)send_done_count);
if (unlikely(i > myri10ge_max_irq_loops)) {
printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
mgp->dev->name);
@@ -1296,16 +1341,46 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
myri10ge_check_statblock(mgp);
- put_be32(htonl(3), mgp->irq_claim + 1);
+ put_be32(htonl(3), ss->irq_claim + 1);
return (IRQ_HANDLED);
}
static int
myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ char *ptr;
+ int i;
+
cmd->autoneg = AUTONEG_DISABLE;
cmd->speed = SPEED_10000;
cmd->duplex = DUPLEX_FULL;
+
+ /*
+ * parse the product code to determine the interface type
+ * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
+ * after the 3rd dash in the driver's cached copy of the
+ * EEPROM's product code string.
+ */
+ ptr = mgp->product_code_string;
+ if (ptr == NULL) {
+ printk(KERN_ERR "myri10ge: %s: Missing product code\n",
+ netdev->name);
+ return 0;
+ }
+ for (i = 0; i < 3; i++, ptr++) {
+ ptr = strchr(ptr, '-');
+ if (ptr == NULL) {
+ printk(KERN_ERR "myri10ge: %s: Invalid product "
+ "code %s\n", netdev->name,
+ mgp->product_code_string);
+ return 0;
+ }
+ }
+ if (*ptr == 'R' || *ptr == 'Q') {
+ /* We've found either an XFP or quad ribbon fiber */
+ cmd->port = PORT_FIBRE;
+ }
return 0;
}
@@ -1324,6 +1399,7 @@ static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
+
coal->rx_coalesce_usecs = mgp->intr_coal_delay;
return 0;
}
@@ -1370,10 +1446,10 @@ myri10ge_get_ringparam(struct net_device *netdev,
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- ring->rx_mini_max_pending = mgp->rx_small.mask + 1;
- ring->rx_max_pending = mgp->rx_big.mask + 1;
+ ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1;
+ ring->rx_max_pending = mgp->ss.rx_big.mask + 1;
ring->rx_jumbo_max_pending = 0;
- ring->tx_max_pending = mgp->rx_small.mask + 1;
+ ring->tx_max_pending = mgp->ss.rx_small.mask + 1;
ring->rx_mini_pending = ring->rx_mini_max_pending;
ring->rx_pending = ring->rx_max_pending;
ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
@@ -1383,6 +1459,7 @@ myri10ge_get_ringparam(struct net_device *netdev,
static u32 myri10ge_get_rx_csum(struct net_device *netdev)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
+
if (mgp->csum_flag)
return 1;
else
@@ -1392,6 +1469,7 @@ static u32 myri10ge_get_rx_csum(struct net_device *netdev)
static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
+
if (csum_enabled)
mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
else
@@ -1411,7 +1489,7 @@ static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
return 0;
}
-static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
+static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
"rx_length_errors", "rx_over_errors", "rx_crc_errors",
@@ -1421,28 +1499,39 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
/* device-specific stats */
"tx_boundary", "WC", "irq", "MSI",
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
- "serial_number", "tx_pkt_start", "tx_pkt_done",
- "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
- "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
+ "serial_number", "watchdog_resets",
"link_changes", "link_up", "dropped_link_overflow",
"dropped_link_error_or_filtered",
"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
"dropped_unicast_filtered", "dropped_multicast_filtered",
"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
- "dropped_no_big_buffer", "LRO aggregated", "LRO flushed",
+ "dropped_no_big_buffer"
+};
+
+static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
+ "----------- slice ---------",
+ "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
+ "rx_small_cnt", "rx_big_cnt",
+ "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
+ "LRO flushed",
"LRO avg aggr", "LRO no_desc"
};
#define MYRI10GE_NET_STATS_LEN 21
-#define MYRI10GE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_stats)
+#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
+#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *myri10ge_gstrings_stats,
- sizeof(myri10ge_gstrings_stats));
+ memcpy(data, *myri10ge_gstrings_main_stats,
+ sizeof(myri10ge_gstrings_main_stats));
+ data += sizeof(myri10ge_gstrings_main_stats);
+ memcpy(data, *myri10ge_gstrings_slice_stats,
+ sizeof(myri10ge_gstrings_slice_stats));
+ data += sizeof(myri10ge_gstrings_slice_stats);
break;
}
}
@@ -1451,7 +1540,7 @@ static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return MYRI10GE_STATS_LEN;
+ return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1462,12 +1551,13 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 * data)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
+ struct myri10ge_slice_state *ss;
int i;
for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
data[i] = ((unsigned long *)&mgp->stats)[i];
- data[i++] = (unsigned int)mgp->tx.boundary;
+ data[i++] = (unsigned int)mgp->tx_boundary;
data[i++] = (unsigned int)mgp->wc_enabled;
data[i++] = (unsigned int)mgp->pdev->irq;
data[i++] = (unsigned int)mgp->msi_enabled;
@@ -1475,40 +1565,44 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i++] = (unsigned int)mgp->write_dma;
data[i++] = (unsigned int)mgp->read_write_dma;
data[i++] = (unsigned int)mgp->serial_number;
- data[i++] = (unsigned int)mgp->tx.pkt_start;
- data[i++] = (unsigned int)mgp->tx.pkt_done;
- data[i++] = (unsigned int)mgp->tx.req;
- data[i++] = (unsigned int)mgp->tx.done;
- data[i++] = (unsigned int)mgp->rx_small.cnt;
- data[i++] = (unsigned int)mgp->rx_big.cnt;
- data[i++] = (unsigned int)mgp->wake_queue;
- data[i++] = (unsigned int)mgp->stop_queue;
data[i++] = (unsigned int)mgp->watchdog_resets;
- data[i++] = (unsigned int)mgp->tx_linearized;
data[i++] = (unsigned int)mgp->link_changes;
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
- data[i++] =
- (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32);
+
+ /* firmware stats are useful only in the first slice */
+ ss = &mgp->ss;
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
data[i++] =
- (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered);
+ (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
data[i++] =
- (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
- data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
- data[i++] = mgp->rx_done.lro_mgr.stats.aggregated;
- data[i++] = mgp->rx_done.lro_mgr.stats.flushed;
- if (mgp->rx_done.lro_mgr.stats.flushed)
- data[i++] = mgp->rx_done.lro_mgr.stats.aggregated /
- mgp->rx_done.lro_mgr.stats.flushed;
+ (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
+ data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
+
+ data[i++] = 0;
+ data[i++] = (unsigned int)ss->tx.pkt_start;
+ data[i++] = (unsigned int)ss->tx.pkt_done;
+ data[i++] = (unsigned int)ss->tx.req;
+ data[i++] = (unsigned int)ss->tx.done;
+ data[i++] = (unsigned int)ss->rx_small.cnt;
+ data[i++] = (unsigned int)ss->rx_big.cnt;
+ data[i++] = (unsigned int)ss->tx.wake_queue;
+ data[i++] = (unsigned int)ss->tx.stop_queue;
+ data[i++] = (unsigned int)ss->tx.linearized;
+ data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
+ data[i++] = ss->rx_done.lro_mgr.stats.flushed;
+ if (ss->rx_done.lro_mgr.stats.flushed)
+ data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
+ ss->rx_done.lro_mgr.stats.flushed;
else
data[i++] = 0;
- data[i++] = mgp->rx_done.lro_mgr.stats.no_desc;
+ data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
}
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
@@ -1544,19 +1638,17 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_msglevel = myri10ge_get_msglevel
};
-static int myri10ge_allocate_rings(struct net_device *dev)
+static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
- struct myri10ge_priv *mgp;
+ struct myri10ge_priv *mgp = ss->mgp;
struct myri10ge_cmd cmd;
+ struct net_device *dev = mgp->dev;
int tx_ring_size, rx_ring_size;
int tx_ring_entries, rx_ring_entries;
int i, status;
size_t bytes;
- mgp = netdev_priv(dev);
-
/* get ring sizes */
-
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
tx_ring_size = cmd.data0;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
@@ -1566,144 +1658,142 @@ static int myri10ge_allocate_rings(struct net_device *dev)
tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
- mgp->tx.mask = tx_ring_entries - 1;
- mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1;
+ ss->tx.mask = tx_ring_entries - 1;
+ ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
status = -ENOMEM;
/* allocate the host shadow rings */
bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
- * sizeof(*mgp->tx.req_list);
- mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
- if (mgp->tx.req_bytes == NULL)
+ * sizeof(*ss->tx.req_list);
+ ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
+ if (ss->tx.req_bytes == NULL)
goto abort_with_nothing;
/* ensure req_list entries are aligned to 8 bytes */
- mgp->tx.req_list = (struct mcp_kreq_ether_send *)
- ALIGN((unsigned long)mgp->tx.req_bytes, 8);
+ ss->tx.req_list = (struct mcp_kreq_ether_send *)
+ ALIGN((unsigned long)ss->tx.req_bytes, 8);
- bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow);
- mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
- if (mgp->rx_small.shadow == NULL)
+ bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
+ ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_small.shadow == NULL)
goto abort_with_tx_req_bytes;
- bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow);
- mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
- if (mgp->rx_big.shadow == NULL)
+ bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
+ ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_big.shadow == NULL)
goto abort_with_rx_small_shadow;
/* allocate the host info rings */
- bytes = tx_ring_entries * sizeof(*mgp->tx.info);
- mgp->tx.info = kzalloc(bytes, GFP_KERNEL);
- if (mgp->tx.info == NULL)
+ bytes = tx_ring_entries * sizeof(*ss->tx.info);
+ ss->tx.info = kzalloc(bytes, GFP_KERNEL);
+ if (ss->tx.info == NULL)
goto abort_with_rx_big_shadow;
- bytes = rx_ring_entries * sizeof(*mgp->rx_small.info);
- mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL);
- if (mgp->rx_small.info == NULL)
+ bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
+ ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_small.info == NULL)
goto abort_with_tx_info;
- bytes = rx_ring_entries * sizeof(*mgp->rx_big.info);
- mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL);
- if (mgp->rx_big.info == NULL)
+ bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
+ ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
+ if (ss->rx_big.info == NULL)
goto abort_with_rx_small_info;
/* Fill the receive rings */
- mgp->rx_big.cnt = 0;
- mgp->rx_small.cnt = 0;
- mgp->rx_big.fill_cnt = 0;
- mgp->rx_small.fill_cnt = 0;
- mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
- mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
- mgp->rx_small.watchdog_needed = 0;
- mgp->rx_big.watchdog_needed = 0;
- myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+ ss->rx_big.cnt = 0;
+ ss->rx_small.cnt = 0;
+ ss->rx_big.fill_cnt = 0;
+ ss->rx_small.fill_cnt = 0;
+ ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
+ ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
+ ss->rx_small.watchdog_needed = 0;
+ ss->rx_big.watchdog_needed = 0;
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD, 0);
- if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) {
+ if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
- dev->name, mgp->rx_small.fill_cnt);
+ dev->name, ss->rx_small.fill_cnt);
goto abort_with_rx_small_ring;
}
- myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
- if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) {
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
+ if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
- dev->name, mgp->rx_big.fill_cnt);
+ dev->name, ss->rx_big.fill_cnt);
goto abort_with_rx_big_ring;
}
return 0;
abort_with_rx_big_ring:
- for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
- int idx = i & mgp->rx_big.mask;
- myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+ for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
+ int idx = i & ss->rx_big.mask;
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
mgp->big_bytes);
- put_page(mgp->rx_big.info[idx].page);
+ put_page(ss->rx_big.info[idx].page);
}
abort_with_rx_small_ring:
- for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
- int idx = i & mgp->rx_small.mask;
- myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+ for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
+ int idx = i & ss->rx_small.mask;
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
mgp->small_bytes + MXGEFW_PAD);
- put_page(mgp->rx_small.info[idx].page);
+ put_page(ss->rx_small.info[idx].page);
}
- kfree(mgp->rx_big.info);
+ kfree(ss->rx_big.info);
abort_with_rx_small_info:
- kfree(mgp->rx_small.info);
+ kfree(ss->rx_small.info);
abort_with_tx_info:
- kfree(mgp->tx.info);
+ kfree(ss->tx.info);
abort_with_rx_big_shadow:
- kfree(mgp->rx_big.shadow);
+ kfree(ss->rx_big.shadow);
abort_with_rx_small_shadow:
- kfree(mgp->rx_small.shadow);
+ kfree(ss->rx_small.shadow);
abort_with_tx_req_bytes:
- kfree(mgp->tx.req_bytes);
- mgp->tx.req_bytes = NULL;
- mgp->tx.req_list = NULL;
+ kfree(ss->tx.req_bytes);
+ ss->tx.req_bytes = NULL;
+ ss->tx.req_list = NULL;
abort_with_nothing:
return status;
}
-static void myri10ge_free_rings(struct net_device *dev)
+static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
{
- struct myri10ge_priv *mgp;
+ struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
struct myri10ge_tx_buf *tx;
int i, len, idx;
- mgp = netdev_priv(dev);
-
- for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
- idx = i & mgp->rx_big.mask;
- if (i == mgp->rx_big.fill_cnt - 1)
- mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
- myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+ for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
+ idx = i & ss->rx_big.mask;
+ if (i == ss->rx_big.fill_cnt - 1)
+ ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
mgp->big_bytes);
- put_page(mgp->rx_big.info[idx].page);
+ put_page(ss->rx_big.info[idx].page);
}
- for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
- idx = i & mgp->rx_small.mask;
- if (i == mgp->rx_small.fill_cnt - 1)
- mgp->rx_small.info[idx].page_offset =
+ for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
+ idx = i & ss->rx_small.mask;
+ if (i == ss->rx_small.fill_cnt - 1)
+ ss->rx_small.info[idx].page_offset =
MYRI10GE_ALLOC_SIZE;
- myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+ myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
mgp->small_bytes + MXGEFW_PAD);
- put_page(mgp->rx_small.info[idx].page);
+ put_page(ss->rx_small.info[idx].page);
}
- tx = &mgp->tx;
+ tx = &ss->tx;
while (tx->done != tx->req) {
idx = tx->done & tx->mask;
skb = tx->info[idx].skb;
@@ -1714,7 +1804,7 @@ static void myri10ge_free_rings(struct net_device *dev)
len = pci_unmap_len(&tx->info[idx], len);
pci_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
- mgp->stats.tx_dropped++;
+ ss->stats.tx_dropped++;
dev_kfree_skb_any(skb);
if (len)
pci_unmap_single(mgp->pdev,
@@ -1729,19 +1819,19 @@ static void myri10ge_free_rings(struct net_device *dev)
PCI_DMA_TODEVICE);
}
}
- kfree(mgp->rx_big.info);
+ kfree(ss->rx_big.info);
- kfree(mgp->rx_small.info);
+ kfree(ss->rx_small.info);
- kfree(mgp->tx.info);
+ kfree(ss->tx.info);
- kfree(mgp->rx_big.shadow);
+ kfree(ss->rx_big.shadow);
- kfree(mgp->rx_small.shadow);
+ kfree(ss->rx_small.shadow);
- kfree(mgp->tx.req_bytes);
- mgp->tx.req_bytes = NULL;
- mgp->tx.req_list = NULL;
+ kfree(ss->tx.req_bytes);
+ ss->tx.req_bytes = NULL;
+ ss->tx.req_list = NULL;
}
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
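The cleanup loops above index the receive rings with "i & mask", where the
counters (cnt, fill_cnt) run freely upward and mask is ring_size - 1. A
plain-C illustration of that power-of-two masking, using arbitrary sizes
rather than the driver's:

#include <stdio.h>

int main(void)
{
        const unsigned int ring_size = 8;        /* must be a power of two */
        const unsigned int mask = ring_size - 1; /* 0x7 */

        /* A free-running counter maps onto slots 5,6,7,0,1,2 -- the same
         * "i & mask" used when unmapping rx pages in the paths above. */
        for (unsigned int i = 5; i < 5 + 6; i++)
                printf("counter %u -> slot %u\n", i, i & mask);
        return 0;
}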
@@ -1840,13 +1930,11 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
static int myri10ge_open(struct net_device *dev)
{
- struct myri10ge_priv *mgp;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
struct net_lro_mgr *lro_mgr;
int status, big_pow2;
- mgp = netdev_priv(dev);
-
if (mgp->running != MYRI10GE_ETH_STOPPED)
return -EBUSY;
@@ -1883,16 +1971,16 @@ static int myri10ge_open(struct net_device *dev)
/* get the lanai pointers to the send and receive rings */
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
- mgp->tx.lanai =
+ mgp->ss.tx.lanai =
(struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
- mgp->rx_small.lanai =
+ mgp->ss.rx_small.lanai =
(struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
- mgp->rx_big.lanai =
+ mgp->ss.rx_big.lanai =
(struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
if (status != 0) {
@@ -1904,15 +1992,15 @@ static int myri10ge_open(struct net_device *dev)
}
if (myri10ge_wcfifo && mgp->wc_enabled) {
- mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
- mgp->rx_small.wc_fifo =
+ mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
+ mgp->ss.rx_small.wc_fifo =
(u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
- mgp->rx_big.wc_fifo =
+ mgp->ss.rx_big.wc_fifo =
(u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
} else {
- mgp->tx.wc_fifo = NULL;
- mgp->rx_small.wc_fifo = NULL;
- mgp->rx_big.wc_fifo = NULL;
+ mgp->ss.tx.wc_fifo = NULL;
+ mgp->ss.rx_small.wc_fifo = NULL;
+ mgp->ss.rx_big.wc_fifo = NULL;
}
/* Firmware needs the big buff size as a power of 2. Lie and
@@ -1929,7 +2017,7 @@ static int myri10ge_open(struct net_device *dev)
mgp->big_bytes = big_pow2;
}
- status = myri10ge_allocate_rings(dev);
+ status = myri10ge_allocate_rings(&mgp->ss);
if (status != 0)
goto abort_with_irq;
@@ -1948,12 +2036,12 @@ static int myri10ge_open(struct net_device *dev)
goto abort_with_rings;
}
- cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
- cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
+ cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus);
+ cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus);
cmd.data2 = sizeof(struct mcp_irq_data);
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
if (status == -ENOSYS) {
- dma_addr_t bus = mgp->fw_stats_bus;
+ dma_addr_t bus = mgp->ss.fw_stats_bus;
bus += offsetof(struct mcp_irq_data, send_done_count);
cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
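When SET_STATS_DMA_V2 comes back -ENOSYS, the fallback above points the
firmware at just the send_done_count member by adding offsetof() to the
stats bus address and re-splitting it into the two 32-bit command words. A
throwaway userspace sketch of that address arithmetic; the structure layout
below is invented for illustration and is not mcp_irq_data:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented layout for illustration only -- this is NOT mcp_irq_data. */
struct fake_irq_data {
        uint32_t rx_pause_cnt;
        uint32_t send_done_count;
};

/* Split a 64-bit bus address into two 32-bit command words, the same job
 * MYRI10GE_LOWPART_TO_U32/MYRI10GE_HIGHPART_TO_U32 do above. */
#define LOWPART(a)  ((uint32_t)((a) & 0xffffffffu))
#define HIGHPART(a) ((uint32_t)((a) >> 32))

int main(void)
{
        uint64_t stats_bus = 0x12345678abcd0000ull;
        /* Old firmware DMAs only the send_done_count word, so aim the
         * address at that member rather than the whole structure. */
        uint64_t bus = stats_bus +
                       offsetof(struct fake_irq_data, send_done_count);

        printf("data0=0x%08x data1=0x%08x\n",
               (unsigned int)LOWPART(bus), (unsigned int)HIGHPART(bus));
        return 0;
}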
@@ -1974,20 +2062,20 @@ static int myri10ge_open(struct net_device *dev)
mgp->link_state = ~0U;
mgp->rdma_tags_available = 15;
- lro_mgr = &mgp->rx_done.lro_mgr;
+ lro_mgr = &mgp->ss.rx_done.lro_mgr;
lro_mgr->dev = dev;
lro_mgr->features = LRO_F_NAPI;
lro_mgr->ip_summed = CHECKSUM_COMPLETE;
lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
- lro_mgr->lro_arr = mgp->rx_done.lro_desc;
+ lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc;
lro_mgr->get_frag_header = myri10ge_get_frag_header;
lro_mgr->max_aggr = myri10ge_lro_max_pkts;
lro_mgr->frag_align_pad = 2;
if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
lro_mgr->max_aggr = MAX_SKB_FRAGS;
- napi_enable(&mgp->napi); /* must happen prior to any irq */
+ napi_enable(&mgp->ss.napi); /* must happen prior to any irq */
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
if (status) {
@@ -1996,8 +2084,8 @@ static int myri10ge_open(struct net_device *dev)
goto abort_with_rings;
}
- mgp->wake_queue = 0;
- mgp->stop_queue = 0;
+ mgp->ss.tx.wake_queue = 0;
+ mgp->ss.tx.stop_queue = 0;
mgp->running = MYRI10GE_ETH_RUNNING;
mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
add_timer(&mgp->watchdog_timer);
@@ -2005,7 +2093,7 @@ static int myri10ge_open(struct net_device *dev)
return 0;
abort_with_rings:
- myri10ge_free_rings(dev);
+ myri10ge_free_rings(&mgp->ss);
abort_with_irq:
myri10ge_free_irq(mgp);
@@ -2017,21 +2105,19 @@ abort_with_nothing:
static int myri10ge_close(struct net_device *dev)
{
- struct myri10ge_priv *mgp;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
int status, old_down_cnt;
- mgp = netdev_priv(dev);
-
if (mgp->running != MYRI10GE_ETH_RUNNING)
return 0;
- if (mgp->tx.req_bytes == NULL)
+ if (mgp->ss.tx.req_bytes == NULL)
return 0;
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
- napi_disable(&mgp->napi);
+ napi_disable(&mgp->ss.napi);
netif_carrier_off(dev);
netif_stop_queue(dev);
old_down_cnt = mgp->down_cnt;
@@ -2047,7 +2133,7 @@ static int myri10ge_close(struct net_device *dev)
netif_tx_disable(dev);
myri10ge_free_irq(mgp);
- myri10ge_free_rings(dev);
+ myri10ge_free_rings(&mgp->ss);
mgp->running = MYRI10GE_ETH_STOPPED;
return 0;
@@ -2143,7 +2229,7 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
/*
* Transmit a packet. We need to split the packet so that a single
- * segment does not cross myri10ge->tx.boundary, so this makes segment
+ * segment does not cross myri10ge->tx_boundary, so this makes segment
* counting tricky. So rather than try to count segments up front, we
* just give up if there are too few segments to hold a reasonably
* fragmented packet currently available. If we run
@@ -2154,8 +2240,9 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
+ struct myri10ge_slice_state *ss;
struct mcp_kreq_ether_send *req;
- struct myri10ge_tx_buf *tx = &mgp->tx;
+ struct myri10ge_tx_buf *tx;
struct skb_frag_struct *frag;
dma_addr_t bus;
u32 low;
@@ -2166,6 +2253,9 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
int cum_len, seglen, boundary, rdma_count;
u8 flags, odd_flag;
+ /* always transmit through slot 0 */
+ ss = &mgp->ss;
+ tx = &ss->tx;
again:
req = tx->req_list;
avail = tx->mask - 1 - (tx->req - tx->done);
@@ -2180,7 +2270,7 @@ again:
if ((unlikely(avail < max_segments))) {
/* we are out of transmit resources */
- mgp->stop_queue++;
+ tx->stop_queue++;
netif_stop_queue(dev);
return 1;
}
@@ -2242,7 +2332,7 @@ again:
if (skb_padto(skb, ETH_ZLEN)) {
/* The packet is gone, so we must
* return 0 */
- mgp->stats.tx_dropped += 1;
+ ss->stats.tx_dropped += 1;
return 0;
}
/* adjust the len to account for the zero pad
@@ -2284,7 +2374,7 @@ again:
while (1) {
/* Break the SKB or Fragment up into pieces which
- * do not cross mgp->tx.boundary */
+ * do not cross mgp->tx_boundary */
low = MYRI10GE_LOWPART_TO_U32(bus);
high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
while (len) {
@@ -2294,7 +2384,8 @@ again:
if (unlikely(count == max_segments))
goto abort_linearize;
- boundary = (low + tx->boundary) & ~(tx->boundary - 1);
+ boundary =
+ (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
seglen = boundary - low;
if (seglen > len)
seglen = len;
@@ -2378,7 +2469,7 @@ again:
myri10ge_submit_req_wc(tx, tx->req_list, count);
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
- mgp->stop_queue++;
+ tx->stop_queue++;
netif_stop_queue(dev);
}
dev->trans_start = jiffies;
@@ -2420,12 +2511,12 @@ abort_linearize:
if (skb_linearize(skb))
goto drop;
- mgp->tx_linearized++;
+ tx->linearized++;
goto again;
drop:
dev_kfree_skb_any(skb);
- mgp->stats.tx_dropped += 1;
+ ss->stats.tx_dropped += 1;
return 0;
}
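The hunks above only change where the boundary value comes from
(mgp->tx_boundary instead of tx->boundary); the clipping arithmetic that
keeps each DMA segment inside one boundary-sized window is unchanged. A
minimal userspace sketch of that arithmetic, assuming only that the boundary
is a power of two (2048 or 4096, the values this driver uses); names are
illustrative, not the driver's:

#include <stdio.h>

/* Clip [low, low + len) into pieces that never cross a tx_boundary-aligned
 * address; tx_boundary must be a power of two.  Mirrors the inner segment
 * loop of myri10ge_xmit() shown above. */
static void split_on_boundary(unsigned int low, int len,
                              unsigned int tx_boundary)
{
        while (len) {
                /* first boundary strictly above 'low' */
                unsigned int boundary =
                        (low + tx_boundary) & ~(tx_boundary - 1);
                int seglen = (int)(boundary - low);

                if (seglen > len)
                        seglen = len;
                printf("segment: addr=0x%x len=%d\n", low, seglen);
                low += seglen;
                len -= seglen;
        }
}

int main(void)
{
        /* a 5000-byte fragment starting 100 bytes below a 2 KB boundary */
        split_on_boundary(0x1f9c, 5000, 2048);
        return 0;
}

Rounding up with (low + tx_boundary) & ~(tx_boundary - 1) relies on
tx_boundary being a power of two, which the 2048/4096 choices made in the
firmware-selection code further down satisfy.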
@@ -2433,7 +2524,7 @@ drop:
static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
{
struct sk_buff *segs, *curr;
- struct myri10ge_priv *mgp = dev->priv;
+ struct myri10ge_priv *mgp = netdev_priv(dev);
int status;
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
@@ -2473,14 +2564,13 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
static void myri10ge_set_multicast_list(struct net_device *dev)
{
+ struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
- struct myri10ge_priv *mgp;
struct dev_mc_list *mc_list;
__be32 data[2] = { 0, 0 };
int err;
DECLARE_MAC_BUF(mac);
- mgp = netdev_priv(dev);
/* can be called from atomic contexts,
* pass 1 to force atomicity in myri10ge_send_cmd() */
myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
@@ -2616,13 +2706,14 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
if (myri10ge_ecrc_enable > 1) {
- struct pci_dev *old_bridge = bridge;
+ struct pci_dev *prev_bridge, *old_bridge = bridge;
/* Walk the hierarchy up to the root port
* where ECRC has to be enabled */
do {
+ prev_bridge = bridge;
bridge = bridge->bus->self;
- if (!bridge) {
+ if (!bridge || prev_bridge == bridge) {
dev_err(dev,
"Failed to find root port"
" to force ECRC\n");
@@ -2681,9 +2772,9 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
* already been enabled, then it must use a firmware image which works
* around unaligned completion packets (myri10ge_ethp_z8e.dat), and it
* should also ensure that it never gives the device a Read-DMA which is
- * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is
+ * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
* enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat)
- * firmware image, and set tx.boundary to 4KB.
+ * firmware image, and set tx_boundary to 4KB.
*/
static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
@@ -2692,7 +2783,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
struct device *dev = &pdev->dev;
int status;
- mgp->tx.boundary = 4096;
+ mgp->tx_boundary = 4096;
/*
* Verify the max read request size was set to 4KB
* before trying the test with 4KB.
@@ -2704,7 +2795,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
}
if (status != 4096) {
dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
- mgp->tx.boundary = 2048;
+ mgp->tx_boundary = 2048;
}
/*
* load the optimized firmware (which assumes aligned PCIe
@@ -2737,7 +2828,7 @@ static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
"Please install up to date fw\n");
abort:
/* fall back to using the unaligned firmware */
- mgp->tx.boundary = 2048;
+ mgp->tx_boundary = 2048;
mgp->fw_name = myri10ge_fw_unaligned;
}
@@ -2758,7 +2849,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
if (link_width < 8) {
dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
link_width);
- mgp->tx.boundary = 4096;
+ mgp->tx_boundary = 4096;
mgp->fw_name = myri10ge_fw_aligned;
} else {
myri10ge_firmware_probe(mgp);
@@ -2767,12 +2858,12 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
if (myri10ge_force_firmware == 1) {
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (forced)\n");
- mgp->tx.boundary = 4096;
+ mgp->tx_boundary = 4096;
mgp->fw_name = myri10ge_fw_aligned;
} else {
dev_info(&mgp->pdev->dev,
"Assuming unaligned completions (forced)\n");
- mgp->tx.boundary = 2048;
+ mgp->tx_boundary = 2048;
mgp->fw_name = myri10ge_fw_unaligned;
}
}
@@ -2889,6 +2980,7 @@ static void myri10ge_watchdog(struct work_struct *work)
{
struct myri10ge_priv *mgp =
container_of(work, struct myri10ge_priv, watchdog_work);
+ struct myri10ge_tx_buf *tx;
u32 reboot;
int status;
u16 cmd, vendor;
@@ -2938,15 +3030,16 @@ static void myri10ge_watchdog(struct work_struct *work)
printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
mgp->dev->name);
+ tx = &mgp->ss.tx;
printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
- mgp->dev->name, mgp->tx.req, mgp->tx.done,
- mgp->tx.pkt_start, mgp->tx.pkt_done,
- (int)ntohl(mgp->fw_stats->send_done_count));
+ mgp->dev->name, tx->req, tx->done,
+ tx->pkt_start, tx->pkt_done,
+ (int)ntohl(mgp->ss.fw_stats->send_done_count));
msleep(2000);
printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
- mgp->dev->name, mgp->tx.req, mgp->tx.done,
- mgp->tx.pkt_start, mgp->tx.pkt_done,
- (int)ntohl(mgp->fw_stats->send_done_count));
+ mgp->dev->name, tx->req, tx->done,
+ tx->pkt_start, tx->pkt_done,
+ (int)ntohl(mgp->ss.fw_stats->send_done_count));
}
rtnl_lock();
myri10ge_close(mgp->dev);
@@ -2969,28 +3062,31 @@ static void myri10ge_watchdog(struct work_struct *work)
static void myri10ge_watchdog_timer(unsigned long arg)
{
struct myri10ge_priv *mgp;
+ struct myri10ge_slice_state *ss;
u32 rx_pause_cnt;
mgp = (struct myri10ge_priv *)arg;
- if (mgp->rx_small.watchdog_needed) {
- myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+ rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause);
+
+ ss = &mgp->ss;
+ if (ss->rx_small.watchdog_needed) {
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD, 1);
- if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >=
+ if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
myri10ge_fill_thresh)
- mgp->rx_small.watchdog_needed = 0;
+ ss->rx_small.watchdog_needed = 0;
}
- if (mgp->rx_big.watchdog_needed) {
- myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1);
- if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >=
+ if (ss->rx_big.watchdog_needed) {
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1);
+ if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
myri10ge_fill_thresh)
- mgp->rx_big.watchdog_needed = 0;
+ ss->rx_big.watchdog_needed = 0;
}
- rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause);
- if (mgp->tx.req != mgp->tx.done &&
- mgp->tx.done == mgp->watchdog_tx_done &&
- mgp->watchdog_tx_req != mgp->watchdog_tx_done) {
+ if (ss->tx.req != ss->tx.done &&
+ ss->tx.done == ss->watchdog_tx_done &&
+ ss->watchdog_tx_req != ss->watchdog_tx_done) {
/* nic seems like it might be stuck.. */
if (rx_pause_cnt != mgp->watchdog_pause) {
if (net_ratelimit())
@@ -3005,8 +3101,8 @@ static void myri10ge_watchdog_timer(unsigned long arg)
/* rearm timer */
mod_timer(&mgp->watchdog_timer,
jiffies + myri10ge_watchdog_timeout * HZ);
- mgp->watchdog_tx_done = mgp->tx.done;
- mgp->watchdog_tx_req = mgp->tx.req;
+ ss->watchdog_tx_done = ss->tx.done;
+ ss->watchdog_tx_req = ss->tx.req;
mgp->watchdog_pause = rx_pause_cnt;
}
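Condensed, the watchdog's stuck-transmitter test above fires only when
descriptors are outstanding, completions have not advanced since the
previous tick, and the ring was active a tick ago; a change in the
dropped-pause counter is treated as flow control rather than a hang. A small
standalone restatement of that predicate (field and function names are
shorthand, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Shorthand snapshot of the values the watchdog compares each tick. */
struct tx_watch {
        uint32_t req, done;             /* current ring indices */
        uint32_t last_req, last_done;   /* indices seen last tick */
        uint32_t pause, last_pause;     /* dropped-pause counter */
};

static bool tx_looks_stuck(const struct tx_watch *w)
{
        if (w->req == w->done)
                return false;           /* nothing outstanding */
        if (w->done != w->last_done)
                return false;           /* completions still advancing */
        if (w->last_req == w->last_done)
                return false;           /* ring was idle a tick ago */
        return w->pause == w->last_pause; /* not explained by pause frames */
}

int main(void)
{
        struct tx_watch w = { .req = 110, .done = 100,
                              .last_req = 110, .last_done = 100,
                              .pause = 7, .last_pause = 7 };

        printf("stuck: %d\n", tx_looks_stuck(&w));      /* prints 1 */
        return 0;
}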
@@ -3030,7 +3126,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mgp = netdev_priv(netdev);
mgp->dev = netdev;
- netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight);
+ netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight);
mgp->pdev = pdev;
mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
mgp->pause = myri10ge_flow_control;
@@ -3076,9 +3172,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (mgp->cmd == NULL)
goto abort_with_netdev;
- mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
- &mgp->fw_stats_bus, GFP_KERNEL);
- if (mgp->fw_stats == NULL)
+ mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
+ &mgp->ss.fw_stats_bus, GFP_KERNEL);
+ if (mgp->ss.fw_stats == NULL)
goto abort_with_cmd;
mgp->board_span = pci_resource_len(pdev, 0);
@@ -3118,12 +3214,12 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->dev_addr[i] = mgp->mac_addr[i];
/* allocate rx done ring */
- bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
- mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
- &mgp->rx_done.bus, GFP_KERNEL);
- if (mgp->rx_done.entry == NULL)
+ bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
+ mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+ &mgp->ss.rx_done.bus, GFP_KERNEL);
+ if (mgp->ss.rx_done.entry == NULL)
goto abort_with_ioremap;
- memset(mgp->rx_done.entry, 0, bytes);
+ memset(mgp->ss.rx_done.entry, 0, bytes);
myri10ge_select_firmware(mgp);
@@ -3183,7 +3279,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
(mgp->msi_enabled ? "MSI" : "xPIC"),
- netdev->irq, mgp->tx.boundary, mgp->fw_name,
+ netdev->irq, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_enabled ? "Enabled" : "Disabled"));
return 0;
@@ -3195,9 +3291,9 @@ abort_with_firmware:
myri10ge_dummy_rdma(mgp, 0);
abort_with_rx_done:
- bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
dma_free_coherent(&pdev->dev, bytes,
- mgp->rx_done.entry, mgp->rx_done.bus);
+ mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
abort_with_ioremap:
iounmap(mgp->sram);
@@ -3207,8 +3303,8 @@ abort_with_wc:
if (mgp->mtrr >= 0)
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
- dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
- mgp->fw_stats, mgp->fw_stats_bus);
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
+ mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
abort_with_cmd:
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
@@ -3246,9 +3342,9 @@ static void myri10ge_remove(struct pci_dev *pdev)
/* avoid a memory leak */
pci_restore_state(pdev);
- bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+ bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry);
dma_free_coherent(&pdev->dev, bytes,
- mgp->rx_done.entry, mgp->rx_done.bus);
+ mgp->ss.rx_done.entry, mgp->ss.rx_done.bus);
iounmap(mgp->sram);
@@ -3256,8 +3352,8 @@ static void myri10ge_remove(struct pci_dev *pdev)
if (mgp->mtrr >= 0)
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
- dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
- mgp->fw_stats, mgp->fw_stats_bus);
+ dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats),
+ mgp->ss.fw_stats, mgp->ss.fw_stats_bus);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index 58e57178c56..fdbeeee0737 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -10,7 +10,7 @@ struct mcp_dma_addr {
__be32 low;
};
-/* 4 Bytes. 8 Bytes for NDIS drivers. */
+/* 4 Bytes */
struct mcp_slot {
__sum16 checksum;
__be16 length;
@@ -144,6 +144,7 @@ enum myri10ge_mcp_cmd_type {
* a power of 2 number of entries. */
MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */
+#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31)
/* command to bring ethernet interface up. Above parameters
* (plus mtu & mac address) must have been exchanged prior
@@ -221,10 +222,14 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_MAX_RSS_QUEUES,
MXGEFW_CMD_ENABLE_RSS_QUEUES,
/* data0 = number of slices n (0, 1, ..., n-1) to enable
- * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue.
+ * data1 = interrupt mode.
+ * 0=share one INTx/MSI, 1=use one MSI-X per queue.
* If all queues share one interrupt, the driver must have set
* RSS_SHARED_INTERRUPT_DMA before enabling queues.
*/
+#define MXGEFW_SLICE_INTR_MODE_SHARED 0
+#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1
+
MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET,
MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA,
/* data0, data1 = bus address lsw, msw */
@@ -241,10 +246,14 @@ enum myri10ge_mcp_cmd_type {
* 0: disable rss. nic does not distribute receive packets.
* 1: enable rss. nic distributes receive packets among queues.
* data1 = hash type
- * 1: IPV4
- * 2: TCP_IPV4
- * 3: IPV4 | TCP_IPV4
+ * 1: IPV4 (required by RSS)
+ * 2: TCP_IPV4 (required by RSS)
+ * 3: IPV4 | TCP_IPV4 (required by RSS)
+ * 4: source port
*/
+#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1
+#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2
+#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4
MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
/* Return data = the max. size of the entire headers of a IPv6 TSO packet.
@@ -260,6 +269,8 @@ enum myri10ge_mcp_cmd_type {
* 0: Linux/FreeBSD style (NIC default)
* 1: NDIS/NetBSD style
*/
+#define MXGEFW_TSO_MODE_LINUX 0
+#define MXGEFW_TSO_MODE_NDIS 1
MXGEFW_CMD_MDIO_READ,
/* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */
@@ -286,6 +297,38 @@ enum myri10ge_mcp_cmd_type {
/* Return data = NIC memory offset of mcp_vpump_public_global */
MXGEFW_CMD_RESET_VPUMP,
/* Resets the VPUMP state */
+
+ MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE,
+ /* data0 = mcp_slot type to use.
+ * 0 = the default 4B mcp_slot
+ * 1 = 8B mcp_slot_8
+ */
+#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0
+#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1
+
+ MXGEFW_CMD_SET_THROTTLE_FACTOR,
+ /* set the throttle factor for ethp_z8e
+ * data0 = throttle_factor
+ * throttle_factor = 256 * pcie-raw-speed / tx_speed
+ * tx_speed = 256 * pcie-raw-speed / throttle_factor
+ *
+ * For PCI-E x8: pcie-raw-speed == 16Gb/s
+ * For PCI-E x4: pcie-raw-speed == 8Gb/s
+ *
+ * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s
+ * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s
+ *
+ * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s
+ * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s
+ */
+
+ MXGEFW_CMD_VPUMP_UP,
+ /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */
+ MXGEFW_CMD_GET_VPUMP_CLK,
+ /* Get the lanai clock */
+
+ MXGEFW_CMD_GET_DCA_OFFSET,
+ /* offset of dca control for WDMAs */
};
enum myri10ge_mcp_cmd_status {
@@ -302,7 +345,8 @@ enum myri10ge_mcp_cmd_status {
MXGEFW_CMD_ERROR_UNALIGNED,
MXGEFW_CMD_ERROR_NO_MDIO,
MXGEFW_CMD_ERROR_XFP_FAILURE,
- MXGEFW_CMD_ERROR_XFP_ABSENT
+ MXGEFW_CMD_ERROR_XFP_ABSENT,
+ MXGEFW_CMD_ERROR_BAD_PCIE_LINK
};
#define MXGEFW_OLD_IRQ_DATA_LEN 40
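The throttle-factor comment added in the mcp.h hunk above pins down the
relationship throttle_factor = 256 * pcie_raw_speed / tx_speed, or
equivalently tx_speed = 256 * pcie_raw_speed / throttle_factor. A throwaway
userspace check of the two worked examples in that comment, using the x8
figure of 16 Gb/s raw; nothing below is driver code:

#include <stdio.h>

/* tx_speed = 256 * pcie_raw_speed / throttle_factor, per the comment above */
static double tx_speed_gbps(double pcie_raw_gbps, unsigned int throttle_factor)
{
        return 256.0 * pcie_raw_gbps / throttle_factor;
}

int main(void)
{
        /* PCI-E x8 => 16 Gb/s raw, as stated in the comment */
        printf("0x1a0 (416) -> %.3f Gb/s\n", tx_speed_gbps(16.0, 0x1a0)); /* ~9.846 */
        printf("0x200 (512) -> %.3f Gb/s\n", tx_speed_gbps(16.0, 0x200)); /* 8.000  */
        return 0;
}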
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 16a810dd6d5..07d65c2cbb2 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -1,30 +1,6 @@
#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
#define __MYRI10GE_MCP_GEN_HEADER_H__
-/* this file define a standard header used as a first entry point to
- * exchange information between firmware/driver and driver. The
- * header structure can be anywhere in the mcp. It will usually be in
- * the .data section, because some fields needs to be initialized at
- * compile time.
- * The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must
- * contains the location of the header.
- *
- * Typically a MCP will start with the following:
- * .text
- * .space 52 ! to help catch MEMORY_INT errors
- * bt start ! jump to real code
- * nop
- * .long _gen_mcp_header
- *
- * The source will have a definition like:
- *
- * mcp_gen_header_t gen_mcp_header = {
- * .header_length = sizeof(mcp_gen_header_t),
- * .mcp_type = MCP_TYPE_XXX,
- * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
- * .mcp_globals = (unsigned)&Globals
- * };
- */
#define MCP_HEADER_PTR_OFFSET 0x3c
@@ -32,13 +8,14 @@
#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */
#define MCP_TYPE_ETH 0x45544820 /* "ETH " */
#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */
+#define MCP_TYPE_DFLT 0x20202020 /* " " */
struct mcp_gen_header {
/* the first 4 fields are filled at compile time */
unsigned header_length;
__be32 mcp_type;
char version[128];
- unsigned mcp_globals; /* pointer to mcp-type specific structure */
+ unsigned mcp_private; /* pointer to mcp-type specific structure */
/* filled by the MCP at run-time */
unsigned sram_size;
@@ -53,6 +30,18 @@ struct mcp_gen_header {
*
* Never remove any field. Keep everything naturally align.
*/
+
+ /* Specifies if the running mcp is mcp0, 1, or 2. */
+ unsigned char mcp_index;
+ unsigned char disable_rabbit;
+ unsigned char unaligned_tlp;
+ unsigned char pad1;
+ unsigned counters_addr;
+ unsigned copy_block_info; /* for small mcps loaded with "lload -d" */
+ unsigned short handoff_id_major; /* must be equal */
+ unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */
+ unsigned msix_table_addr; /* start address of msix table in firmware */
+ /* 8 */
};
#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 4009c4ce96b..918f802fe08 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -1,6 +1,6 @@
/* niu.c: Neptune ethernet driver.
*
- * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
@@ -33,8 +33,8 @@
#define DRV_MODULE_NAME "niu"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.8"
-#define DRV_MODULE_RELDATE "April 24, 2008"
+#define DRV_MODULE_VERSION "0.9"
+#define DRV_MODULE_RELDATE "May 4, 2008"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -865,7 +865,6 @@ static int link_status_1g_serdes(struct niu *np, int *link_up_p)
return 0;
}
-
static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
unsigned long flags;
@@ -900,7 +899,6 @@ static int link_status_10g_serdes(struct niu *np, int *link_up_p)
return 0;
}
-
static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
struct niu_link_config *lp = &np->link_config;
@@ -957,7 +955,6 @@ out:
return err;
}
-
static int bcm8704_reset(struct niu *np)
{
int err, limit;
@@ -1357,8 +1354,6 @@ static int mii_reset(struct niu *np)
return 0;
}
-
-
static int xcvr_init_1g_rgmii(struct niu *np)
{
int err;
@@ -1419,7 +1414,6 @@ static int xcvr_init_1g_rgmii(struct niu *np)
return 0;
}
-
static int mii_init_common(struct niu *np)
{
struct niu_link_config *lp = &np->link_config;
@@ -7008,31 +7002,20 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np,
return 0;
}
-/* niu board models have a trailing dash version incremented
- * with HW rev change. Need to ingnore the dash version while
- * checking for match
- *
- * for example, for the 10G card the current vpd.board_model
- * is 501-5283-04, of which -04 is the dash version and have
- * to be ignored
- */
-static int niu_board_model_match(struct niu *np, const char *model)
-{
- return !strncmp(np->vpd.board_model, model, strlen(model));
-}
-
static int niu_pci_vpd_get_nports(struct niu *np)
{
int ports = 0;
- if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) ||
- (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) ||
- (niu_board_model_match(np, NIU_ALONSO_BM_STR))) {
+ if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
ports = 4;
- } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) ||
- (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) ||
- (niu_board_model_match(np, NIU_FOXXY_BM_STR)) ||
- (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) {
+ } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
+ (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
ports = 2;
}
@@ -7053,8 +7036,8 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
return;
}
- if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
- !strcmp(np->vpd.model, "SUNW,CP3260")) {
+ if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+ !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
np->flags |= NIU_FLAGS_10G;
np->flags &= ~NIU_FLAGS_FIBER;
np->flags |= NIU_FLAGS_XCVR_SERDES;
@@ -7065,7 +7048,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
}
if (np->flags & NIU_FLAGS_10G)
np->mac_xcvr = MAC_XCVR_XPCS;
- } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
+ } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
@@ -7264,8 +7247,11 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
ESPC_NUM_PORTS_MACS_VAL;
+ /* All of the current probing methods fail on
+ * Maramba on-board parts.
+ */
if (!parent->num_ports)
- return -ENODEV;
+ parent->num_ports = 4;
}
}
}
@@ -7538,8 +7524,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
u32 val;
int err;
- if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
- !strcmp(np->vpd.model, "SUNW,CP3260")) {
+ if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+ !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
num_10g = 0;
num_1g = 2;
parent->plat_type = PLAT_TYPE_ATCA_CP3220;
@@ -7548,7 +7534,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
phy_encode(PORT_TYPE_1G, 1) |
phy_encode(PORT_TYPE_1G, 2) |
phy_encode(PORT_TYPE_1G, 3));
- } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) {
+ } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
num_10g = 2;
num_1g = 0;
parent->num_ports = 2;
@@ -7943,6 +7929,7 @@ static int __devinit niu_get_of_props(struct niu *np)
struct device_node *dp;
const char *phy_type;
const u8 *mac_addr;
+ const char *model;
int prop_len;
if (np->parent->plat_type == PLAT_TYPE_NIU)
@@ -7997,6 +7984,11 @@ static int __devinit niu_get_of_props(struct niu *np)
memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ model = of_get_property(dp, "model", &prop_len);
+
+ if (model)
+ strcpy(np->vpd.model, model);
+
return 0;
#else
return -EINVAL;
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 101a3f1a8ec..c6fa883daa2 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2946,6 +2946,15 @@ struct rx_ring_info {
#define NIU_ALONSO_BM_STR "373-0202"
#define NIU_FOXXY_BM_STR "501-7961"
#define NIU_2XGF_MRVL_BM_STR "SK-6E82"
+#define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc"
+#define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf"
+#define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem"
+#define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem"
+#define NIU_ALONSO_MDL_STR "SUNW,CP3220"
+#define NIU_KIMI_MDL_STR "SUNW,CP3260"
+#define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune"
+#define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem"
+#define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf"
#define NIU_VPD_MIN_MAJOR 3
#define NIU_VPD_MIN_MINOR 4
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index d0d5585114b..81fd85214b9 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,12 +22,8 @@
*************************************************************************/
#define DRV_NAME "pcnet32"
-#ifdef CONFIG_PCNET32_NAPI
-#define DRV_VERSION "1.34-NAPI"
-#else
-#define DRV_VERSION "1.34"
-#endif
-#define DRV_RELDATE "14.Aug.2007"
+#define DRV_VERSION "1.35"
+#define DRV_RELDATE "21.Apr.2008"
#define PFX DRV_NAME ": "
static const char *const version =
@@ -445,30 +441,24 @@ static struct pcnet32_access pcnet32_dwio = {
static void pcnet32_netif_stop(struct net_device *dev)
{
-#ifdef CONFIG_PCNET32_NAPI
struct pcnet32_private *lp = netdev_priv(dev);
-#endif
+
dev->trans_start = jiffies;
-#ifdef CONFIG_PCNET32_NAPI
napi_disable(&lp->napi);
-#endif
netif_tx_disable(dev);
}
static void pcnet32_netif_start(struct net_device *dev)
{
-#ifdef CONFIG_PCNET32_NAPI
struct pcnet32_private *lp = netdev_priv(dev);
ulong ioaddr = dev->base_addr;
u16 val;
-#endif
+
netif_wake_queue(dev);
-#ifdef CONFIG_PCNET32_NAPI
val = lp->a.read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a.write_csr(ioaddr, CSR3, val);
napi_enable(&lp->napi);
-#endif
}
/*
@@ -911,11 +901,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
rc = 1; /* default to fail */
if (netif_running(dev))
-#ifdef CONFIG_PCNET32_NAPI
pcnet32_netif_stop(dev);
-#else
- pcnet32_close(dev);
-#endif
spin_lock_irqsave(&lp->lock, flags);
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1046,7 +1032,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
a->write_bcr(ioaddr, 32, (x & ~0x0002));
-#ifdef CONFIG_PCNET32_NAPI
if (netif_running(dev)) {
pcnet32_netif_start(dev);
pcnet32_restart(dev, CSR0_NORMAL);
@@ -1055,16 +1040,6 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
-#else
- if (netif_running(dev)) {
- spin_unlock_irqrestore(&lp->lock, flags);
- pcnet32_open(dev);
- } else {
- pcnet32_purge_rx_ring(dev);
- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
- spin_unlock_irqrestore(&lp->lock, flags);
- }
-#endif
return (rc);
} /* end pcnet32_loopback_test */
@@ -1270,11 +1245,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
}
dev->stats.rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, dev);
-#ifdef CONFIG_PCNET32_NAPI
netif_receive_skb(skb);
-#else
- netif_rx(skb);
-#endif
dev->last_rx = jiffies;
dev->stats.rx_packets++;
return;
@@ -1403,7 +1374,6 @@ static int pcnet32_tx(struct net_device *dev)
return must_restart;
}
-#ifdef CONFIG_PCNET32_NAPI
static int pcnet32_poll(struct napi_struct *napi, int budget)
{
struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
@@ -1442,7 +1412,6 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
}
return work_done;
}
-#endif
#define PCNET32_REGS_PER_PHY 32
#define PCNET32_MAX_PHYS 32
@@ -1864,9 +1833,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* napi.weight is used in both the napi and non-napi cases */
lp->napi.weight = lp->rx_ring_size / 2;
-#ifdef CONFIG_PCNET32_NAPI
netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
-#endif
if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
@@ -2297,9 +2264,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
-#ifdef CONFIG_PCNET32_NAPI
napi_enable(&lp->napi);
-#endif
/* Re-initialize the PCNET32, and start it when done. */
lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2623,7 +2588,6 @@ pcnet32_interrupt(int irq, void *dev_id)
dev->name, csr0);
/* unlike for the lance, there is no restart needed */
}
-#ifdef CONFIG_PCNET32_NAPI
if (netif_rx_schedule_prep(dev, &lp->napi)) {
u16 val;
/* set interrupt masks */
@@ -2634,24 +2598,9 @@ pcnet32_interrupt(int irq, void *dev_id)
__netif_rx_schedule(dev, &lp->napi);
break;
}
-#else
- pcnet32_rx(dev, lp->napi.weight);
- if (pcnet32_tx(dev)) {
- /* reset the chip to clear the error condition, then restart */
- lp->a.reset(ioaddr);
- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
- pcnet32_restart(dev, CSR0_START);
- netif_wake_queue(dev);
- }
-#endif
csr0 = lp->a.read_csr(ioaddr, CSR0);
}
-#ifndef CONFIG_PCNET32_NAPI
- /* Set interrupt enable. */
- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
-#endif
-
if (netif_msg_intr(lp))
printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
dev->name, lp->a.read_csr(ioaddr, CSR0));
@@ -2670,9 +2619,7 @@ static int pcnet32_close(struct net_device *dev)
del_timer_sync(&lp->watchdog_timer);
netif_stop_queue(dev);
-#ifdef CONFIG_PCNET32_NAPI
napi_disable(&lp->napi);
-#endif
spin_lock_irqsave(&lp->lock, flags);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3c18bb59495..45cc2914d34 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -547,7 +547,7 @@ static void phy_force_reduction(struct phy_device *phydev)
* Must not be called from interrupt context, or while the
* phydev->lock is held.
*/
-void phy_error(struct phy_device *phydev)
+static void phy_error(struct phy_device *phydev)
{
mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index d3207c0da89..1f4ca2b54a7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2458,6 +2458,7 @@ ppp_create_interface(int unit, int *retp)
out3:
atomic_dec(&ppp_unit_count);
+ unregister_netdev(dev);
out2:
mutex_unlock(&all_ppp_mutex);
free_netdev(dev);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 244d7830c92..79359919335 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1621,9 +1621,16 @@ out_no_ppp:
end:
release_sock(sk);
- if (error != 0)
- PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
- "%s: connect failed: %d\n", session->name, error);
+ if (error != 0) {
+ if (session)
+ PRINTK(session->debug,
+ PPPOL2TP_MSG_CONTROL, KERN_WARNING,
+ "%s: connect failed: %d\n",
+ session->name, error);
+ else
+ PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
+ "connect failed: %d\n", error);
+ }
return error;
}
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 0d32123085e..1dae1f2ed81 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2474,6 +2474,8 @@ static void gelic_wl_free(struct gelic_wl_info *wl)
pr_debug("%s: <-\n", __func__);
+ free_page((unsigned long)wl->buf);
+
pr_debug("%s: destroy queues\n", __func__);
destroy_workqueue(wl->eurus_cmd_queue);
destroy_workqueue(wl->event_queue);
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 0f023447eaf..1d2daeec7ac 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,5 @@
sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
- i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
- tenxpress.o boards.o sfe4001.o
+ i2c-direct.o selftest.o ethtool.o xfp_phy.o \
+ mdio_10g.o tenxpress.o boards.o sfe4001.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index f56341d428e..695764dc2e6 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -22,5 +22,7 @@ enum efx_board_type {
extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
extern int sfe4001_poweron(struct efx_nic *efx);
extern void sfe4001_poweroff(struct efx_nic *efx);
+/* Are we putting the PHY into flash config mode */
+extern unsigned int sfe4001_phy_flash_cfg;
#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 59edcf793c1..418f2e53a95 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1873,6 +1873,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
tx_queue->queue = i;
tx_queue->buffer = NULL;
tx_queue->channel = &efx->channel[0]; /* for safety */
+ tx_queue->tso_headers_free = NULL;
}
for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
rx_queue = &efx->rx_queue[i];
@@ -2071,7 +2072,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
net_dev = alloc_etherdev(sizeof(*efx));
if (!net_dev)
return -ENOMEM;
- net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
+ net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_TSO);
if (lro)
net_dev->features |= NETIF_F_LRO;
efx = net_dev->priv;
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 43663a4619d..c53290d08e2 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -10,6 +10,55 @@
#ifndef EFX_ENUM_H
#define EFX_ENUM_H
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_XGMII: loopback within MAC at XGMII level
+ * @LOOPBACK_XGXS: loopback within MAC at XGXS level
+ * @LOOPBACK_XAUI: loopback within MAC at XAUI level
+ * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
+ * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
+ */
+/* Please keep in order and up-to-date w.r.t the following two #defines */
+enum efx_loopback_mode {
+ LOOPBACK_NONE = 0,
+ LOOPBACK_MAC = 1,
+ LOOPBACK_XGMII = 2,
+ LOOPBACK_XGXS = 3,
+ LOOPBACK_XAUI = 4,
+ LOOPBACK_PHY = 5,
+ LOOPBACK_PHYXS = 6,
+ LOOPBACK_PCS = 7,
+ LOOPBACK_PMAPMD = 8,
+ LOOPBACK_NETWORK = 9,
+ LOOPBACK_MAX
+};
+
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+extern const char *efx_loopback_mode_names[];
+#define LOOPBACK_MODE_NAME(mode) \
+ STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
+#define LOOPBACK_MODE(efx) \
+ LOOPBACK_MODE_NAME(efx->loopback_mode)
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI))
+
+#define LOOPBACK_MASK(_efx) \
+ (1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+ ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+ (((LOOPBACK_MASK(_from) & (_mask)) && \
+ ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
+
/*****************************************************************************/
/**
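The masks added to enum.h above treat the current loopback mode as a single
bit (LOOPBACK_MASK is 1 << mode) and classify it as internal when that bit
falls inside LOOPBACKS_10G_INTERNAL. A standalone restatement of the bit
arithmetic with two worked cases; the LB_* names below are local shorthand,
not the header's:

#include <stdio.h>

/* Mirrors the enum values and mask macros added above, in a standalone
 * form so the bit arithmetic can be checked in userspace. */
enum { LB_XGMII = 2, LB_XGXS = 3, LB_XAUI = 4, LB_PMAPMD = 8 };

#define LB_MASK(mode)       (1 << (mode))
#define LBS_10G_INTERNAL    (LB_MASK(LB_XGMII) | LB_MASK(LB_XGXS) | \
                             LB_MASK(LB_XAUI))
#define LB_INTERNAL(mode)   ((LBS_10G_INTERNAL & LB_MASK(mode)) ? 1 : 0)

int main(void)
{
        /* XAUI loopback sits inside the controller; PMA/PMD is in the PHY */
        printf("XAUI:   mask=0x%02x internal=%d\n",
               LB_MASK(LB_XAUI), LB_INTERNAL(LB_XAUI));
        printf("PMAPMD: mask=0x%03x internal=%d\n",
               LB_MASK(LB_PMAPMD), LB_INTERNAL(LB_PMAPMD));
        return 0;
}

LOOPBACK_OUT_OF applies the same bit test to two devices: it is 1 only when
the "from" side's mode bit lies inside the given mask and the "to" side's
does not.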
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index ad541badbd9..e2c75d10161 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -12,12 +12,26 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
+#include "selftest.h"
#include "efx.h"
#include "ethtool.h"
#include "falcon.h"
#include "gmii.h"
#include "mac.h"
+const char *efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_MAC] = "MAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_PHY] = "PHY",
+ [LOOPBACK_PHYXS] = "PHY(XS)",
+ [LOOPBACK_PCS] = "PHY(PCS)",
+ [LOOPBACK_PMAPMD] = "PHY(PMAPMD)",
+ [LOOPBACK_NETWORK] = "NETWORK",
+};
+
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
struct ethtool_string {
@@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "channel\%d")
+ * @unit_id: Unit id (e.g. 0 for "channel0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
+ * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index,
+ struct ethtool_string *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ struct ethtool_string unit_str, test_str;
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ snprintf(unit_str.name, sizeof(unit_str.name),
+ unit_format, unit_id);
+ snprintf(test_str.name, sizeof(test_str.name),
+ test_format, test_id);
+ snprintf(strings[test_index].name,
+ sizeof(strings[test_index].name),
+ "%-9s%-17s", unit_str.name, test_str.name);
+ }
+}
+
+#define EFX_PORT_NAME "port%d", 0
+#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode)
+
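The EFX_*_NAME macros above rely on a small preprocessor trick: each expands
to a format string followed by a trailing argument, so a single macro
invocation fills two consecutive slots of efx_fill_test()'s parameter list
(and EFX_LOOPBACK_NAME additionally pastes its _counter string onto the
format by literal concatenation). A self-contained illustration of the
expansion, with renamed, simplified macros and a stand-in fill function
rather than the driver's:

#include <stdio.h>

struct fake_tx_queue { int queue; };

/* Each macro expands to "format", argument -- two parameters at once. */
#define TX_QUEUE_NAME(_q)           "txq%d", (_q)->queue
#define LOOPBACK_NAME(_m, _counter) "loopback.%s." _counter, (_m)

static void fill_name(const char *unit_fmt, int unit_id,
                      const char *test_fmt, const char *test_id)
{
        char unit[32], test[64];

        snprintf(unit, sizeof(unit), unit_fmt, unit_id);
        snprintf(test, sizeof(test), test_fmt, test_id);
        printf("%-9s%-17s\n", unit, test);
}

int main(void)
{
        struct fake_tx_queue q = { .queue = 3 };

        /* expands to:
         * fill_name("txq%d", (&q)->queue,
         *           "loopback.%s.tx_sent", "XAUI");          */
        fill_name(TX_QUEUE_NAME(&q), LOOPBACK_NAME("XAUI", "tx_sent"));
        return 0;
}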
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ struct ethtool_string *strings, u64 *data)
+{
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_tx_queue(tx_queue, efx) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ EFX_PORT_NAME,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ EFX_PORT_NAME,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ struct ethtool_string *strings,
+ u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0;
+ enum efx_loopback_mode mode;
+
+ /* Interrupt */
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_poll[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.poll", NULL);
+ }
+
+ /* PHY presence */
+ efx_fill_test(n++, strings, data, &tests->phy_ok,
+ EFX_PORT_NAME, "phy_ok", NULL);
+
+ /* Loopback tests */
+ efx_fill_test(n++, strings, data, &tests->loopback_speed,
+ EFX_PORT_NAME, "loopback.speed", NULL);
+ efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
+ EFX_PORT_NAME, "loopback.full_duplex", NULL);
+ for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
static int efx_ethtool_get_stats_count(struct net_device *net_dev)
{
return EFX_ETHTOOL_NUM_STATS;
}
+static int efx_ethtool_self_test_count(struct net_device *net_dev)
+{
+ struct efx_nic *efx = net_dev->priv;
+
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+}
+
static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
+ struct efx_nic *efx = net_dev->priv;
struct ethtool_string *ethtool_strings =
(struct ethtool_string *)strings;
int i;
- if (string_set == ETH_SS_STATS)
+ switch (string_set) {
+ case ETH_SS_STATS:
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
strncpy(ethtool_strings[i].name,
efx_ethtool_stats[i].name,
sizeof(ethtool_strings[i].name));
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL,
+ ethtool_strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
}
static void efx_ethtool_get_stats(struct net_device *net_dev,
@@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
}
}
+static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
+{
+ int rc;
+
+ /* Our TSO requires TX checksumming, so force TX checksumming
+ * on when TSO is enabled.
+ */
+ if (enable) {
+ rc = efx_ethtool_set_tx_csum(net_dev, 1);
+ if (rc)
+ return rc;
+ }
+
+ return ethtool_op_set_tso(net_dev, enable);
+}
+
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx = net_dev->priv;
@@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
efx_flush_queues(efx);
+ /* Our TSO requires TX checksumming, so disable TSO when
+ * checksumming is disabled
+ */
+ if (!enable) {
+ rc = efx_ethtool_set_tso(net_dev, 0);
+ if (rc)
+ return rc;
+ }
+
return 0;
}
@@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
return efx->rx_checksum_enabled;
}
+static void efx_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct efx_nic *efx = net_dev->priv;
+ struct efx_self_tests efx_tests;
+ int offline, already_up;
+ int rc;
+
+ ASSERT_RTNL();
+ if (efx->state != STATE_RUNNING) {
+ rc = -EIO;
+ goto fail1;
+ }
+
+ /* We need rx buffers and interrupts. */
+ already_up = (efx->net_dev->flags & IFF_UP);
+ if (!already_up) {
+ rc = dev_open(efx->net_dev);
+ if (rc) {
+ EFX_ERR(efx, "failed opening device.\n");
+ goto fail2;
+ }
+ }
+
+ memset(&efx_tests, 0, sizeof(efx_tests));
+ offline = (test->flags & ETH_TEST_FL_OFFLINE);
+
+ /* Perform online self tests first */
+ rc = efx_online_test(efx, &efx_tests);
+ if (rc)
+ goto out;
+
+ /* Perform offline tests only if online tests passed */
+ if (offline) {
+ /* Stop the kernel from sending packets during the test. */
+ efx_stop_queue(efx);
+ rc = efx_flush_queues(efx);
+ if (!rc)
+ rc = efx_offline_test(efx, &efx_tests,
+ efx->loopback_modes);
+ efx_wake_queue(efx);
+ }
+
+ out:
+ if (!already_up)
+ dev_close(efx->net_dev);
+
+ EFX_LOG(efx, "%s all %sline self-tests\n",
+ rc == 0 ? "passed" : "failed", offline ? "off" : "on");
+
+ fail2:
+ fail1:
+ /* Fill ethtool results structures */
+ efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+ if (rc)
+ test->flags |= ETH_TEST_FL_FAILED;
+}
+
/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
@@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = {
.set_tx_csum = efx_ethtool_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = efx_ethtool_set_tso,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
+ .self_test_count = efx_ethtool_self_test_count,
+ .self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.phys_id = efx_ethtool_phys_id,
.get_stats_count = efx_ethtool_get_stats_count,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 46db549ce58..b57cc68058c 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1129,6 +1129,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
case RX_RECOVERY_EV_DECODE:
EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
EFX_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY :
@@ -1731,7 +1732,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
efx_oword_t temp;
int count;
- if (FALCON_REV(efx) < FALCON_REV_B0)
+ if ((FALCON_REV(efx) < FALCON_REV_B0) ||
+ (efx->loopback_mode != LOOPBACK_NONE))
return;
falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
@@ -2091,6 +2093,8 @@ static int falcon_probe_phy(struct efx_nic *efx)
efx->phy_type);
return -1;
}
+
+ efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
return 0;
}
@@ -2468,14 +2472,12 @@ int falcon_probe_nic(struct efx_nic *efx)
fail5:
falcon_free_buffer(efx, &efx->irq_status);
fail4:
- /* fall-thru */
fail3:
if (nic_data->pci_dev2) {
pci_dev_put(nic_data->pci_dev2);
nic_data->pci_dev2 = NULL;
}
fail2:
- /* fall-thru */
fail1:
kfree(efx->nic_data);
return rc;
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 0485a63eaff..06e2d68fc3d 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -636,6 +636,14 @@
#define XX_HIDRVA_WIDTH 1
#define XX_LODRVA_LBN 8
#define XX_LODRVA_WIDTH 1
+#define XX_LPBKD_LBN 3
+#define XX_LPBKD_WIDTH 1
+#define XX_LPBKC_LBN 2
+#define XX_LPBKC_WIDTH 1
+#define XX_LPBKB_LBN 1
+#define XX_LPBKB_WIDTH 1
+#define XX_LPBKA_LBN 0
+#define XX_LPBKA_WIDTH 1
#define XX_TXDRV_CTL_REG_MAC 0x12
#define XX_DEQD_LBN 28
@@ -656,8 +664,14 @@
#define XX_DTXA_WIDTH 4
/* XAUI XGXS core status register */
-#define XX_FORCE_SIG_DECODE_FORCED 0xff
#define XX_CORE_STAT_REG_MAC 0x16
+#define XX_FORCE_SIG_LBN 24
+#define XX_FORCE_SIG_WIDTH 8
+#define XX_FORCE_SIG_DECODE_FORCED 0xff
+#define XX_XGXS_LB_EN_LBN 23
+#define XX_XGXS_LB_EN_WIDTH 1
+#define XX_XGMII_LB_EN_LBN 22
+#define XX_XGMII_LB_EN_WIDTH 1
#define XX_ALIGN_DONE_LBN 20
#define XX_ALIGN_DONE_WIDTH 1
#define XX_SYNC_STAT_LBN 16
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index aa7521b24a5..a74b7931a3c 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -32,7 +32,7 @@
(FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
void falcon_xmac_writel(struct efx_nic *efx,
- efx_dword_t *value, unsigned int mac_reg)
+ efx_dword_t *value, unsigned int mac_reg)
{
efx_oword_t temp;
@@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx)
udelay(10);
}
+ /* This often fails when DSP is disabled, ignore it */
+ if (sfe4001_phy_flash_cfg != 0)
+ return 0;
+
EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
return -ETIMEDOUT;
}
@@ -223,7 +227,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
/* The ISR latches, so clear it and re-read */
falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
-
+
if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
@@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
{
efx_dword_t reg;
- if (FALCON_REV(efx) < FALCON_REV_B0)
+ if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
return;
/* Flush the ISR */
@@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx)
efx_dword_t reg;
int align_done, sync_status, link_ok = 0;
+ if (LOOPBACK_INTERNAL(efx))
+ return 1;
+
/* Read link status */
falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
@@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
}
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+ int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
+ int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
+ int xgmii_loopback =
+ (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
+
+ /* XGXS block is flaky and will need to be reset if moving
+ * into or out of XGMII, XGXS or XAUI loopbacks. */
+ if (EFX_WORKAROUND_5147(efx)) {
+ int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+ int reset_xgxs;
+
+ falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+ old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);
+
+ falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
+ old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback));
+ if (reset_xgxs) {
+ falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
+ falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+ udelay(1);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
+ falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+ udelay(1);
+ }
+ }
+
+ falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+ EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ XX_FORCE_SIG_DECODE_FORCED : 0);
+ EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
+ falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+
+ falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
+ falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
+}
+
+
/* Try to bring up the Falcon side of the Falcon-PHY XAUI link if it fails
 * to come back up. Bash it until it comes back up */
static int falcon_check_xaui_link_up(struct efx_nic *efx)
@@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
max_tries = tries;
- if (efx->phy_type == PHY_TYPE_NONE)
+ if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+ (efx->phy_type == PHY_TYPE_NONE))
return 0;
while (tries) {
@@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx)
falcon_mask_status_intr(efx, 0);
falcon_deconfigure_mac_wrapper(efx);
+
+ efx->tx_disabled = LOOPBACK_INTERNAL(efx);
efx->phy_op->reconfigure(efx);
+
+ falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
+
falcon_reconfigure_mac_wrapper(efx);
/* Ensure XAUI link is up */
@@ -491,13 +559,15 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
(mac_stats->rx_bytes - mac_stats->rx_good_bytes);
}
-#define EFX_XAUI_RETRAIN_MAX 8
-
int falcon_check_xmac(struct efx_nic *efx)
{
unsigned xaui_link_ok;
int rc;
+ if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+ (efx->phy_type == PHY_TYPE_NONE))
+ return 0;
+
falcon_mask_status_intr(efx, 0);
xaui_link_ok = falcon_xaui_link_ok(efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index dc06bb0aa57..c4f540e93b7 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
int status;
int phy_id = efx->mii.phy_id;
+ if (LOOPBACK_INTERNAL(efx))
+ return 0;
+
/* Read MMD STATUS2 to check it is responding. */
status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
@@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
int mmd = 0;
int good;
+ /* If the port is in loopback, then we should only consider a subset
+ * of MMDs */
+ if (LOOPBACK_INTERNAL(efx))
+ return 1;
+ else if (efx->loopback_mode == LOOPBACK_NETWORK)
+ return 0;
+ else if (efx->loopback_mode == LOOPBACK_PHYXS)
+ mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
+ MDIO_MMDREG_DEVS0_PCS |
+ MDIO_MMDREG_DEVS0_PMAPMD);
+ else if (efx->loopback_mode == LOOPBACK_PCS)
+ mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS |
+ MDIO_MMDREG_DEVS0_PMAPMD);
+ else if (efx->loopback_mode == LOOPBACK_PMAPMD)
+ mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD;
+
while (mmd_mask) {
if (mmd_mask & 1) {
/* Double reads because link state is latched, and a
@@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return ok;
}
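A side note on the mask filtering added above: any MMD at or below the active loopback point never sees traffic, so its link status is meaningless and it is dropped from the set that gets polled. A minimal standalone sketch of the same arithmetic; the DEVS_* values below follow the usual clause 45 Devices-in-Package bit layout and are illustrative only, not the driver's MDIO_MMDREG_DEVS0_* constants:

#include <stdio.h>

/* Illustrative clause 45 Devices-in-Package bits (PMA/PMD=1, PCS=3,
 * PHY XS=4); the driver uses its own MDIO_MMDREG_DEVS0_* definitions. */
#define DEVS_PMAPMD	(1 << 1)
#define DEVS_PCS	(1 << 3)
#define DEVS_PHYXS	(1 << 4)

enum loopback { LB_NONE, LB_PHYXS, LB_PCS, LB_PMAPMD };

static unsigned int filter_mmds(unsigned int mask, enum loopback mode)
{
	/* Drop every MMD at or below the loopback point: frames never
	 * reach them, so their link status tells us nothing. */
	if (mode == LB_PHYXS)
		mask &= ~(DEVS_PHYXS | DEVS_PCS | DEVS_PMAPMD);
	else if (mode == LB_PCS)
		mask &= ~(DEVS_PCS | DEVS_PMAPMD);
	else if (mode == LB_PMAPMD)
		mask &= ~DEVS_PMAPMD;
	return mask;
}

int main(void)
{
	unsigned int all = DEVS_PHYXS | DEVS_PCS | DEVS_PMAPMD;

	/* Only the PHY XS bit survives, so only that MMD is polled. */
	printf("PCS loopback polls mask 0x%x\n", filter_mmds(all, LB_PCS));
	return 0;
}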
+void mdio_clause45_transmit_disable(struct efx_nic *efx)
+{
+ int phy_id = efx->mii.phy_id;
+ int ctrl1, ctrl2;
+
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_TXDIS);
+ if (efx->tx_disabled)
+ ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
+ else
+ ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_TXDIS, ctrl2);
+}
+
+void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
+{
+ int phy_id = efx->mii.phy_id;
+ int ctrl1, ctrl2;
+
+ /* Handle (with debouncing) PMA/PMD loopback */
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_CTRL1);
+
+ if (efx->loopback_mode == LOOPBACK_PMAPMD)
+ ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
+ else
+ ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
+
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_CTRL1, ctrl2);
+
+ /* Handle (with debouncing) PCS loopback */
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
+ MDIO_MMDREG_CTRL1);
+ if (efx->loopback_mode == LOOPBACK_PCS)
+ ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+ else
+ ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS,
+ MDIO_MMDREG_CTRL1, ctrl2);
+
+ /* Handle (with debouncing) PHYXS network loopback */
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+ MDIO_MMDREG_CTRL1);
+ if (efx->loopback_mode == LOOPBACK_NETWORK)
+ ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+ else
+ ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
+ MDIO_MMDREG_CTRL1, ctrl2);
+}
+
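The three stanzas in mdio_clause45_phy_reconfigure() above share one idiom: read the control register, compute the desired value, and write back only if something changed, so a reconfigure that changes nothing generates no MDIO traffic. A condensed standalone sketch of that read-modify-write-if-changed pattern; set_bit_if_needed() and the fake register here are illustrative, not driver code:

#include <stdio.h>

/* Pretend MDIO register backing store, for illustration only. */
static int fake_reg = 0x0040;

static int reg_read(void) { return fake_reg; }
static void reg_write(int val) { fake_reg = val; printf("write 0x%04x\n", val); }

/* Set or clear one bit, writing the register only if its value changes. */
static void set_bit_if_needed(int bit_lbn, int enable)
{
	int old = reg_read();
	int want = enable ? (old | (1 << bit_lbn)) : (old & ~(1 << bit_lbn));

	if (want != old)
		reg_write(want);	/* skipped entirely when nothing changed */
}

int main(void)
{
	set_bit_if_needed(14, 1);	/* first call writes the register */
	set_bit_if_needed(14, 1);	/* second call is a no-op */
	return 0;
}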
/**
* mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
* @efx: Efx NIC
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 2214b6d820a..cb99f3f4491 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -44,11 +44,16 @@
#define MDIO_MMDREG_DEVS1 (6)
#define MDIO_MMDREG_CTRL2 (7)
#define MDIO_MMDREG_STAT2 (8)
+#define MDIO_MMDREG_TXDIS (9)
/* Bits in MMDREG_CTRL1 */
/* Reset */
#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
+/* Loopback */
+/* Loopback bit for WIS, PCS, PHYXS and DTEXS */
+#define MDIO_MMDREG_CTRL1_LBACK_LBN (14)
+#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1)
/* Bits in MMDREG_STAT1 */
#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
@@ -56,6 +61,9 @@
/* Link state */
#define MDIO_MMDREG_STAT1_LINK_LBN (2)
#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
+/* Low power ability */
+#define MDIO_MMDREG_STAT1_LPABLE_LBN (1)
+#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1)
/* Bits in ID reg */
#define MDIO_ID_REV(_id32) (_id32 & 0xf)
@@ -76,6 +84,14 @@
#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
+/* Bits in MMDREG_TXDIS */
+#define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0)
+#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1)
+
+/* MMD-specific bits, ordered by MMD, then register */
+#define MDIO_PMAPMD_CTRL1_LBACK_LBN (0)
+#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1)
+
/* PMA type (4 bits) */
#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
@@ -95,7 +111,7 @@
#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
-/* /\* PHY XGXS lane state *\/ */
+/* PHY XGXS lane state */
#define MDIO_PHYXS_LANE_STATE (0x18)
#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
@@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
extern int mdio_clause45_links_ok(struct efx_nic *efx,
unsigned int mmd_mask);
+/* Generic transmit disable support through PMAPMD */
+extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
+
+/* Generic part of reconfigure: set/clear loopback bits */
+extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);
+
/* Read (some of) the PHY settings over MDIO */
extern void mdio_clause45_get_settings(struct efx_nic *efx,
struct ethtool_cmd *ecmd);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index c505482c252..59f261b4171 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -134,6 +134,8 @@ struct efx_special_buffer {
* Set only on the final fragment of a packet; %NULL for all other
* fragments. When this fragment completes, then we can free this
* skb.
+ * @tsoh: The associated TSO header structure, or %NULL if this
+ * buffer is not a TSO header.
* @dma_addr: DMA address of the fragment.
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
@@ -144,6 +146,7 @@ struct efx_special_buffer {
*/
struct efx_tx_buffer {
const struct sk_buff *skb;
+ struct efx_tso_header *tsoh;
dma_addr_t dma_addr;
unsigned short len;
unsigned char continuation;
@@ -187,6 +190,13 @@ struct efx_tx_buffer {
* variable indicates that the queue is full. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
+ * @tso_headers_free: A list of TSO headers allocated for this TX queue
+ * that are not in use, and so available for new TSO sends. The list
+ * is protected by the TX queue lock.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ * blocks
+ * @tso_packets: Number of packets via the TSO xmit path
*/
struct efx_tx_queue {
/* Members which don't change on the fast path */
@@ -206,6 +216,10 @@ struct efx_tx_queue {
unsigned int insert_count ____cacheline_aligned_in_smp;
unsigned int write_count;
unsigned int old_read_count;
+ struct efx_tso_header *tso_headers_free;
+ unsigned int tso_bursts;
+ unsigned int tso_long_headers;
+ unsigned int tso_packets;
};
/**
@@ -434,6 +448,9 @@ struct efx_board {
struct efx_blinker blinker;
};
+#define STRING_TABLE_LOOKUP(val, member) \
+ member ## _names[val]
+
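STRING_TABLE_LOOKUP() works by token pasting: STRING_TABLE_LOOKUP(val, foo) expands to foo_names[val], so it presumably pairs with name tables (for instance something like efx_loopback_mode_names[]) defined elsewhere in the patch but not visible in this hunk. A tiny standalone demonstration of the expansion, using made-up names:

#include <stdio.h>

#define STRING_TABLE_LOOKUP(val, member) \
	member ## _names[val]

/* Hypothetical name table; the driver's real tables live elsewhere. */
static const char *fruit_names[] = { "apple", "banana", "cherry" };

int main(void)
{
	int idx = 1;

	/* Expands to fruit_names[idx] during preprocessing. */
	printf("%s\n", STRING_TABLE_LOOKUP(idx, fruit));
	return 0;
}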
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
@@ -506,6 +523,7 @@ enum efx_fc_type {
* @check_hw: Check hardware
* @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
* @mmds: MMD presence mask
+ * @loopbacks: Supported loopback modes mask
*/
struct efx_phy_operations {
int (*init) (struct efx_nic *efx);
@@ -515,6 +533,7 @@ struct efx_phy_operations {
int (*check_hw) (struct efx_nic *efx);
void (*reset_xaui) (struct efx_nic *efx);
int mmds;
+ unsigned loopbacks;
};
/*
@@ -653,7 +672,6 @@ union efx_multicast_hash {
* @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
* @mii: PHY interface
- * @phy_powered: PHY power state
* @tx_disabled: PHY transmitter turned off
* @link_up: Link status
* @link_options: Link options (MII/GMII format)
@@ -662,6 +680,9 @@ union efx_multicast_hash {
* @multicast_hash: Multicast hash table
* @flow_control: Flow control flags - separate RX/TX so can't use link_options
* @reconfigure_work: work item for dealing with PHY events
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
*
* The @priv field of the corresponding &struct net_device points to
* this.
@@ -721,6 +742,7 @@ struct efx_nic {
struct efx_phy_operations *phy_op;
void *phy_data;
struct mii_if_info mii;
+ unsigned tx_disabled;
int link_up;
unsigned int link_options;
@@ -732,6 +754,10 @@ struct efx_nic {
struct work_struct reconfigure_work;
atomic_t rx_reset;
+ enum efx_loopback_mode loopback_mode;
+ unsigned int loopback_modes;
+
+ void *loopback_selftest;
};
/**
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 551299b462a..670622373dd 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -19,6 +19,7 @@
#include "rx.h"
#include "efx.h"
#include "falcon.h"
+#include "selftest.h"
#include "workarounds.h"
/* Number of RX descriptors pushed at once. */
@@ -683,6 +684,15 @@ void __efx_rx_packet(struct efx_channel *channel,
struct sk_buff *skb;
int lro = efx->net_dev->features & NETIF_F_LRO;
+ /* If we're in a loopback test, then pass the packet directly to the
+ * loopback layer, and free the rx_buf here
+ */
+ if (unlikely(efx->loopback_selftest)) {
+ efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+ efx_free_rx_buffer(efx, rx_buf);
+ goto done;
+ }
+
if (rx_buf->skb) {
prefetch(skb_shinfo(rx_buf->skb));
@@ -736,7 +746,6 @@ void __efx_rx_packet(struct efx_channel *channel,
/* Update allocation strategy method */
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- /* fall-thru */
done:
efx->net_dev->last_rx = jiffies;
}
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
new file mode 100644
index 00000000000..cbda15946e8
--- /dev/null
+++ b/drivers/net/sfc/selftest.c
@@ -0,0 +1,717 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <asm/io.h>
+#include "net_driver.h"
+#include "ethtool.h"
+#include "efx.h"
+#include "falcon.h"
+#include "selftest.h"
+#include "boards.h"
+#include "workarounds.h"
+#include "mac.h"
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+ struct ethhdr header;
+ struct iphdr ip;
+ struct udphdr udp;
+ __be16 iteration;
+ const char msg[64];
+} __attribute__ ((packed));
+
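The packed attribute is load-bearing here: the compiler would otherwise be free to pad between the 14-byte Ethernet header and the IP header, and the sizeof(*payload) length check in efx_loopback_rx_packet() would then disagree with what actually goes on the wire. A standalone sketch of the resulting fixed layout, using byte-array stand-ins for the kernel header structs:

#include <stdio.h>
#include <stdint.h>

/* Same shape as efx_loopback_payload, with byte arrays standing in for
 * the kernel's ethhdr/iphdr/udphdr so it compiles outside the kernel. */
struct loopback_payload {
	uint8_t  eth[14];	/* struct ethhdr  */
	uint8_t  ip[20];	/* struct iphdr   */
	uint8_t  udp[8];	/* struct udphdr  */
	uint16_t iteration;
	char     msg[64];
} __attribute__((packed));

int main(void)
{
	/* 14 + 20 + 8 + 2 + 64 = 108 bytes on the wire, every time. */
	printf("payload size = %zu\n", sizeof(struct loopback_payload));
	return 0;
}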
+/* Loopback test source MAC address */
+static const unsigned char payload_source[ETH_ALEN] = {
+ 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char *payload_msg =
+ "Hello world! This is an Efx loopback test in progress!";
+
+/**
+ * efx_selftest_state - persistent state during a selftest
+ * @flush: Drop all packets in efx_loopback_rx_packet
+ * @packet_count: Number of packets being used in this test
+ * @skbs: An array of skbs transmitted
+ * @rx_good: RX good packet count
+ * @rx_bad: RX bad packet count
+ * @payload: Payload used in tests
+ */
+struct efx_selftest_state {
+ int flush;
+ int packet_count;
+ struct sk_buff **skbs;
+ atomic_t rx_good;
+ atomic_t rx_bad;
+ struct efx_loopback_payload payload;
+};
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************/
+
+/* Level of loopback testing
+ *
+ * The maximum packet burst length is 16**(n-1), i.e.
+ *
+ * - Level 0 : no packets
+ * - Level 1 : 1 packet
+ * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
+ * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets)
+ *
+ */
+static unsigned int loopback_test_level = 3;
+
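For reference, the per-level packet counts come from min(1 << (i << 2), ring_size / 3) in efx_test_loopback() below, i.e. bursts of 1, 16 and 256 packets capped at a third of the TX ring. A quick standalone check of that arithmetic; the 4096-entry ring is only an example value:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int ring_entries = 4096;	/* example TX ring size */
	unsigned int cap = ring_entries / 3;	/* never queue more than a third */
	int level = 3, i;

	for (i = 0; i < level; i++)
		printf("burst %d: %u packets\n", i, MIN(1u << (i << 2), cap));
	/* Prints 1, 16 and 256, matching the levels listed above. */
	return 0;
}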
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int efx_test_interrupts(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ struct efx_channel *channel;
+
+ EFX_LOG(efx, "testing interrupts\n");
+ tests->interrupt = -1;
+
+ /* Reset interrupt flag */
+ efx->last_irq_cpu = -1;
+ smp_wmb();
+
+ /* ACK each interrupting event queue. Receiving an interrupt due to
+ * traffic before a test event is raised is considered a pass */
+ efx_for_each_channel_with_interrupt(channel, efx) {
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ if (efx->last_irq_cpu >= 0)
+ goto success;
+ }
+
+ falcon_generate_interrupt(efx);
+
+ /* Wait for arrival of test interrupt. */
+ EFX_LOG(efx, "waiting for test interrupt\n");
+ schedule_timeout_uninterruptible(HZ / 10);
+ if (efx->last_irq_cpu >= 0)
+ goto success;
+
+ EFX_ERR(efx, "timed out waiting for interrupt\n");
+ return -ETIMEDOUT;
+
+ success:
+ EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n",
+ efx->interrupt_mode, efx->last_irq_cpu);
+ tests->interrupt = 1;
+ return 0;
+}
+
+/* Test generation and receipt of non-interrupting events */
+static int efx_test_eventq(struct efx_channel *channel,
+ struct efx_self_tests *tests)
+{
+ unsigned int magic;
+
+ /* Channel specific code, limited to 20 bits */
+ magic = (0x00010150 + channel->channel);
+ EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
+ channel->channel, magic);
+
+ tests->eventq_dma[channel->channel] = -1;
+ tests->eventq_int[channel->channel] = 1; /* fake pass */
+ tests->eventq_poll[channel->channel] = 1; /* fake pass */
+
+ /* Reset flag and zero magic word */
+ channel->efx->last_irq_cpu = -1;
+ channel->eventq_magic = 0;
+ smp_wmb();
+
+ falcon_generate_test_event(channel, magic);
+ udelay(1);
+
+ efx_process_channel_now(channel);
+ if (channel->eventq_magic != magic) {
+ EFX_ERR(channel->efx, "channel %d failed to see test event\n",
+ channel->channel);
+ return -ETIMEDOUT;
+ } else {
+ tests->eventq_dma[channel->channel] = 1;
+ }
+
+ return 0;
+}
+
+/* Test generation and receipt of interrupting events */
+static int efx_test_eventq_irq(struct efx_channel *channel,
+ struct efx_self_tests *tests)
+{
+ unsigned int magic, count;
+
+ /* Channel specific code, limited to 20 bits */
+ magic = (0x00010150 + channel->channel);
+ EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
+ channel->channel, magic);
+
+ tests->eventq_dma[channel->channel] = -1;
+ tests->eventq_int[channel->channel] = -1;
+ tests->eventq_poll[channel->channel] = -1;
+
+ /* Reset flag and zero magic word */
+ channel->efx->last_irq_cpu = -1;
+ channel->eventq_magic = 0;
+ smp_wmb();
+
+ falcon_generate_test_event(channel, magic);
+
+ /* Wait for arrival of interrupt */
+ count = 0;
+ do {
+ schedule_timeout_uninterruptible(HZ / 100);
+
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+
+ if (channel->eventq_magic == magic)
+ goto eventq_ok;
+ } while (++count < 2);
+
+ EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
+ channel->channel);
+
+ /* See if interrupt arrived */
+ if (channel->efx->last_irq_cpu >= 0) {
+ EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
+ "during event queue test\n", channel->channel,
+ raw_smp_processor_id());
+ tests->eventq_int[channel->channel] = 1;
+ }
+
+ /* Check to see if event was received even if interrupt wasn't */
+ efx_process_channel_now(channel);
+ if (channel->eventq_magic == magic) {
+ EFX_ERR(channel->efx, "channel %d event was generated, but "
+ "failed to trigger an interrupt\n", channel->channel);
+ tests->eventq_dma[channel->channel] = 1;
+ }
+
+ return -ETIMEDOUT;
+ eventq_ok:
+ EFX_LOG(channel->efx, "channel %d event queue passed\n",
+ channel->channel);
+ tests->eventq_dma[channel->channel] = 1;
+ tests->eventq_int[channel->channel] = 1;
+ tests->eventq_poll[channel->channel] = 1;
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * PHY testing
+ *
+ **************************************************************************/
+
+/* Check PHY presence by reading the PHY ID registers */
+static int efx_test_phy(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ u16 physid1, physid2;
+ struct mii_if_info *mii = &efx->mii;
+ struct net_device *net_dev = efx->net_dev;
+
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return 0;
+
+ EFX_LOG(efx, "testing PHY presence\n");
+ tests->phy_ok = -1;
+
+ physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
+ physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
+
+ if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
+ (physid2 != 0x0000) && (physid2 != 0xffff)) {
+ EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
+ mii->phy_id, physid1, physid2);
+ tests->phy_ok = 1;
+ return 0;
+ }
+
+ EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
+ return -ENODEV;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can be executing concurrently.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void efx_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len)
+{
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *received;
+ struct efx_loopback_payload *payload;
+
+ BUG_ON(!buf_ptr);
+
+ /* If we are just flushing, then drop the packet */
+ if ((state == NULL) || state->flush)
+ return;
+
+ payload = &state->payload;
+
+ received = (struct efx_loopback_payload *)(char *) buf_ptr;
+ received->ip.saddr = payload->ip.saddr;
+ received->ip.check = payload->ip.check;
+
+ /* Check that header exists */
+ if (pkt_len < sizeof(received->header)) {
+ EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that the ethernet header exists */
+ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+ EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check packet length */
+ if (pkt_len != sizeof(*payload)) {
+ EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that IP header matches */
+ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+ EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that msg and padding matches */
+ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+ EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that iteration matches */
+ if (received->iteration != payload->iteration) {
+ EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Increase correct RX count */
+ EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+
+ atomic_inc(&state->rx_good);
+ return;
+
+ err:
+#ifdef EFX_ENABLE_DEBUG
+ if (atomic_read(&state->rx_bad) == 0) {
+ EFX_ERR(efx, "received packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ buf_ptr, pkt_len, 0);
+ EFX_ERR(efx, "expected packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ &state->payload, sizeof(state->payload), 0);
+ }
+#endif
+ atomic_inc(&state->rx_bad);
+}
+
+/* Initialise an efx_selftest_state for a new iteration */
+static void efx_iterate_state(struct efx_nic *efx)
+{
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_loopback_payload *payload = &state->payload;
+
+ /* Initialise the layer II (Ethernet) header */
+ memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
+ memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
+ payload->header.h_proto = htons(ETH_P_IP);
+
+ /* saddr set later and used as incrementing count */
+ payload->ip.daddr = htonl(INADDR_LOOPBACK);
+ payload->ip.ihl = 5;
+ payload->ip.check = htons(0xdead);
+ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+ payload->ip.version = IPVERSION;
+ payload->ip.protocol = IPPROTO_UDP;
+
+ /* Initialise udp header */
+ payload->udp.source = 0;
+ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+ sizeof(struct iphdr));
+ payload->udp.check = 0; /* checksum ignored */
+
+ /* Fill out payload */
+ payload->iteration = htons(ntohs(payload->iteration) + 1);
+ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+ /* Fill out remaining state members */
+ atomic_set(&state->rx_good, 0);
+ atomic_set(&state->rx_bad, 0);
+ smp_wmb();
+}
+
+static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *payload;
+ struct sk_buff *skb;
+ int i, rc;
+
+ /* Transmit N copies of buffer */
+ for (i = 0; i < state->packet_count; i++) {
+ /* Allocate an skb, holding an extra reference for
+ * transmit completion counting */
+ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ state->skbs[i] = skb;
+ skb_get(skb);
+
+ /* Copy the payload in, incrementing the source address to
+ * exercise the rss vectors */
+ payload = ((struct efx_loopback_payload *)
+ skb_put(skb, sizeof(state->payload)));
+ memcpy(payload, &state->payload, sizeof(state->payload));
+ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+ /* Ensure everything we've written is visible to the
+ * interrupt handler. */
+ smp_wmb();
+
+ if (NET_DEV_REGISTERED(efx))
+ netif_tx_lock_bh(efx->net_dev);
+ rc = efx_xmit(efx, tx_queue, skb);
+ if (NET_DEV_REGISTERED(efx))
+ netif_tx_unlock_bh(efx->net_dev);
+
+ if (rc != NETDEV_TX_OK) {
+ EFX_ERR(efx, "TX queue %d could not transmit packet %d "
+ "of %d in %s loopback test\n", tx_queue->queue,
+ i + 1, state->packet_count, LOOPBACK_MODE(efx));
+
+ /* Defer cleaning up the other skbs for the caller */
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct sk_buff *skb;
+ int tx_done = 0, rx_good, rx_bad;
+ int i, rc = 0;
+
+ if (NET_DEV_REGISTERED(efx))
+ netif_tx_lock_bh(efx->net_dev);
+
+ /* Count the number of tx completions, and decrement the refcnt. Any
+ * skbs not already completed will be freed when the queue is flushed */
+ for (i=0; i < state->packet_count; i++) {
+ skb = state->skbs[i];
+ if (skb && !skb_shared(skb))
+ ++tx_done;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (NET_DEV_REGISTERED(efx))
+ netif_tx_unlock_bh(efx->net_dev);
+
+ /* Check TX completion and received packet counts */
+ rx_good = atomic_read(&state->rx_good);
+ rx_bad = atomic_read(&state->rx_bad);
+ if (tx_done != state->packet_count) {
+ /* Don't free the skbs; they will be picked up on TX
+ * overflow or channel teardown.
+ */
+ EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->queue, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through so we see the RX errors as well */
+ }
+
+ /* We may always be up to a flush away from our desired packet total */
+ if (rx_good != state->packet_count) {
+ EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->queue, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through */
+ }
+
+ /* Update loopback test structure */
+ lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
+ lb_tests->tx_done[tx_queue->queue] += tx_done;
+ lb_tests->rx_good += rx_good;
+ lb_tests->rx_bad += rx_bad;
+
+ return rc;
+}
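The completion counting above leans on skb reference counts rather than a separate per-packet flag: efx_tx_loopback() takes an extra reference with skb_get() before transmitting, the normal TX completion path drops its own reference, so any skb that is no longer shared by the time efx_rx_loopback() runs must have completed. A minimal userspace analogue of that trick, with a plain counter standing in for the skb refcount:

#include <stdio.h>

struct fake_skb { int users; };		/* stands in for sk_buff::users */

static void skb_get_(struct fake_skb *s)    { s->users++; }
static int  skb_shared_(struct fake_skb *s) { return s->users != 1; }
static void kfree_skb_(struct fake_skb *s)  { s->users--; }

int main(void)
{
	struct fake_skb pkt = { .users = 1 };	/* freshly allocated */
	int tx_done = 0;

	skb_get_(&pkt);		/* extra reference held by the test */
	/* ... packet handed to the hardware here ... */
	kfree_skb_(&pkt);	/* TX completion drops its reference */

	if (!skb_shared_(&pkt))	/* only our reference left => completed */
		tx_done++;
	kfree_skb_(&pkt);	/* the test drops the final reference */

	printf("tx_done = %d\n", tx_done);
	return 0;
}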
+
+static int
+efx_test_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_channel *channel;
+ int i, rc = 0;
+
+ for (i = 0; i < loopback_test_level; i++) {
+ /* Determine how many packets to send */
+ state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
+ state->packet_count = min(1 << (i << 2), state->packet_count);
+ state->skbs = kzalloc(sizeof(state->skbs[0]) *
+ state->packet_count, GFP_KERNEL);
+ state->flush = 0;
+
+ EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
+ "packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ efx_iterate_state(efx);
+ rc = efx_tx_loopback(tx_queue);
+
+ /* NAPI polling is not enabled, so process channels synchronously */
+ schedule_timeout_uninterruptible(HZ / 50);
+ efx_for_each_channel_with_interrupt(channel, efx) {
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ }
+
+ rc |= efx_rx_loopback(tx_queue, lb_tests);
+ kfree(state->skbs);
+
+ if (rc) {
+ /* Wait a while to ensure there are no packets
+ * floating around after a failure. */
+ schedule_timeout_uninterruptible(HZ / 10);
+ return rc;
+ }
+ }
+
+ EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ return rc;
+}
+
+static int efx_test_loopbacks(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ unsigned int loopback_modes)
+{
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct ethtool_cmd ecmd, ecmd_loopback;
+ struct efx_tx_queue *tx_queue;
+ enum efx_loopback_mode old_mode, mode;
+ int count, rc = 0, link_up;
+
+ rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
+ if (rc) {
+ EFX_ERR(efx, "could not get GMII settings\n");
+ return rc;
+ }
+ old_mode = efx->loopback_mode;
+
+ /* Disable autonegotiation for the purposes of loopback */
+ memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
+ if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
+ ecmd_loopback.autoneg = AUTONEG_DISABLE;
+ ecmd_loopback.duplex = DUPLEX_FULL;
+ ecmd_loopback.speed = SPEED_10000;
+ }
+
+ rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
+ if (rc) {
+ EFX_ERR(efx, "could not disable autonegotiation\n");
+ goto out;
+ }
+ tests->loopback_speed = ecmd_loopback.speed;
+ tests->loopback_full_duplex = ecmd_loopback.duplex;
+
+ /* Test all supported loopback modes */
+ for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+ if (!(loopback_modes & (1 << mode)))
+ continue;
+
+ /* Move the port into the specified loopback mode. */
+ state->flush = 1;
+ efx->loopback_mode = mode;
+ efx_reconfigure_port(efx);
+
+ /* Wait for the PHY to signal the link is up */
+ count = 0;
+ do {
+ struct efx_channel *channel = &efx->channel[0];
+
+ falcon_check_xmac(efx);
+ schedule_timeout_uninterruptible(HZ / 10);
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ /* Wait for PHY events to be processed */
+ flush_workqueue(efx->workqueue);
+ rmb();
+
+ /* efx->link_up can be 1 even if the XAUI link is down,
+ * (bug5762). Usually, it's not worth bothering with the
+ * difference, but for selftests, we need that extra
+ * guarantee that the link is really, really, up.
+ */
+ link_up = efx->link_up;
+ if (!falcon_xaui_link_ok(efx))
+ link_up = 0;
+
+ } while ((++count < 20) && !link_up);
+
+ /* The link should now be up. If it isn't, there is no point
+ * in attempting a loopback test */
+ if (!link_up) {
+ EFX_ERR(efx, "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
+ rc = -EIO;
+ goto out;
+ }
+
+ EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
+ LOOPBACK_MODE(efx), count);
+
+ /* Test every TX queue */
+ efx_for_each_tx_queue(tx_queue, efx) {
+ rc |= efx_test_loopback(tx_queue,
+ &tests->loopback[mode]);
+ if (rc)
+ goto out;
+ }
+ }
+
+ out:
+ /* Take out of loopback and restore PHY settings */
+ state->flush = 1;
+ efx->loopback_mode = old_mode;
+ efx_ethtool_set_settings(efx->net_dev, &ecmd);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Entry points
+ *
+ *************************************************************************/
+
+/* Online (i.e. non-disruptive) testing
+ * This checks interrupt generation, event delivery and PHY presence. */
+int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ struct efx_channel *channel;
+ int rc = 0;
+
+ EFX_LOG(efx, "performing online self-tests\n");
+
+ rc |= efx_test_interrupts(efx, tests);
+ efx_for_each_channel(channel, efx) {
+ if (channel->has_interrupt)
+ rc |= efx_test_eventq_irq(channel, tests);
+ else
+ rc |= efx_test_eventq(channel, tests);
+ }
+ rc |= efx_test_phy(efx, tests);
+
+ if (rc)
+ EFX_ERR(efx, "failed online self-tests\n");
+
+ return rc;
+}
+
+/* Offline (i.e. disruptive) testing
+ * This checks MAC and PHY loopback on the specified port. */
+int efx_offline_test(struct efx_nic *efx,
+ struct efx_self_tests *tests, unsigned int loopback_modes)
+{
+ struct efx_selftest_state *state;
+ int rc = 0;
+
+ EFX_LOG(efx, "performing offline self-tests\n");
+
+ /* Create a selftest_state structure to hold state for the test */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Set the port loopback_selftest member. From this point on
+ * all received packets will be dropped. Mark the state as
+ * "flushing" so all inflight packets are dropped */
+ BUG_ON(efx->loopback_selftest);
+ state->flush = 1;
+ efx->loopback_selftest = (void *)state;
+
+ rc = efx_test_loopbacks(efx, tests, loopback_modes);
+
+ efx->loopback_selftest = NULL;
+ wmb();
+ kfree(state);
+
+ out:
+ if (rc)
+ EFX_ERR(efx, "failed offline self-tests\n");
+
+ return rc;
+}
+
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
new file mode 100644
index 00000000000..f6999c2b622
--- /dev/null
+++ b/drivers/net/sfc/selftest.h
@@ -0,0 +1,50 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SELFTEST_H
+#define EFX_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct efx_loopback_self_tests {
+ int tx_sent[EFX_MAX_TX_QUEUES];
+ int tx_done[EFX_MAX_TX_QUEUES];
+ int rx_good;
+ int rx_bad;
+};
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure.
+ */
+struct efx_self_tests {
+ int interrupt;
+ int eventq_dma[EFX_MAX_CHANNELS];
+ int eventq_int[EFX_MAX_CHANNELS];
+ int eventq_poll[EFX_MAX_CHANNELS];
+ int phy_ok;
+ int loopback_speed;
+ int loopback_full_duplex;
+ struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
+};
+
+extern void efx_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len);
+extern int efx_online_test(struct efx_nic *efx,
+ struct efx_self_tests *tests);
+extern int efx_offline_test(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ unsigned int loopback_modes);
+
+#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 11fa9fb8f48..725d1a539c4 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -130,6 +130,15 @@ void sfe4001_poweroff(struct efx_nic *efx)
(void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
}
+/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
+ * to the FLASH_CFG_1 input on the DSP. We must keep it high at power-
+ * up to allow writing the flash (done through MDIO from userland).
+ */
+unsigned int sfe4001_phy_flash_cfg;
+module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444);
+MODULE_PARM_DESC(phy_flash_cfg,
+ "Force PHY to enter flash configuration mode");
+
/* This board uses an I2C expander to provide power to the PHY, which needs to
* be turned on before the PHY can be used.
* Context: Process context, rtnl lock held
@@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx)
out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
(1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
(1 << P0_X_TRST_LBN));
+ if (sfe4001_phy_flash_cfg)
+ out |= 1 << P0_EN_3V3X_LBN;
rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
if (rc)
@@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx)
if (in & (1 << P1_AFE_PWD_LBN))
goto done;
+ /* DSP doesn't look powered in flash config mode */
+ if (sfe4001_phy_flash_cfg)
+ goto done;
} while (++count < 20);
EFX_INFO(efx, "timed out waiting for power\n");
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index a2e9f79e47b..b1cd6deec01 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -24,6 +24,11 @@
MDIO_MMDREG_DEVS0_PCS | \
MDIO_MMDREG_DEVS0_PHYXS)
+#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
+ (1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_NETWORK))
+
/* We complain if we fail to see the link partner as 10G capable this many
* times in a row (must be > 1 as sampling the autoneg. registers is racy)
*/
@@ -72,6 +77,10 @@
#define PMA_PMD_BIST_RXD_LBN (1)
#define PMA_PMD_BIST_AFE_LBN (0)
+/* Special Software reset register */
+#define PMA_PMD_EXT_CTRL_REG 49152
+#define PMA_PMD_EXT_SSR_LBN 15
+
#define BIST_MAX_DELAY (1000)
#define BIST_POLL_DELAY (10)
@@ -86,6 +95,11 @@
#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */
#define CLK312_EN_LBN 3
+/* PHYXS registers */
+#define PHYXS_TEST1 (49162)
+#define LOOPBACK_NEAR_LBN (8)
+#define LOOPBACK_NEAR_WIDTH (1)
+
/* Boot status register */
#define PCS_BOOT_STATUS_REG (0xd000)
#define PCS_BOOT_FATAL_ERR_LBN (0)
@@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold,
struct tenxpress_phy_data {
enum tenxpress_state state;
+ enum efx_loopback_mode loopback_mode;
atomic_t bad_crc_count;
+ int tx_disabled;
int bad_lp_tries;
};
@@ -199,10 +215,12 @@ static int tenxpress_phy_init(struct efx_nic *efx)
tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
- rc = mdio_clause45_wait_reset_mmds(efx,
- TENXPRESS_REQUIRED_DEVS);
- if (rc < 0)
- goto fail;
+ if (!sfe4001_phy_flash_cfg) {
+ rc = mdio_clause45_wait_reset_mmds(efx,
+ TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ goto fail;
+ }
rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
@@ -225,6 +243,35 @@ static int tenxpress_phy_init(struct efx_nic *efx)
return rc;
}
+static int tenxpress_special_reset(struct efx_nic *efx)
+{
+ int rc, reg;
+
+ EFX_TRACE(efx, "%s\n", __func__);
+
+ /* Initiate reset */
+ reg = mdio_clause45_read(efx, efx->mii.phy_id,
+ MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG);
+ reg |= (1 << PMA_PMD_EXT_SSR_LBN);
+ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+ PMA_PMD_EXT_CTRL_REG, reg);
+
+ msleep(200);
+
+ /* Wait for the blocks to come out of reset */
+ rc = mdio_clause45_wait_reset_mmds(efx,
+ TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ return rc;
+
+ /* Try to reconfigure the device */
+ rc = tenxpress_init(efx);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
{
struct tenxpress_phy_data *pd = efx->phy_data;
@@ -299,11 +346,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
return ok;
}
+static void tenxpress_phyxs_loopback(struct efx_nic *efx)
+{
+ int phy_id = efx->mii.phy_id;
+ int ctrl1, ctrl2;
+
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+ PHYXS_TEST1);
+ if (efx->loopback_mode == LOOPBACK_PHYXS)
+ ctrl2 |= (1 << LOOPBACK_NEAR_LBN);
+ else
+ ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN);
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
+ PHYXS_TEST1, ctrl2);
+}
+
static void tenxpress_phy_reconfigure(struct efx_nic *efx)
{
+ struct tenxpress_phy_data *phy_data = efx->phy_data;
+ int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
+ TENXPRESS_LOOPBACKS);
+
if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
return;
+ /* When coming out of transmit disable, coming out of low power
+ * mode, or moving out of any PHY internal loopback mode,
+ * perform a special software reset */
+ if ((phy_data->tx_disabled && !efx->tx_disabled) ||
+ loop_change) {
+ (void) tenxpress_special_reset(efx);
+ falcon_reset_xaui(efx);
+ }
+
+ mdio_clause45_transmit_disable(efx);
+ mdio_clause45_phy_reconfigure(efx);
+ tenxpress_phyxs_loopback(efx);
+
+ phy_data->tx_disabled = efx->tx_disabled;
+ phy_data->loopback_mode = efx->loopback_mode;
efx->link_up = tenxpress_link_ok(efx, 0);
efx->link_options = GM_LPA_10000FULL;
}
@@ -431,4 +513,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
.clear_interrupt = tenxpress_phy_clear_interrupt,
.reset_xaui = tenxpress_reset_xaui,
.mmds = TENXPRESS_REQUIRED_DEVS,
+ .loopbacks = TENXPRESS_LOOPBACKS,
};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index fbb866b2185..9b436f5b488 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
}
+/**
+ * struct efx_tso_header - a DMA mapped buffer for packet headers
+ * @next: Linked list of free ones.
+ * The list is protected by the TX queue lock.
+ * @unmap_len: Length to unmap for an oversize buffer, or 0.
+ * @dma_addr: The DMA address of the header below.
+ *
+ * This controls the memory used for a TSO header. Use TSOH_BUFFER()
+ * to find the packet header data. Use TSOH_SIZE() to calculate the
+ * total size required for a given packet header length. TSO headers
+ * in the free list are exactly %TSOH_STD_SIZE bytes in size.
+ */
+struct efx_tso_header {
+ union {
+ struct efx_tso_header *next;
+ size_t unmap_len;
+ };
+ dma_addr_t dma_addr;
+};
+
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb);
+static void efx_fini_tso(struct efx_tx_queue *tx_queue);
+static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh);
+
+static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
+{
+ if (buffer->tsoh) {
+ if (likely(!buffer->tsoh->unmap_len)) {
+ buffer->tsoh->next = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = buffer->tsoh;
+ } else {
+ efx_tsoh_heap_free(tx_queue, buffer->tsoh);
+ }
+ buffer->tsoh = NULL;
+ }
+}
+
/*
* Add a socket buffer to a TX queue
@@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+ if (skb_shinfo((struct sk_buff *)skb)->gso_size)
+ return efx_enqueue_skb_tso(tx_queue, skb);
+
/* Get size of the initial fragment */
len = skb_headlen(skb);
@@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
insert_ptr = (tx_queue->insert_count &
efx->type->txd_ring_mask);
buffer = &tx_queue->buffer[insert_ptr];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
EFX_BUG_ON_PARANOID(buffer->skb);
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->continuation != 1);
@@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
efx_release_tx_buffers(tx_queue);
+ /* Free up TSO header cache */
+ efx_fini_tso(tx_queue);
+
/* Release queue's stop on port, if any */
if (tx_queue->stopped) {
tx_queue->stopped = 0;
@@ -450,3 +498,619 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
}
+/* Efx TCP segmentation acceleration.
+ *
+ * Why? Because by doing it here in the driver we can go significantly
+ * faster than the kernel's generic GSO path.
+ *
+ * Requires TX checksum offload support.
+ */
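At its core, the TSO path below repeatedly stamps out a fresh copy of the Ethernet/IP/TCP headers and pairs it with up to gso_size bytes of payload, bumping the TCP sequence number and IPv4 ID as it goes. A simplified standalone sketch of just that bookkeeping, with no DMA or descriptors; the header_len and gso_size values are examples:

#include <stdio.h>

int main(void)
{
	unsigned int header_len = 54;	/* 14 + 20 + 20: Eth + IPv4 + TCP */
	unsigned int gso_size = 1448;	/* MSS */
	unsigned int payload = 4000;	/* bytes left to segment */
	unsigned int seq = 1000, ipv4_id = 42;

	while (payload) {
		unsigned int seg = payload < gso_size ? payload : gso_size;
		/* IP total length excludes the 14-byte Ethernet header. */
		unsigned int ip_tot_len = header_len - 14 + seg;

		printf("segment: seq=%u id=%u ip.tot_len=%u payload=%u\n",
		       seq, ipv4_id, ip_tot_len, seg);

		seq += seg;	/* next segment continues the byte stream */
		ipv4_id++;	/* fill the ID gap GSO would have used */
		payload -= seg;
	}
	return 0;
}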
+
+/* Number of bytes inserted at the start of a TSO header buffer,
+ * similar to NET_IP_ALIGN.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+#define TSOH_OFFSET 0
+#else
+#define TSOH_OFFSET NET_IP_ALIGN
+#endif
+
+#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
+
+/* Total size of struct efx_tso_header, buffer and padding */
+#define TSOH_SIZE(hdr_len) \
+ (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
+
+/* Size of blocks on free list. Larger blocks must be allocated from
+ * the heap.
+ */
+#define TSOH_STD_SIZE 128
+
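To see why the 128-byte free-list blocks almost always suffice: a standard Ethernet + IPv4 + TCP header is 54 bytes, and TSOH_SIZE() only adds the small efx_tso_header bookkeeping struct plus TSOH_OFFSET on top. A standalone version of that arithmetic; the struct size is whatever the host ABI gives (16 bytes is typical on 64-bit):

#include <stdio.h>

struct tso_header_stub {	/* mirrors struct efx_tso_header's layout */
	union {
		struct tso_header_stub *next;
		size_t unmap_len;
	};
	unsigned long long dma_addr;	/* stand-in for dma_addr_t */
};

#define TSOH_OFFSET	0	/* as on x86; NET_IP_ALIGN elsewhere */
#define TSOH_STD_SIZE	128
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct tso_header_stub) + TSOH_OFFSET + (hdr_len))

int main(void)
{
	unsigned int hdr = 54;	/* Ethernet + IPv4 + TCP, no options */

	printf("TSOH_SIZE(%u) = %zu, standard block = %d\n",
	       hdr, TSOH_SIZE(hdr), TSOH_STD_SIZE);
	/* Comfortably under 128, so the header comes from the free list;
	 * only unusually long headers take the heap-allocation path. */
	return 0;
}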
+#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
+#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
+#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
+#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
+
+/**
+ * struct tso_state - TSO state for an SKB
+ * @remaining_len: Bytes of data we've yet to segment
+ * @seqnum: Current sequence number
+ * @packet_space: Remaining space in current packet
+ * @ifc: Input fragment cursor.
+ * Where we are in the current fragment of the incoming SKB. These
+ * values get updated in place when we split a fragment over
+ * multiple packets.
+ * @p: Parameters.
+ * These values are set once at the start of the TSO send and do
+ * not get changed as the routine progresses.
+ *
+ * The state used during segmentation. It is put into this data structure
+ * just to make it easy to pass into inline functions.
+ */
+struct tso_state {
+ unsigned remaining_len;
+ unsigned seqnum;
+ unsigned packet_space;
+
+ struct {
+ /* DMA address of current position */
+ dma_addr_t dma_addr;
+ /* Remaining length */
+ unsigned int len;
+ /* DMA address and length of the whole fragment */
+ unsigned int unmap_len;
+ dma_addr_t unmap_addr;
+ struct page *page;
+ unsigned page_off;
+ } ifc;
+
+ struct {
+ /* The number of bytes of header */
+ unsigned int header_length;
+
+ /* The number of bytes to put in each outgoing segment. */
+ int full_packet_size;
+
+ /* Current IPv4 ID, host endian. */
+ unsigned ipv4_id;
+ } p;
+};
+
+
+/*
+ * Verify that our various assumptions about sk_buffs and the conditions
+ * under which TSO will be attempted hold true.
+ */
+static inline void efx_tso_check_safe(const struct sk_buff *skb)
+{
+ EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
+ EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
+ skb->protocol);
+ EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+ EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
+ + (tcp_hdr(skb)->doff << 2u)) >
+ skb_headlen(skb));
+}
+
+
+/*
+ * Allocate a page worth of efx_tso_header structures, and string them
+ * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
+ */
+static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+{
+
+ struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct efx_tso_header *tsoh;
+ dma_addr_t dma_addr;
+ u8 *base_kva, *kva;
+
+ base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+ if (base_kva == NULL) {
+ EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
+ " headers\n");
+ return -ENOMEM;
+ }
+
+ /* pci_alloc_consistent() allocates pages. */
+ EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
+
+ for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
+ tsoh = (struct efx_tso_header *)kva;
+ tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
+ tsoh->next = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = tsoh;
+ }
+
+ return 0;
+}
+
+
+/* Free up a TSO header, and all others in the same page. */
+static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh,
+ struct pci_dev *pci_dev)
+{
+ struct efx_tso_header **p;
+ unsigned long base_kva;
+ dma_addr_t base_dma;
+
+ base_kva = (unsigned long)tsoh & PAGE_MASK;
+ base_dma = tsoh->dma_addr & PAGE_MASK;
+
+ p = &tx_queue->tso_headers_free;
+ while (*p != NULL)
+ if (((unsigned long)*p & PAGE_MASK) == base_kva)
+ *p = (*p)->next;
+ else
+ p = &(*p)->next;
+
+ pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+}
+
+static struct efx_tso_header *
+efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
+{
+ struct efx_tso_header *tsoh;
+
+ tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
+ if (unlikely(!tsoh))
+ return NULL;
+
+ tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+ TSOH_BUFFER(tsoh), header_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+ kfree(tsoh);
+ return NULL;
+ }
+
+ tsoh->unmap_len = header_len;
+ return tsoh;
+}
+
+static void
+efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
+{
+ pci_unmap_single(tx_queue->efx->pci_dev,
+ tsoh->dma_addr, tsoh->unmap_len,
+ PCI_DMA_TODEVICE);
+ kfree(tsoh);
+}
+
+/**
+ * efx_tx_queue_insert - push descriptors onto the TX queue
+ * @tx_queue: Efx TX queue
+ * @dma_addr: DMA address of fragment
+ * @len: Length of fragment
+ * @skb: Only non-null for end of last segment
+ * @end_of_packet: True if last fragment in a packet
+ * @unmap_addr: DMA address of fragment for unmapping
+ * @unmap_len: Only set this in last segment of a fragment
+ *
+ * Push descriptors onto the TX queue. Return 0 on success or 1 if
+ * @tx_queue is full.
+ */
+static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned len,
+ const struct sk_buff *skb, int end_of_packet,
+ dma_addr_t unmap_addr, unsigned unmap_len)
+{
+ struct efx_tx_buffer *buffer;
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned dma_len, fill_level, insert_ptr, misalign;
+ int q_space;
+
+ EFX_BUG_ON_PARANOID(len <= 0);
+
+ fill_level = tx_queue->insert_count - tx_queue->old_read_count;
+ /* -1 as there is no way to represent all descriptors used */
+ q_space = efx->type->txd_ring_mask - 1 - fill_level;
+
+ while (1) {
+ if (unlikely(q_space-- <= 0)) {
+ /* It might be that completions have happened
+ * since the xmit path last checked. Update
+ * the xmit path's copy of read_count.
+ */
+ ++tx_queue->stopped;
+ /* This memory barrier protects the change of
+ * stopped from the access of read_count. */
+ smp_mb();
+ tx_queue->old_read_count =
+ *(volatile unsigned *)&tx_queue->read_count;
+ fill_level = (tx_queue->insert_count
+ - tx_queue->old_read_count);
+ q_space = efx->type->txd_ring_mask - 1 - fill_level;
+ if (unlikely(q_space-- <= 0))
+ return 1;
+ smp_mb();
+ --tx_queue->stopped;
+ }
+
+ insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+ buffer = &tx_queue->buffer[insert_ptr];
+ ++tx_queue->insert_count;
+
+ EFX_BUG_ON_PARANOID(tx_queue->insert_count -
+ tx_queue->read_count >
+ efx->type->txd_ring_mask);
+
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
+
+ buffer->dma_addr = dma_addr;
+
+ /* Ensure we do not cross a boundary unsupported by H/W */
+ dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
+
+ misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
+ if (misalign && dma_len + misalign > 512)
+ dma_len = 512 - misalign;
+
+ /* If there is enough space to send then do so */
+ if (dma_len >= len)
+ break;
+
+ buffer->len = dma_len; /* Don't set the other members */
+ dma_addr += dma_len;
+ len -= dma_len;
+ }
+
+ EFX_BUG_ON_PARANOID(!len);
+ buffer->len = len;
+ buffer->skb = skb;
+ buffer->continuation = !end_of_packet;
+ buffer->unmap_addr = unmap_addr;
+ buffer->unmap_len = unmap_len;
+ return 0;
+}
+
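The dma_len computation in efx_tx_queue_insert() splits a fragment at hardware-imposed boundaries: (~dma_addr & mask) + 1 is simply the number of bytes left before dma_addr next crosses a (mask + 1)-aligned boundary. A quick standalone check of that identity; the 4 KiB mask is an example, not necessarily Falcon's tx_dma_mask:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tx_dma_mask = 0xfff;	/* 4 KiB boundary, example only */
	uint64_t dma_addr = 0x12345f80;	/* 0x80 bytes below a boundary */
	unsigned int len = 0x200;	/* the fragment is longer than that */

	unsigned int dma_len = (unsigned int)((~dma_addr & tx_dma_mask) + 1);
	if (dma_len > len)
		dma_len = len;

	/* The first descriptor carries 0x80 bytes; the loop then resumes
	 * at the boundary with the remaining 0x180 bytes. */
	printf("first descriptor: 0x%x bytes\n", dma_len);
	return 0;
}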
+
+/*
+ * Put a TSO header into the TX queue.
+ *
+ * This is special-cased because we know that it is small enough to fit in
+ * a single fragment, and we know it doesn't cross a page boundary. It
+ * also allows us to not worry about end-of-packet etc.
+ */
+static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh, unsigned len)
+{
+ struct efx_tx_buffer *buffer;
+
+ buffer = &tx_queue->buffer[tx_queue->insert_count &
+ tx_queue->efx->type->txd_ring_mask];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
+ buffer->len = len;
+ buffer->dma_addr = tsoh->dma_addr;
+ buffer->tsoh = tsoh;
+
+ ++tx_queue->insert_count;
+}
+
+
+/* Remove descriptors put into a tx_queue. */
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != tx_queue->write_count) {
+ --tx_queue->insert_count;
+ buffer = &tx_queue->buffer[tx_queue->insert_count &
+ tx_queue->efx->type->txd_ring_mask];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ buffer->len = 0;
+ buffer->continuation = 1;
+ if (buffer->unmap_len) {
+ pci_unmap_page(tx_queue->efx->pci_dev,
+ buffer->unmap_addr,
+ buffer->unmap_len, PCI_DMA_TODEVICE);
+ buffer->unmap_len = 0;
+ }
+ }
+}
+
+
+/* Parse the SKB header and initialise state. */
+static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
+{
+ /* All ethernet/IP/TCP headers combined size is TCP header size
+ * plus offset of TCP header relative to start of packet.
+ */
+ st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
+ + PTR_DIFF(tcp_hdr(skb), skb->data));
+ st->p.full_packet_size = (st->p.header_length
+ + skb_shinfo(skb)->gso_size);
+
+ st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
+ st->seqnum = ntohl(tcp_hdr(skb)->seq);
+
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
+
+ st->packet_space = st->p.full_packet_size;
+ st->remaining_len = skb->len - st->p.header_length;
+}
+
+
+/**
+ * tso_get_fragment - record fragment details and map for DMA
+ * @st: TSO state
+ * @efx: Efx NIC
+ * @len: Length of fragment
+ * @page: Page containing the fragment data
+ * @page_off: Offset of the fragment data within @page
+ *
+ * Record fragment details and map for DMA. Return 0 on success, or
+ * -%ENOMEM if DMA mapping fails.
+ */
+static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+ int len, struct page *page, int page_off)
+{
+
+ st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
+ len, PCI_DMA_TODEVICE);
+ if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+ st->ifc.unmap_len = len;
+ st->ifc.len = len;
+ st->ifc.dma_addr = st->ifc.unmap_addr;
+ st->ifc.page = page;
+ st->ifc.page_off = page_off;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+
+/**
+ * tso_fill_packet_with_fragment - form descriptors for the current fragment
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Form descriptors for the current fragment, until we reach the end
+ * of fragment or end-of-packet. Return 0 on success, 1 if not enough
+ * space in @tx_queue.
+ */
+static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+
+ int n, end_of_packet, rc;
+
+ if (st->ifc.len == 0)
+ return 0;
+ if (st->packet_space == 0)
+ return 0;
+
+ EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
+ EFX_BUG_ON_PARANOID(st->packet_space <= 0);
+
+ n = min(st->ifc.len, st->packet_space);
+
+ st->packet_space -= n;
+ st->remaining_len -= n;
+ st->ifc.len -= n;
+ st->ifc.page_off += n;
+ end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
+
+ rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
+ st->remaining_len ? NULL : skb,
+ end_of_packet, st->ifc.unmap_addr,
+ st->ifc.len ? 0 : st->ifc.unmap_len);
+
+ st->ifc.dma_addr += n;
+
+ return rc;
+}
+
+
+/**
+ * tso_start_new_packet - generate a new header and prepare for the new packet
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Generate a new header and prepare for the new packet. Return 0 on
+ * success, or -1 if failed to alloc header.
+ */
+static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tso_header *tsoh;
+ struct iphdr *tsoh_iph;
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ u8 *header;
+
+ /* Allocate a DMA-mapped header buffer. */
+ if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
+ if (tx_queue->tso_headers_free == NULL)
+ if (efx_tsoh_block_alloc(tx_queue))
+ return -1;
+ EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
+ tsoh = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = tsoh->next;
+ tsoh->unmap_len = 0;
+ } else {
+ tx_queue->tso_long_headers++;
+ tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
+ if (unlikely(!tsoh))
+ return -1;
+ }
+
+ header = TSOH_BUFFER(tsoh);
+ tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+ tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+
+ /* Copy and update the headers. */
+ memcpy(header, skb->data, st->p.header_length);
+
+ tsoh_th->seq = htonl(st->seqnum);
+ st->seqnum += skb_shinfo(skb)->gso_size;
+ if (st->remaining_len > skb_shinfo(skb)->gso_size) {
+ /* This packet will not finish the TSO burst. */
+ ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
+ tsoh_th->fin = 0;
+ tsoh_th->psh = 0;
+ } else {
+ /* This packet will be the last in the TSO burst. */
+ ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
+ + st->remaining_len);
+ tsoh_th->fin = tcp_hdr(skb)->fin;
+ tsoh_th->psh = tcp_hdr(skb)->psh;
+ }
+ tsoh_iph->tot_len = htons(ip_length);
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ tsoh_iph->id = htons(st->p.ipv4_id);
+ st->p.ipv4_id++;
+
+ st->packet_space = skb_shinfo(skb)->gso_size;
+ ++tx_queue->tso_packets;
+
+ /* Form a descriptor for this header. */
+ efx_tso_put_header(tx_queue, tsoh, st->p.header_length);
+
+ return 0;
+}
+
+
+/**
+ * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ *
+ * Context: You must hold netif_tx_lock() to call this function.
+ *
+ * Add socket buffer @skb to @tx_queue, doing TSO, or return non-zero if
+ * @skb could not be enqueued. In all cases @skb is consumed. Return
+ * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ */
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb)
+{
+ int frag_i, rc, rc2 = NETDEV_TX_OK;
+ struct tso_state state;
+ skb_frag_t *f;
+
+ /* Verify TSO is safe - these checks should never fail. */
+ efx_tso_check_safe(skb);
+
+ EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+
+ tso_start(&state, skb);
+
+ /* Assume that skb header area contains exactly the headers, and
+ * all payload is in the frag list.
+ */
+ if (skb_headlen(skb) == state.p.header_length) {
+ /* Grab the first payload fragment. */
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
+ frag_i = 0;
+ f = &skb_shinfo(skb)->frags[frag_i];
+ rc = tso_get_fragment(&state, tx_queue->efx,
+ f->size, f->page, f->page_offset);
+ if (rc)
+ goto mem_err;
+ } else {
+ /* It may look like this code fragment assumes that the
+ * skb->data portion does not cross a page boundary, but
+ * that is not the case. It is guaranteed to be direct
+ * mapped memory, and therefore is physically contiguous,
+ * and so DMA will work fine. kmap_atomic() on this region
+ * will just return the direct mapping, so that will work
+ * too.
+ */
+ int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
+ int hl = state.p.header_length;
+ rc = tso_get_fragment(&state, tx_queue->efx,
+ skb_headlen(skb) - hl,
+ virt_to_page(skb->data), page_off + hl);
+ if (rc)
+ goto mem_err;
+ frag_i = -1;
+ }
+
+ if (tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+
+ while (1) {
+ rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
+ if (unlikely(rc))
+ goto stop;
+
+ /* Move onto the next fragment? */
+ if (state.ifc.len == 0) {
+ if (++frag_i >= skb_shinfo(skb)->nr_frags)
+ /* End of payload reached. */
+ break;
+ f = &skb_shinfo(skb)->frags[frag_i];
+ rc = tso_get_fragment(&state, tx_queue->efx,
+ f->size, f->page, f->page_offset);
+ if (rc)
+ goto mem_err;
+ }
+
+ /* Start at new packet? */
+ if (state.packet_space == 0 &&
+ tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+ }
+
+ /* Pass off to hardware */
+ falcon_push_buffers(tx_queue);
+
+ tx_queue->tso_bursts++;
+ return NETDEV_TX_OK;
+
+ mem_err:
+ EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
+ " error\n");
+ dev_kfree_skb_any((struct sk_buff *)skb);
+ goto unwind;
+
+ stop:
+ rc2 = NETDEV_TX_BUSY;
+
+ /* Stop the queue if it wasn't stopped before. */
+ if (tx_queue->stopped == 1)
+ efx_stop_queue(tx_queue->efx);
+
+ unwind:
+ efx_enqueue_unwind(tx_queue);
+ return rc2;
+}
+
+
+/*
+ * Free up all TSO data structures associated with tx_queue. This
+ * routine should be called only when the tx_queue is both empty and
+ * no longer in use.
+ */
+static void efx_fini_tso(struct efx_tx_queue *tx_queue)
+{
+ unsigned i;
+
+ if (tx_queue->buffer)
+ for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
+ efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+
+ while (tx_queue->tso_headers_free != NULL)
+ efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
+ tx_queue->efx->pci_dev);
+}
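
The kernel-doc above describes how tso_fill_packet_with_fragment() carves each payload fragment into MSS-sized packets by consuming min(fragment bytes left, packet space left) at a time. A standalone C sketch of that accounting (the fragment sizes and MSS value below are illustrative, not taken from the driver):

#include <stdio.h>

/* Standalone sketch of the TSO fill loop: split payload fragments
 * into MSS-sized packets by always consuming min(fragment left,
 * space left in the current packet).  Numbers are illustrative. */
static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int frags[] = { 3000, 1200, 500 };	/* payload fragments */
	unsigned int mss = 1448;			/* payload budget per packet */
	unsigned int packet_space = mss;
	unsigned int packet = 0;

	for (unsigned int i = 0; i < sizeof(frags) / sizeof(frags[0]); i++) {
		unsigned int left = frags[i];

		while (left) {
			unsigned int n = min_u(left, packet_space);

			printf("packet %u: %u bytes from fragment %u\n",
			       packet, n, i);
			left -= n;
			packet_space -= n;
			if (packet_space == 0) {	/* packet full: start a new one */
				packet++;
				packet_space = mss;
			}
		}
	}
	return 0;
}
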
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 66dd5bf1eaa..3b9f9ddbc37 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -24,6 +24,10 @@
MDIO_MMDREG_DEVS0_PMAPMD | \
MDIO_MMDREG_DEVS0_PHYXS)
+#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_NETWORK))
+
/****************************************************************************/
/* Quake-specific MDIO registers */
#define MDIO_QUAKE_LED0_REG (0xD006)
@@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode)
mode);
}
+struct xfp_phy_data {
+ int tx_disabled;
+};
+
#define XFP_MAX_RESET_TIME 500
#define XFP_RESET_WAIT 10
@@ -72,18 +80,31 @@ static int xfp_reset_phy(struct efx_nic *efx)
static int xfp_phy_init(struct efx_nic *efx)
{
+ struct xfp_phy_data *phy_data;
u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
int rc;
+ phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
+ efx->phy_data = (void *) phy_data;
+
EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
" %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
MDIO_ID_REV(devid));
+ phy_data->tx_disabled = efx->tx_disabled;
+
rc = xfp_reset_phy(efx);
EFX_INFO(efx, "XFP: PHY init %s.\n",
rc ? "failed" : "successful");
+ if (rc < 0)
+ goto fail;
+ return 0;
+
+ fail:
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
return rc;
}
@@ -110,6 +131,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx)
static void xfp_phy_reconfigure(struct efx_nic *efx)
{
+ struct xfp_phy_data *phy_data = efx->phy_data;
+
+ /* Reset the PHY when moving from tx off to tx on */
+ if (phy_data->tx_disabled && !efx->tx_disabled)
+ xfp_reset_phy(efx);
+
+ mdio_clause45_transmit_disable(efx);
+ mdio_clause45_phy_reconfigure(efx);
+
+ phy_data->tx_disabled = efx->tx_disabled;
efx->link_up = xfp_link_ok(efx);
efx->link_options = GM_LPA_10000FULL;
}
@@ -119,6 +150,10 @@ static void xfp_phy_fini(struct efx_nic *efx)
{
/* Clobber the LED if it was blinking */
efx->board_info.blink(efx, 0);
+
+ /* Free the context block */
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
}
struct efx_phy_operations falcon_xfp_phy_ops = {
@@ -129,4 +164,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
.clear_interrupt = xfp_phy_clear_interrupt,
.reset_xaui = efx_port_dummy_op_void,
.mmds = XFP_REQUIRED_DEVS,
+ .loopbacks = XFP_LOOPBACKS,
};
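
The xfp_phy.c hunk above adds a per-PHY context block that xfp_phy_init() allocates with kzalloc() and xfp_phy_fini() (or the init failure path) releases. A minimal standalone sketch of that allocate-in-init / free-in-fini lifecycle, with an explicit allocation check included; all names here are hypothetical and calloc()/free() merely stand in for kzalloc()/kfree():

#include <stdlib.h>

/* Hypothetical context block mirroring the alloc-in-init /
 * free-in-fini lifecycle; calloc() stands in for kzalloc(). */
struct phy_ctx {
	int tx_disabled;
};

struct phy {
	void *phy_data;
	int tx_disabled;
};

static int phy_init(struct phy *phy)
{
	struct phy_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)			/* always check the allocation */
		return -1;
	ctx->tx_disabled = phy->tx_disabled;
	phy->phy_data = ctx;
	return 0;
}

static void phy_fini(struct phy *phy)
{
	free(phy->phy_data);		/* free() tolerates NULL, like kfree() */
	phy->phy_data = NULL;
}

int main(void)
{
	struct phy phy = { .phy_data = NULL, .tx_disabled = 1 };

	if (phy_init(&phy) == 0)
		phy_fini(&phy);
	return 0;
}
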
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 7bb3ba9bcbd..c0a5eea2000 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1966,13 +1966,13 @@ struct sky2_status_le {
struct tx_ring_info {
struct sk_buff *skb;
DECLARE_PCI_UNMAP_ADDR(mapaddr);
- DECLARE_PCI_UNMAP_ADDR(maplen);
+ DECLARE_PCI_UNMAP_LEN(maplen);
};
struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
- DECLARE_PCI_UNMAP_ADDR(data_size);
+ DECLARE_PCI_UNMAP_LEN(data_size);
dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
};
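
The sky2.h fix matters because the DECLARE_PCI_UNMAP_ADDR()/DECLARE_PCI_UNMAP_LEN() declarations must pair with the pci_unmap_addr*()/pci_unmap_len*() accessors used at unmap time, and in the real kernel those macros can compile away entirely when unmap state is not tracked, so the accessor name has to match the declared field. A standalone sketch using simplified re-creations of the macros (the real definitions live in this era's <linux/pci.h>):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the old PCI unmap-state macros.  The
 * point of the sky2.h change: a stored DMA *length* must be
 * declared with the _LEN macro so the *_len accessors name the
 * right field (and the right type). */
typedef uint64_t dma_addr_t;

#define DECLARE_PCI_UNMAP_ADDR(NAME)	dma_addr_t NAME
#define DECLARE_PCI_UNMAP_LEN(NAME)	uint32_t NAME
#define pci_unmap_addr_set(P, N, V)	((P)->N = (V))
#define pci_unmap_len_set(P, N, V)	((P)->N = (V))
#define pci_unmap_addr(P, N)		((P)->N)
#define pci_unmap_len(P, N)		((P)->N)

struct tx_ring_info {
	DECLARE_PCI_UNMAP_ADDR(mapaddr);
	DECLARE_PCI_UNMAP_LEN(maplen);	/* was wrongly declared _ADDR in sky2.h */
};

int main(void)
{
	struct tx_ring_info re;

	pci_unmap_addr_set(&re, mapaddr, 0x12345678abcdefULL);
	pci_unmap_len_set(&re, maplen, 1514);
	printf("addr=%#llx len=%u\n",
	       (unsigned long long)pci_unmap_addr(&re, mapaddr),
	       (unsigned)pci_unmap_len(&re, maplen));
	return 0;
}
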
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a59c1f224aa..2511ca7a12a 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -434,10 +434,6 @@ static int uli526x_open(struct net_device *dev)
ULI526X_DBUG(0, "uli526x_open", 0);
- ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev);
- if (ret)
- return ret;
-
/* system variable init */
db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
db->tx_packet_cnt = 0;
@@ -456,6 +452,10 @@ static int uli526x_open(struct net_device *dev)
/* Initialize ULI526X board */
uli526x_init(dev);
+ ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
+
/* Active System Interface */
netif_wake_queue(dev);
@@ -1368,6 +1368,12 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
* This setup frame initialize ULI526X address filter mode
*/
+#ifdef __BIG_ENDIAN
+#define FLT_SHIFT 16
+#else
+#define FLT_SHIFT 0
+#endif
+
static void send_filter_frame(struct net_device *dev, int mc_cnt)
{
struct uli526x_board_info *db = netdev_priv(dev);
@@ -1384,27 +1390,27 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
/* Node address */
addrptr = (u16 *) dev->dev_addr;
- *suptr++ = addrptr[0];
- *suptr++ = addrptr[1];
- *suptr++ = addrptr[2];
+ *suptr++ = addrptr[0] << FLT_SHIFT;
+ *suptr++ = addrptr[1] << FLT_SHIFT;
+ *suptr++ = addrptr[2] << FLT_SHIFT;
/* broadcast address */
- *suptr++ = 0xffff;
- *suptr++ = 0xffff;
- *suptr++ = 0xffff;
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
addrptr = (u16 *) mcptr->dmi_addr;
- *suptr++ = addrptr[0];
- *suptr++ = addrptr[1];
- *suptr++ = addrptr[2];
+ *suptr++ = addrptr[0] << FLT_SHIFT;
+ *suptr++ = addrptr[1] << FLT_SHIFT;
+ *suptr++ = addrptr[2] << FLT_SHIFT;
}
for (; i<14; i++) {
- *suptr++ = 0xffff;
- *suptr++ = 0xffff;
- *suptr++ = 0xffff;
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
+ *suptr++ = 0xffff << FLT_SHIFT;
}
/* prepare the setup frame */
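
send_filter_frame() builds the setup frame out of 32-bit words that the chip reads as little-endian, with the useful 16 bits in the lowest-addressed bytes; FLT_SHIFT moves the halfword into those bytes on big-endian hosts. A standalone demonstration of why the shift yields the same byte layout on either endianness (the MAC bytes are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Standalone illustration of the FLT_SHIFT trick: the device
 * expects the two MAC bytes in the lowest-addressed bytes of each
 * 32-bit setup-frame word.  Shifting the natively loaded halfword
 * by 16 on a big-endian host lands it in those same bytes. */
static uint16_t load16(const uint8_t *p, int big_endian)
{
	return big_endian ? (uint16_t)(p[0] << 8 | p[1])
			  : (uint16_t)(p[1] << 8 | p[0]);
}

static void store32(uint8_t *p, uint32_t v, int big_endian)
{
	if (big_endian) {
		p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
	} else {
		p[3] = v >> 24; p[2] = v >> 16; p[1] = v >> 8; p[0] = v;
	}
}

int main(void)
{
	const uint8_t mac[2] = { 0xde, 0xad };	/* first two MAC bytes */
	uint8_t frame[4];

	for (int big_endian = 0; big_endian <= 1; big_endian++) {
		int flt_shift = big_endian ? 16 : 0;
		uint32_t word = (uint32_t)load16(mac, big_endian) << flt_shift;

		store32(frame, word, big_endian);
		printf("%s host: frame bytes %02x %02x %02x %02x\n",
		       big_endian ? "big-endian   " : "little-endian",
		       frame[0], frame[1], frame[2], frame[3]);
	}
	return 0;	/* both cases print: de ad 00 00 */
}
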
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 281ce3d3953..ca0bdac07a7 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -62,7 +62,6 @@
#endif /* UGETH_VERBOSE_DEBUG */
#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1
-void uec_set_ethtool_ops(struct net_device *netdev);
static DEFINE_SPINLOCK(ugeth_lock);
@@ -216,7 +215,8 @@ static struct list_head *dequeue(struct list_head *lh)
}
}
-static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
+static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
+ u8 __iomem *bd)
{
struct sk_buff *skb = NULL;
@@ -236,21 +236,22 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
skb->dev = ugeth->dev;
- out_be32(&((struct qe_bd *)bd)->buf,
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
dma_map_single(NULL,
skb->data,
ugeth->ug_info->uf_info.max_rx_buf_length +
UCC_GETH_RX_DATA_BUF_ALIGNMENT,
DMA_FROM_DEVICE));
- out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W)));
+ out_be32((u32 __iomem *)bd,
+ (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
return skb;
}
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
- u8 *bd;
+ u8 __iomem *bd;
u32 bd_status;
struct sk_buff *skb;
int i;
@@ -259,7 +260,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
i = 0;
do {
- bd_status = in_be32((u32*)bd);
+ bd_status = in_be32((u32 __iomem *)bd);
skb = get_new_skb(ugeth, bd);
if (!skb) /* If can not allocate data buffer,
@@ -277,7 +278,7 @@ static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
}
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
- volatile u32 *p_start,
+ u32 *p_start,
u8 num_entries,
u32 thread_size,
u32 thread_alignment,
@@ -316,7 +317,7 @@ static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
}
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
- volatile u32 *p_start,
+ u32 *p_start,
u8 num_entries,
enum qe_risc_allocation risc,
int skip_page_for_first_entry)
@@ -326,21 +327,22 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth,
int snum;
for (i = 0; i < num_entries; i++) {
+ u32 val = *p_start;
+
/* Check that this entry was actually valid --
needed in case failed in allocations */
- if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
snum =
- (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
ENET_INIT_PARAM_SNUM_SHIFT;
qe_put_snum((u8) snum);
if (!((i == 0) && skip_page_for_first_entry)) {
/* First entry of Rx does not have page */
init_enet_offset =
- (in_be32(p_start) &
- ENET_INIT_PARAM_PTR_MASK);
+ (val & ENET_INIT_PARAM_PTR_MASK);
qe_muram_free(init_enet_offset);
}
- *(p_start++) = 0; /* Just for cosmetics */
+ *p_start++ = 0;
}
}
@@ -349,7 +351,7 @@ static int return_init_enet_entries(struct ucc_geth_private *ugeth,
#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
- volatile u32 *p_start,
+ u32 __iomem *p_start,
u8 num_entries,
u32 thread_size,
enum qe_risc_allocation risc,
@@ -360,11 +362,13 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
int snum;
for (i = 0; i < num_entries; i++) {
+ u32 val = in_be32(p_start);
+
/* Check that this entry was actually valid --
needed in case failed in allocations */
- if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
snum =
- (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
ENET_INIT_PARAM_SNUM_SHIFT;
qe_put_snum((u8) snum);
if (!((i == 0) && skip_page_for_first_entry)) {
@@ -440,7 +444,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
- struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
if (!(paddr_num < NUM_OF_PADDRS)) {
ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__);
@@ -448,7 +452,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
}
p_82xx_addr_filt =
- (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
addressfiltering;
/* Writing address ff.ff.ff.ff.ff.ff disables address
@@ -463,11 +467,11 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
u8 *p_enet_addr)
{
- struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
u32 cecr_subblock;
p_82xx_addr_filt =
- (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
addressfiltering;
cecr_subblock =
@@ -487,7 +491,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
- struct ucc_geth *ug_regs;
+ struct ucc_geth __iomem *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf;
@@ -507,7 +511,7 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
{
struct ucc_fast_private *uccf;
- struct ucc_geth *ug_regs;
+ struct ucc_geth __iomem *ug_regs;
u32 maccfg2, uccm;
uccf = ugeth->uccf;
@@ -538,13 +542,13 @@ static void get_statistics(struct ucc_geth_private *ugeth,
rx_firmware_statistics,
struct ucc_geth_hardware_statistics *hardware_statistics)
{
- struct ucc_fast *uf_regs;
- struct ucc_geth *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
ug_regs = ugeth->ug_regs;
- uf_regs = (struct ucc_fast *) ug_regs;
+ uf_regs = (struct ucc_fast __iomem *) ug_regs;
p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
@@ -1132,9 +1136,9 @@ static void dump_regs(struct ucc_geth_private *ugeth)
}
#endif /* DEBUG */
-static void init_default_reg_vals(volatile u32 *upsmr_register,
- volatile u32 *maccfg1_register,
- volatile u32 *maccfg2_register)
+static void init_default_reg_vals(u32 __iomem *upsmr_register,
+ u32 __iomem *maccfg1_register,
+ u32 __iomem *maccfg2_register)
{
out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
@@ -1148,7 +1152,7 @@ static int init_half_duplex_params(int alt_beb,
u8 alt_beb_truncation,
u8 max_retransmissions,
u8 collision_window,
- volatile u32 *hafdup_register)
+ u32 __iomem *hafdup_register)
{
u32 value = 0;
@@ -1180,7 +1184,7 @@ static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
u8 non_btb_ipg,
u8 min_ifg,
u8 btb_ipg,
- volatile u32 *ipgifg_register)
+ u32 __iomem *ipgifg_register)
{
u32 value = 0;
@@ -1215,9 +1219,9 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
int tx_flow_control_enable,
u16 pause_period,
u16 extension_field,
- volatile u32 *upsmr_register,
- volatile u32 *uempr_register,
- volatile u32 *maccfg1_register)
+ u32 __iomem *upsmr_register,
+ u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register)
{
u32 value = 0;
@@ -1243,8 +1247,8 @@ int init_flow_control_params(u32 automatic_flow_control_mode,
static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
int auto_zero_hardware_statistics,
- volatile u32 *upsmr_register,
- volatile u16 *uescr_register)
+ u32 __iomem *upsmr_register,
+ u16 __iomem *uescr_register)
{
u32 upsmr_value = 0;
u16 uescr_value = 0;
@@ -1270,12 +1274,12 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
static int init_firmware_statistics_gathering_mode(int
enable_tx_firmware_statistics,
int enable_rx_firmware_statistics,
- volatile u32 *tx_rmon_base_ptr,
+ u32 __iomem *tx_rmon_base_ptr,
u32 tx_firmware_statistics_structure_address,
- volatile u32 *rx_rmon_base_ptr,
+ u32 __iomem *rx_rmon_base_ptr,
u32 rx_firmware_statistics_structure_address,
- volatile u16 *temoder_register,
- volatile u32 *remoder_register)
+ u16 __iomem *temoder_register,
+ u32 __iomem *remoder_register)
{
/* Note: this function does not check if */
/* the parameters it receives are NULL */
@@ -1307,8 +1311,8 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
u8 address_byte_3,
u8 address_byte_4,
u8 address_byte_5,
- volatile u32 *macstnaddr1_register,
- volatile u32 *macstnaddr2_register)
+ u32 __iomem *macstnaddr1_register,
+ u32 __iomem *macstnaddr2_register)
{
u32 value = 0;
@@ -1344,7 +1348,7 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
}
static int init_check_frame_length_mode(int length_check,
- volatile u32 *maccfg2_register)
+ u32 __iomem *maccfg2_register)
{
u32 value = 0;
@@ -1360,7 +1364,7 @@ static int init_check_frame_length_mode(int length_check,
}
static int init_preamble_length(u8 preamble_length,
- volatile u32 *maccfg2_register)
+ u32 __iomem *maccfg2_register)
{
u32 value = 0;
@@ -1376,7 +1380,7 @@ static int init_preamble_length(u8 preamble_length,
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
- int promiscuous, volatile u32 *upsmr_register)
+ int promiscuous, u32 __iomem *upsmr_register)
{
u32 value = 0;
@@ -1403,7 +1407,7 @@ static int init_rx_parameters(int reject_broadcast,
}
static int init_max_rx_buff_len(u16 max_rx_buf_len,
- volatile u16 *mrblr_register)
+ u16 __iomem *mrblr_register)
{
/* max_rx_buf_len value must be a multiple of 128 */
if ((max_rx_buf_len == 0)
@@ -1415,8 +1419,8 @@ static int init_max_rx_buff_len(u16 max_rx_buf_len,
}
static int init_min_frame_len(u16 min_frame_length,
- volatile u16 *minflr_register,
- volatile u16 *mrblr_register)
+ u16 __iomem *minflr_register,
+ u16 __iomem *mrblr_register)
{
u16 mrblr_value = 0;
@@ -1431,8 +1435,8 @@ static int init_min_frame_len(u16 min_frame_length,
static int adjust_enet_interface(struct ucc_geth_private *ugeth)
{
struct ucc_geth_info *ug_info;
- struct ucc_geth *ug_regs;
- struct ucc_fast *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
int ret_val;
u32 upsmr, maccfg2, tbiBaseAddress;
u16 value;
@@ -1517,8 +1521,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
static void adjust_link(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct ucc_geth *ug_regs;
- struct ucc_fast *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
struct phy_device *phydev = ugeth->phydev;
unsigned long flags;
int new_state = 0;
@@ -1678,9 +1682,9 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
uccf = ugeth->uccf;
/* Clear acknowledge bit */
- temp = ugeth->p_rx_glbl_pram->rxgstpack;
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
- ugeth->p_rx_glbl_pram->rxgstpack = temp;
+ out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
/* Keep issuing command and checking acknowledge bit until
it is asserted, according to spec */
@@ -1692,7 +1696,7 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
QE_CR_PROTOCOL_ETHERNET, 0);
- temp = ugeth->p_rx_glbl_pram->rxgstpack;
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
uccf->stopped_rx = 1;
@@ -1991,19 +1995,20 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
enum enet_addr_type
enet_addr_type)
{
- struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
struct ucc_fast_private *uccf;
enum comm_dir comm_dir;
struct list_head *p_lh;
u16 i, num;
- u32 *addr_h, *addr_l;
+ u32 __iomem *addr_h;
+ u32 __iomem *addr_l;
u8 *p_counter;
uccf = ugeth->uccf;
p_82xx_addr_filt =
- (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
- addressfiltering;
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *)
+ ugeth->p_rx_glbl_pram->addressfiltering;
if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
addr_h = &(p_82xx_addr_filt->gaddr_h);
@@ -2079,7 +2084,7 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge
static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
{
u16 i, j;
- u8 *bd;
+ u8 __iomem *bd;
if (!ugeth)
return;
@@ -2154,8 +2159,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
if (ugeth->tx_skbuff[i][j]) {
dma_unmap_single(NULL,
- ((struct qe_bd *)bd)->buf,
- (in_be32((u32 *)bd) &
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ (in_be32((u32 __iomem *)bd) &
BD_LENGTH_MASK),
DMA_TO_DEVICE);
dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
@@ -2182,7 +2187,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
if (ugeth->rx_skbuff[i][j]) {
dma_unmap_single(NULL,
- ((struct qe_bd *)bd)->buf,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
ugeth->ug_info->
uf_info.max_rx_buf_length +
UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@@ -2218,8 +2223,8 @@ static void ucc_geth_set_multi(struct net_device *dev)
{
struct ucc_geth_private *ugeth;
struct dev_mc_list *dmi;
- struct ucc_fast *uf_regs;
- struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
int i;
ugeth = netdev_priv(dev);
@@ -2228,14 +2233,14 @@ static void ucc_geth_set_multi(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
- uf_regs->upsmr |= UPSMR_PRO;
+ out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO);
} else {
- uf_regs->upsmr &= ~UPSMR_PRO;
+ out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO);
p_82xx_addr_filt =
- (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
p_rx_glbl_pram->addressfiltering;
if (dev->flags & IFF_ALLMULTI) {
@@ -2270,7 +2275,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
- struct ucc_geth *ug_regs = ugeth->ug_regs;
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
struct phy_device *phydev = ugeth->phydev;
u32 tempval;
@@ -2419,20 +2424,20 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- ugeth->ug_regs = (struct ucc_geth *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
+ ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth));
return 0;
}
static int ucc_geth_startup(struct ucc_geth_private *ugeth)
{
- struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
- struct ucc_geth_init_pram *p_init_enet_pram;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ struct ucc_geth_init_pram __iomem *p_init_enet_pram;
struct ucc_fast_private *uccf;
struct ucc_geth_info *ug_info;
struct ucc_fast_info *uf_info;
- struct ucc_fast *uf_regs;
- struct ucc_geth *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
int ret_val = -EINVAL;
u32 remoder = UCC_GETH_REMODER_INIT;
u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
@@ -2440,7 +2445,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
u16 temoder = UCC_GETH_TEMODER_INIT;
u16 test;
u8 function_code = 0;
- u8 *bd, *endOfRing;
+ u8 __iomem *bd;
+ u8 __iomem *endOfRing;
u8 numThreadsRxNumerical, numThreadsTxNumerical;
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -2602,11 +2608,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
align = UCC_GETH_TX_BD_RING_ALIGNMENT;
ugeth->tx_bd_ring_offset[j] =
- kmalloc((u32) (length + align), GFP_KERNEL);
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
if (ugeth->tx_bd_ring_offset[j] != 0)
ugeth->p_tx_bd_ring[j] =
- (void*)((ugeth->tx_bd_ring_offset[j] +
+ (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
align) & ~(align - 1));
} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
ugeth->tx_bd_ring_offset[j] =
@@ -2614,7 +2620,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_TX_BD_RING_ALIGNMENT);
if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
ugeth->p_tx_bd_ring[j] =
- (u8 *) qe_muram_addr(ugeth->
+ (u8 __iomem *) qe_muram_addr(ugeth->
tx_bd_ring_offset[j]);
}
if (!ugeth->p_tx_bd_ring[j]) {
@@ -2626,8 +2632,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
/* Zero unused end of bd ring, according to spec */
- memset(ugeth->p_tx_bd_ring[j] +
- ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0,
+ memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
+ ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
}
@@ -2639,10 +2645,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
align = UCC_GETH_RX_BD_RING_ALIGNMENT;
ugeth->rx_bd_ring_offset[j] =
- kmalloc((u32) (length + align), GFP_KERNEL);
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
if (ugeth->rx_bd_ring_offset[j] != 0)
ugeth->p_rx_bd_ring[j] =
- (void*)((ugeth->rx_bd_ring_offset[j] +
+ (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
align) & ~(align - 1));
} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
ugeth->rx_bd_ring_offset[j] =
@@ -2650,7 +2656,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
UCC_GETH_RX_BD_RING_ALIGNMENT);
if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
ugeth->p_rx_bd_ring[j] =
- (u8 *) qe_muram_addr(ugeth->
+ (u8 __iomem *) qe_muram_addr(ugeth->
rx_bd_ring_offset[j]);
}
if (!ugeth->p_rx_bd_ring[j]) {
@@ -2685,14 +2691,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
/* clear bd buffer */
- out_be32(&((struct qe_bd *)bd)->buf, 0);
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
/* set bd status and length */
- out_be32((u32 *)bd, 0);
+ out_be32((u32 __iomem *)bd, 0);
bd += sizeof(struct qe_bd);
}
bd -= sizeof(struct qe_bd);
/* set bd status and length */
- out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */
+ out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
}
/* Init Rx bds */
@@ -2717,14 +2723,14 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
/* set bd status and length */
- out_be32((u32 *)bd, R_I);
+ out_be32((u32 __iomem *)bd, R_I);
/* clear bd buffer */
- out_be32(&((struct qe_bd *)bd)->buf, 0);
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
bd += sizeof(struct qe_bd);
}
bd -= sizeof(struct qe_bd);
/* set bd status and length */
- out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */
+ out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
}
/*
@@ -2744,10 +2750,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
ugeth->p_tx_glbl_pram =
- (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth->
+ (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
tx_glbl_pram_offset);
/* Zero out p_tx_glbl_pram */
- memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
+ memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
/* Fill global PRAM */
@@ -2768,7 +2774,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_thread_data_tx =
- (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth->
+ (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
thread_dat_tx_offset);
out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
@@ -2779,7 +2785,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* iphoffset */
for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
- ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
+ out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
+ ug_info->iphoffset[i]);
/* SQPTR */
/* Size varies with number of Tx queues */
@@ -2797,7 +2804,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_send_q_mem_reg =
- (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth->
+ (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
send_q_mem_reg_offset);
out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
@@ -2841,25 +2848,26 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_scheduler =
- (struct ucc_geth_scheduler *) qe_muram_addr(ugeth->
+ (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
scheduler_offset);
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
ugeth->scheduler_offset);
/* Zero out p_scheduler */
- memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
+ memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
/* Set values in scheduler */
out_be32(&ugeth->p_scheduler->mblinterval,
ug_info->mblinterval);
out_be16(&ugeth->p_scheduler->nortsrbytetime,
ug_info->nortsrbytetime);
- ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
- ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
- ugeth->p_scheduler->txasap = ug_info->txasap;
- ugeth->p_scheduler->extrabw = ug_info->extrabw;
+ out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
+ out_8(&ugeth->p_scheduler->strictpriorityq,
+ ug_info->strictpriorityq);
+ out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
+ out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
for (i = 0; i < NUM_TX_QUEUES; i++)
- ugeth->p_scheduler->weightfactor[i] =
- ug_info->weightfactor[i];
+ out_8(&ugeth->p_scheduler->weightfactor[i],
+ ug_info->weightfactor[i]);
/* Set pointers to cpucount registers in scheduler */
ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
@@ -2890,10 +2898,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
ugeth->p_tx_fw_statistics_pram =
- (struct ucc_geth_tx_firmware_statistics_pram *)
+ (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
/* Zero out p_tx_fw_statistics_pram */
- memset(ugeth->p_tx_fw_statistics_pram,
+ memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
}
@@ -2930,10 +2938,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
ugeth->p_rx_glbl_pram =
- (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth->
+ (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
rx_glbl_pram_offset);
/* Zero out p_rx_glbl_pram */
- memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
+ memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
/* Fill global PRAM */
@@ -2953,7 +2961,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_thread_data_rx =
- (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth->
+ (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
thread_dat_rx_offset);
out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
@@ -2976,10 +2984,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
ugeth->p_rx_fw_statistics_pram =
- (struct ucc_geth_rx_firmware_statistics_pram *)
+ (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
/* Zero out p_rx_fw_statistics_pram */
- memset(ugeth->p_rx_fw_statistics_pram, 0,
+ memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
sizeof(struct ucc_geth_rx_firmware_statistics_pram));
}
@@ -3000,7 +3008,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_rx_irq_coalescing_tbl =
- (struct ucc_geth_rx_interrupt_coalescing_table *)
+ (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
ugeth->rx_irq_coalescing_tbl_offset);
@@ -3069,11 +3077,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_rx_bd_qs_tbl =
- (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth->
+ (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
rx_bd_qs_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
/* Zero out p_rx_bd_qs_tbl */
- memset(ugeth->p_rx_bd_qs_tbl,
+ memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
0,
ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
sizeof(struct ucc_geth_rx_prefetched_bds)));
@@ -3133,7 +3141,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
&ugeth->p_rx_glbl_pram->remoder);
/* function code register */
- ugeth->p_rx_glbl_pram->rstate = function_code;
+ out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
/* initialize extended filtering */
if (ug_info->rxExtendedFiltering) {
@@ -3160,7 +3168,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_exf_glbl_param =
- (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth->
+ (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
exf_glbl_param_offset);
out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
ugeth->exf_glbl_param_offset);
@@ -3175,7 +3183,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
p_82xx_addr_filt =
- (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
p_rx_glbl_pram->addressfiltering;
ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
@@ -3307,17 +3315,21 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
p_init_enet_pram =
- (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset);
+ (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
/* Copy shadow InitEnet command parameter structure into PRAM */
- p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
- p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
- p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
- p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
+ out_8(&p_init_enet_pram->resinit1,
+ ugeth->p_init_enet_param_shadow->resinit1);
+ out_8(&p_init_enet_pram->resinit2,
+ ugeth->p_init_enet_param_shadow->resinit2);
+ out_8(&p_init_enet_pram->resinit3,
+ ugeth->p_init_enet_param_shadow->resinit3);
+ out_8(&p_init_enet_pram->resinit4,
+ ugeth->p_init_enet_param_shadow->resinit4);
out_be16(&p_init_enet_pram->resinit5,
ugeth->p_init_enet_param_shadow->resinit5);
- p_init_enet_pram->largestexternallookupkeysize =
- ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
+ out_8(&p_init_enet_pram->largestexternallookupkeysize,
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
out_be32(&p_init_enet_pram->rgftgfrxglobal,
ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
@@ -3371,7 +3383,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef CONFIG_UGETH_TX_ON_DEMAND
struct ucc_fast_private *uccf;
#endif
- u8 *bd; /* BD pointer */
+ u8 __iomem *bd; /* BD pointer */
u32 bd_status;
u8 txQ = 0;
@@ -3383,7 +3395,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Start from the next BD that should be filled */
bd = ugeth->txBd[txQ];
- bd_status = in_be32((u32 *)bd);
+ bd_status = in_be32((u32 __iomem *)bd);
/* Save the skb pointer so we can free it later */
ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
@@ -3393,7 +3405,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
/* set up the buffer descriptor */
- out_be32(&((struct qe_bd *)bd)->buf,
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
@@ -3401,7 +3413,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
/* set bd status and length */
- out_be32((u32 *)bd, bd_status);
+ out_be32((u32 __iomem *)bd, bd_status);
dev->trans_start = jiffies;
@@ -3441,7 +3453,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
{
struct sk_buff *skb;
- u8 *bd;
+ u8 __iomem *bd;
u16 length, howmany = 0;
u32 bd_status;
u8 *bdBuffer;
@@ -3454,11 +3466,11 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
/* collect received buffers */
bd = ugeth->rxBd[rxQ];
- bd_status = in_be32((u32 *)bd);
+ bd_status = in_be32((u32 __iomem *)bd);
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
- bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf);
+ bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
@@ -3516,7 +3528,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
else
bd += sizeof(struct qe_bd);
- bd_status = in_be32((u32 *)bd);
+ bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->rxBd[rxQ] = bd;
@@ -3527,11 +3539,11 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
/* Start from the next BD that should be filled */
struct ucc_geth_private *ugeth = netdev_priv(dev);
- u8 *bd; /* BD pointer */
+ u8 __iomem *bd; /* BD pointer */
u32 bd_status;
bd = ugeth->confBd[txQ];
- bd_status = in_be32((u32 *)bd);
+ bd_status = in_be32((u32 __iomem *)bd);
/* Normal processing. */
while ((bd_status & T_R) == 0) {
@@ -3561,7 +3573,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
bd += sizeof(struct qe_bd);
else
bd = ugeth->p_tx_bd_ring[txQ];
- bd_status = in_be32((u32 *)bd);
+ bd_status = in_be32((u32 __iomem *)bd);
}
ugeth->confBd[txQ] = bd;
return 0;
@@ -3910,7 +3922,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
return -EINVAL;
}
} else {
- prop = of_get_property(np, "rx-clock", NULL);
+ prop = of_get_property(np, "tx-clock", NULL);
if (!prop) {
printk(KERN_ERR
"ucc_geth: mising tx-clock-name property\n");
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 9f8b7580a3a..abc0e224263 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -700,8 +700,8 @@ struct ucc_geth_82xx_address_filtering_pram {
u32 iaddr_l; /* individual address filter, low */
u32 gaddr_h; /* group address filter, high */
u32 gaddr_l; /* group address filter, low */
- struct ucc_geth_82xx_enet_address taddr;
- struct ucc_geth_82xx_enet_address paddr[NUM_OF_PADDRS];
+ struct ucc_geth_82xx_enet_address __iomem taddr;
+ struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
u8 res0[0x40 - 0x38];
} __attribute__ ((packed));
@@ -1186,40 +1186,40 @@ struct ucc_geth_private {
struct ucc_fast_private *uccf;
struct net_device *dev;
struct napi_struct napi;
- struct ucc_geth *ug_regs;
+ struct ucc_geth __iomem *ug_regs;
struct ucc_geth_init_pram *p_init_enet_param_shadow;
- struct ucc_geth_exf_global_pram *p_exf_glbl_param;
+ struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
u32 exf_glbl_param_offset;
- struct ucc_geth_rx_global_pram *p_rx_glbl_pram;
+ struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
u32 rx_glbl_pram_offset;
- struct ucc_geth_tx_global_pram *p_tx_glbl_pram;
+ struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
u32 tx_glbl_pram_offset;
- struct ucc_geth_send_queue_mem_region *p_send_q_mem_reg;
+ struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
u32 send_q_mem_reg_offset;
- struct ucc_geth_thread_data_tx *p_thread_data_tx;
+ struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
u32 thread_dat_tx_offset;
- struct ucc_geth_thread_data_rx *p_thread_data_rx;
+ struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx;
u32 thread_dat_rx_offset;
- struct ucc_geth_scheduler *p_scheduler;
+ struct ucc_geth_scheduler __iomem *p_scheduler;
u32 scheduler_offset;
- struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
+ struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram;
u32 tx_fw_statistics_pram_offset;
- struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
+ struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram;
u32 rx_fw_statistics_pram_offset;
- struct ucc_geth_rx_interrupt_coalescing_table *p_rx_irq_coalescing_tbl;
+ struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl;
u32 rx_irq_coalescing_tbl_offset;
- struct ucc_geth_rx_bd_queues_entry *p_rx_bd_qs_tbl;
+ struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
u32 rx_bd_qs_tbl_offset;
- u8 *p_tx_bd_ring[NUM_TX_QUEUES];
+ u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
u32 tx_bd_ring_offset[NUM_TX_QUEUES];
- u8 *p_rx_bd_ring[NUM_RX_QUEUES];
+ u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
u32 rx_bd_ring_offset[NUM_RX_QUEUES];
- u8 *confBd[NUM_TX_QUEUES];
- u8 *txBd[NUM_TX_QUEUES];
- u8 *rxBd[NUM_RX_QUEUES];
+ u8 __iomem *confBd[NUM_TX_QUEUES];
+ u8 __iomem *txBd[NUM_TX_QUEUES];
+ u8 __iomem *rxBd[NUM_RX_QUEUES];
int badFrame[NUM_RX_QUEUES];
u16 cpucount[NUM_TX_QUEUES];
- volatile u16 *p_cpucount[NUM_TX_QUEUES];
+ u16 __iomem *p_cpucount[NUM_TX_QUEUES];
int indAddrRegUsed[NUM_OF_PADDRS];
u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
u8 numGroupAddrInHash;
@@ -1251,4 +1251,12 @@ struct ucc_geth_private {
int oldlink;
};
+void uec_set_ethtool_ops(struct net_device *netdev);
+int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable, int tx_flow_control_enable,
+ u16 pause_period, u16 extension_field,
+ u32 __iomem *upsmr_register, u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register);
+
+
#endif /* __UCC_GETH_H__ */
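
Most of the ucc_geth churn replaces direct dereferences of volatile register pointers with __iomem-annotated pointers accessed through in_be32()/out_be32()/out_8(), so every register access becomes an explicit read or write, including read-modify-write updates such as the UPSMR_PRO toggle in ucc_geth_set_multi(). A standalone sketch of that pattern with stand-in accessors (the real ones are the PowerPC MMIO helpers; the UPSMR_PRO value here is made up):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the MMIO accessors used by the ucc_geth changes;
 * in the kernel these are in_be32()/out_be32() over an __iomem
 * mapping that is never dereferenced directly.  The register bit
 * value is illustrative. */
#define UPSMR_PRO 0x00000040u

struct fake_ucc_fast_regs {
	uint32_t upsmr;
};

static uint32_t in_be32(const uint32_t *reg)    { return *reg; }
static void out_be32(uint32_t *reg, uint32_t v) { *reg = v; }

static void set_promisc(struct fake_ucc_fast_regs *uf_regs, int on)
{
	uint32_t upsmr = in_be32(&uf_regs->upsmr);	/* explicit read-modify-write */

	if (on)
		upsmr |= UPSMR_PRO;
	else
		upsmr &= ~UPSMR_PRO;
	out_be32(&uf_regs->upsmr, upsmr);
}

int main(void)
{
	struct fake_ucc_fast_regs regs = { .upsmr = 0 };

	set_promisc(&regs, 1);
	printf("upsmr = 0x%08x\n", (unsigned)regs.upsmr);
	set_promisc(&regs, 0);
	printf("upsmr = 0x%08x\n", (unsigned)regs.upsmr);
	return 0;
}
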
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 33200038a14..5f176f2b1c1 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -108,12 +108,6 @@ static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
-extern int init_flow_control_params(u32 automatic_flow_control_mode,
- int rx_flow_control_enable,
- int tx_flow_control_enable, u16 pause_period,
- u16 extension_field, volatile u32 *upsmr_register,
- volatile u32 *uempr_register, volatile u32 *maccfg1_register);
-
static int
uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 2af49078100..94047473692 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -104,7 +104,7 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
}
/* Reset the MIIM registers, and wait for the bus to free */
-int uec_mdio_reset(struct mii_bus *bus)
+static int uec_mdio_reset(struct mii_bus *bus)
{
struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
unsigned int timeout = PHY_INIT_TIMEOUT;
@@ -240,7 +240,7 @@ reg_map_fail:
return err;
}
-int uec_mdio_remove(struct of_device *ofdev)
+static int uec_mdio_remove(struct of_device *ofdev)
{
struct device *device = &ofdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6f245cfb662..dc6f097062d 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1381,6 +1381,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x0411, 0x003d),
.driver_info = (unsigned long) &ax8817x_info,
}, {
+ // Buffalo LUA-U2-GT 10/100/1000
+ USB_DEVICE (0x0411, 0x006e),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
USB_DEVICE (0x6189, 0x182d),
.driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 8005dd16fb4..d5140aed7b7 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -150,11 +150,9 @@ config HDLC_FR
config HDLC_PPP
tristate "Synchronous Point-to-Point Protocol (PPP) support"
- depends on HDLC && BROKEN
+ depends on HDLC
help
Generic HDLC driver supporting PPP over WAN connections.
- This module is currently broken and will cause a kernel panic
- when a device configured in PPP mode is activated.
It will be replaced by a new PPP implementation in Linux 2.6.26.
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 45ddfc9763c..b0fce1387ea 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -629,7 +629,7 @@ static void sppp_channel_init(struct channel_data *chan)
d->base_addr = chan->cosa->datareg;
d->irq = chan->cosa->irq;
d->dma = chan->cosa->dma;
- d->priv = chan;
+ d->ml_priv = chan;
sppp_attach(&chan->pppdev);
if (register_netdev(d)) {
printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
@@ -650,7 +650,7 @@ static void sppp_channel_delete(struct channel_data *chan)
static int cosa_sppp_open(struct net_device *d)
{
- struct channel_data *chan = d->priv;
+ struct channel_data *chan = d->ml_priv;
int err;
unsigned long flags;
@@ -690,7 +690,7 @@ static int cosa_sppp_open(struct net_device *d)
static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
netif_stop_queue(dev);
@@ -701,7 +701,7 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
static void cosa_sppp_timeout(struct net_device *dev)
{
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
if (test_bit(RXBIT, &chan->cosa->rxtx)) {
chan->stats.rx_errors++;
@@ -720,7 +720,7 @@ static void cosa_sppp_timeout(struct net_device *dev)
static int cosa_sppp_close(struct net_device *d)
{
- struct channel_data *chan = d->priv;
+ struct channel_data *chan = d->ml_priv;
unsigned long flags;
netif_stop_queue(d);
@@ -800,7 +800,7 @@ static int sppp_tx_done(struct channel_data *chan, int size)
static struct net_device_stats *cosa_net_stats(struct net_device *dev)
{
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
return &chan->stats;
}
@@ -1217,7 +1217,7 @@ static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
int rv;
- struct channel_data *chan = dev->priv;
+ struct channel_data *chan = dev->ml_priv;
rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
if (rv == -ENOIOCTLCMD) {
return sppp_do_ioctl(dev, ifr, cmd);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 10396d9686f..00308337928 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -45,7 +45,7 @@ static int ppp_open(struct net_device *dev)
int (*old_ioctl)(struct net_device *, struct ifreq *, int);
int result;
- dev->priv = &state(hdlc)->syncppp_ptr;
+ dev->ml_priv = &state(hdlc)->syncppp_ptr;
state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev;
state(hdlc)->pppdev.dev = dev;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 83dbc924fcb..f3065d3473f 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -75,7 +75,7 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
static int hostess_open(struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
int err = -1;
/*
@@ -128,7 +128,7 @@ static int hostess_open(struct net_device *d)
static int hostess_close(struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
/*
* Discard new frames
*/
@@ -159,14 +159,14 @@ static int hostess_close(struct net_device *d)
static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
- /* struct sv11_device *sv11=d->priv;
+ /* struct sv11_device *sv11=d->ml_priv;
z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */
return sppp_do_ioctl(d, ifr,cmd);
}
static struct net_device_stats *hostess_get_stats(struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
if(sv11)
return z8530_get_stats(&sv11->sync.chanA);
else
@@ -179,7 +179,7 @@ static struct net_device_stats *hostess_get_stats(struct net_device *d)
static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
{
- struct sv11_device *sv11=d->priv;
+ struct sv11_device *sv11=d->ml_priv;
return z8530_queue_xmit(&sv11->sync.chanA, skb);
}
@@ -325,6 +325,7 @@ static struct sv11_device *sv11_init(int iobase, int irq)
/*
* Initialise the PPP components
*/
+ d->ml_priv = sv;
sppp_attach(&sv->netdev);
/*
@@ -333,7 +334,6 @@ static struct sv11_device *sv11_init(int iobase, int irq)
d->base_addr = iobase;
d->irq = irq;
- d->priv = sv;
if(register_netdev(d))
{
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index b5860b97a93..24fd613466b 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -459,6 +459,7 @@ static void __exit lapbeth_cleanup_driver(void)
list_for_each_safe(entry, tmp, &lapbeth_devices) {
lapbeth = list_entry(entry, struct lapbethdev, node);
+ dev_put(lapbeth->ethdev);
unregister_netdevice(lapbeth->axdev);
}
rtnl_unlock();
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 6635ecef36e..62133cee446 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -891,6 +891,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev,
/* Initialize the sppp layer */
/* An ioctl can cause a subsequent detach for raw frame interface */
+ dev->ml_priv = sc;
sc->if_type = LMC_PPP;
sc->check = 0xBEAFCAFE;
dev->base_addr = pci_resource_start(pdev, 0);
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 11276bf3149..44a89df1b8b 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -241,6 +241,7 @@ static inline struct slvl_device *slvl_alloc(int iobase, int irq)
return NULL;
sv = d->priv;
+ d->ml_priv = sv;
sv->if_ptr = &sv->pppdev;
sv->pppdev.dev = d;
d->base_addr = iobase;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index cc0006900b3..995dd537083 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,6 +1,5 @@
config IWLWIFI
- bool
- default n
+ tristate
config IWLCORE
tristate "Intel Wireless Wifi Core"
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8464397f781..9df48b33f0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -666,7 +666,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
rx_status.flag = 0;
rx_status.mactime = le64_to_cpu(rx_end->timestamp);
rx_status.freq =
- ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel));
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index 67356d9a0c5..24dee00b0e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -168,8 +168,8 @@ struct iwl4965_lq_sta {
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
#endif
u32 dbg_fixed_rate;
- struct iwl_priv *drv;
#endif
+ struct iwl_priv *drv;
};
static void rs_rate_scale_perform(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9546582e983..5d675e39bab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -3128,7 +3128,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
rx_status.mactime = le64_to_cpu(rx_start->timestamp);
rx_status.freq =
- ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel));
rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.rate_idx =
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 04c2638d75a..9196825ed1b 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -388,8 +388,15 @@ islpci_open(struct net_device *ndev)
netif_start_queue(ndev);
- /* Turn off carrier unless we know we have associated */
- netif_carrier_off(ndev);
+ /* Turn off carrier if in STA or Ad-hoc mode. It will be turned on
+ * once the firmware receives a trap of being associated
+ * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we
+ * should just leave the carrier on as its expected the firmware
+ * won't send us a trigger. */
+ if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC)
+ netif_carrier_off(ndev);
+ else
+ netif_carrier_on(ndev);
return 0;
}
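
The islpci_open() comment explains the carrier policy: off in STA and ad-hoc modes until the firmware's association trap arrives, on in the other modes where no such trap is expected. A standalone sketch of that decision (the enum below stands in for the wireless-extensions IW_MODE_* constants):

#include <stdio.h>

/* Stand-in for the IW_MODE_* constants; only the decision matters. */
enum iw_mode { MODE_ADHOC, MODE_INFRA, MODE_MASTER, MODE_MONITOR };

/* Carrier starts off only where the firmware will later raise it. */
static int carrier_on_at_open(enum iw_mode mode)
{
	return !(mode == MODE_INFRA || mode == MODE_ADHOC);
}

int main(void)
{
	printf("STA: carrier %s at open\n",
	       carrier_on_at_open(MODE_INFRA) ? "on" : "off");
	printf("AP:  carrier %s at open\n",
	       carrier_on_at_open(MODE_MASTER) ? "on" : "off");
	return 0;
}
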
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9929b15e28b..61510c51935 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1028,8 +1028,10 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
* Initialize the device.
*/
status = rt2x00dev->ops->lib->initialize(rt2x00dev);
- if (status)
- goto exit;
+ if (status) {
+ rt2x00queue_uninitialize(rt2x00dev);
+ return status;
+ }
__set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags);
@@ -1039,11 +1041,6 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
rt2x00rfkill_register(rt2x00dev);
return 0;
-
-exit:
- rt2x00lib_uninitialize(rt2x00dev);
-
- return status;
}
int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 9e5d94e44c5..c17078eac19 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -314,13 +314,14 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
pci_dev->irq, status);
- return status;
+ goto exit;
}
return 0;
exit:
- rt2x00pci_uninitialize(rt2x00dev);
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
return status;
}
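
The two rt2x00 fixes tighten error unwinding: on failure, each path now releases exactly what was set up before it (the queue DMA only), instead of calling a full uninitialize that would also touch resources never acquired, such as an IRQ that was never requested. A standalone sketch of that unwind-only-what-you-own structure; all names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	void *dma;
	int irq_requested;
};

static int alloc_queue_dma(struct dev_state *d)
{
	d->dma = malloc(4096);
	return d->dma ? 0 : -1;
}

static void free_queue_dma(struct dev_state *d)
{
	free(d->dma);
	d->dma = NULL;
}

static int request_irq_stub(struct dev_state *d, int fail)
{
	if (fail)
		return -1;
	d->irq_requested = 1;
	return 0;
}

/* On failure, undo only the steps that already succeeded. */
static int dev_initialize(struct dev_state *d, int irq_fails)
{
	int status = alloc_queue_dma(d);

	if (status)
		return status;

	status = request_irq_stub(d, irq_fails);
	if (status)
		goto exit_free_dma;	/* the IRQ was never requested: don't free it */

	return 0;

exit_free_dma:
	free_queue_dma(d);
	return status;
}

int main(void)
{
	struct dev_state d = { 0 };

	printf("irq failure path: %d\n", dev_initialize(&d, 1));
	printf("success path:     %d\n", dev_initialize(&d, 0));
	free_queue_dma(&d);
	return 0;
}
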
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 98af4d26583..b64f2f5d0d5 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2362,6 +2362,7 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(control->vif);
+ struct queue_entry_priv_pci_tx *priv_tx;
struct skb_frame_desc *skbdesc;
unsigned int beacon_base;
u32 reg;
@@ -2369,21 +2370,8 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
if (unlikely(!intf->beacon))
return -ENOBUFS;
- /*
- * We need to append the descriptor in front of the
- * beacon frame.
- */
- if (skb_headroom(skb) < intf->beacon->queue->desc_size) {
- if (pskb_expand_head(skb, intf->beacon->queue->desc_size,
- 0, GFP_ATOMIC))
- return -ENOMEM;
- }
-
- /*
- * Add the descriptor in front of the skb.
- */
- skb_push(skb, intf->beacon->queue->desc_size);
- memset(skb->data, 0, intf->beacon->queue->desc_size);
+ priv_tx = intf->beacon->priv_data;
+ memset(priv_tx->desc, 0, intf->beacon->queue->desc_size);
/*
* Fill in skb descriptor
@@ -2391,9 +2379,9 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
- skbdesc->data = skb->data + intf->beacon->queue->desc_size;
- skbdesc->data_len = skb->len - intf->beacon->queue->desc_size;
- skbdesc->desc = skb->data;
+ skbdesc->data = skb->data;
+ skbdesc->data_len = skb->len;
+ skbdesc->desc = priv_tx->desc;
skbdesc->desc_len = intf->beacon->queue->desc_size;
skbdesc->entry = intf->beacon;
@@ -2414,7 +2402,10 @@ static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
- skb->data, skb->len);
+ skbdesc->desc, skbdesc->desc_len);
+ rt2x00pci_register_multiwrite(rt2x00dev,
+ beacon_base + skbdesc->desc_len,
+ skbdesc->data, skbdesc->data_len);
rt61pci_kick_tx_queue(rt2x00dev, QID_BEACON);
return 0;
@@ -2479,7 +2470,7 @@ static const struct data_queue_desc rt61pci_queue_tx = {
static const struct data_queue_desc rt61pci_queue_bcn = {
.entry_num = 4 * BEACON_ENTRIES,
- .data_size = MGMT_FRAME_SIZE,
+ .data_size = 0, /* No DMA required for beacons */
.desc_size = TXINFO_SIZE,
.priv_size = sizeof(struct queue_entry_priv_pci_tx),
};
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 5dd23c93497..883af891ebf 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -2611,7 +2611,7 @@ static int strip_open(struct tty_struct *tty)
* We need a write method.
*/
- if (tty->ops->write == NULL)
+ if (tty->ops->write == NULL || tty->ops->set_termios == NULL)
return -EOPNOTSUPP;
/*
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 03384a43186..49ae9700395 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -908,9 +908,9 @@ static void wv_psa_show(psa_t * p)
p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5],
p->psa_call_code[6], p->psa_call_code[7]);
#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
p->psa_reserved[0],
- p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]);
+ p->psa_reserved[1]);
#endif /* DEBUG_SHOW_UNUSED */
printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index baf74015751..b584c0ecc62 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1074,11 +1074,9 @@ wv_psa_show(psa_t * p)
p->psa_call_code[6],
p->psa_call_code[7]);
#ifdef DEBUG_SHOW_UNUSED
- printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n",
+ printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n",
p->psa_reserved[0],
- p->psa_reserved[1],
- p->psa_reserved[2],
- p->psa_reserved[3]);
+ p->psa_reserved[1]);
#endif /* DEBUG_SHOW_UNUSED */
printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status);
printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 5316074f39f..12e24f04ddd 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -889,9 +889,13 @@ static void tx_urb_complete(struct urb *urb)
}
free_urb:
skb = (struct sk_buff *)urb->context;
- zd_mac_tx_to_dev(skb, urb->status);
+ /*
+ * grab 'usb' pointer before handing off the skb (since
+ * it might be freed by zd_mac_tx_to_dev or mac80211)
+ */
cb = (struct zd_tx_skb_control_block *)skb->cb;
usb = &zd_hw_mac(cb->hw)->chip.usb;
+ zd_mac_tx_to_dev(skb, urb->status);
free_tx_urb(usb, urb);
tx_dec_submitted_urbs(usb);
return;
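
The reordering above is the standard use-after-free guard: anything still needed from an object has to be read out before the object is handed to a routine that may free it. A standalone C sketch of the same ordering, where hand_off() is a made-up stand-in for zd_mac_tx_to_dev():

    #include <stdio.h>
    #include <stdlib.h>

    struct context {
        int pending;
    };

    struct packet {
        struct context *ctx;    /* still needed after the packet is gone */
        char data[64];
    };

    /* consumes and may free the packet, like zd_mac_tx_to_dev() */
    static void hand_off(struct packet *pkt)
    {
        free(pkt);
    }

    static void complete(struct packet *pkt)
    {
        /* save everything we still need *before* the hand-off; afterwards
         * 'pkt' must be treated as freed and may not be dereferenced */
        struct context *ctx = pkt->ctx;

        hand_off(pkt);
        ctx->pending--;         /* safe: only the saved pointer is used */
    }

    int main(void)
    {
        struct context ctx = { .pending = 1 };
        struct packet *pkt = calloc(1, sizeof(*pkt));

        if (!pkt)
            return 1;
        pkt->ctx = &ctx;
        complete(pkt);
        printf("pending = %d\n", ctx.pending);
        return 0;
    }
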
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index efcbf4b4579..2450b3a393f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -27,7 +27,7 @@
#include "buffer_sync.h"
#include "oprof.h"
-DEFINE_PER_CPU_SHARED_ALIGNED(struct oprofile_cpu_buffer, cpu_buffer);
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 13588174311..c3e366b5226 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -46,7 +46,7 @@ struct oprofile_cpu_buffer {
unsigned long sample_invalid_eip;
int cpu;
struct delayed_work work;
-} ____cacheline_aligned;
+};
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1fd8bb76570..66c0fd21894 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -49,7 +49,7 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
-#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
@@ -490,12 +490,12 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
{\
- unsigned long start_time = jiffies;\
+ cycles_t start_time = get_cycles();\
while (1) {\
sts = op (iommu->reg + offset);\
if (cond)\
break;\
- if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))\
+ if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
panic("DMAR hardware is malfunctioning\n");\
cpu_relax();\
}\
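
Replacing jiffies with get_cycles() above lets the register-poll loop time out even while timer interrupts are not being serviced, at the price of expressing the timeout in cycle units derived from tsc_khz. A rough userspace sketch of the same idea, using CLOCK_MONOTONIC as the free-running time source; wait_op() and always_ready() are made-up names, not kernel APIs.

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    /* free-running nanosecond timestamp (the role get_cycles() plays above) */
    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* poll ready() until it reports success or timeout_ns elapses */
    static bool wait_op(bool (*ready)(void), uint64_t timeout_ns)
    {
        uint64_t start = now_ns();

        while (!ready()) {
            if (now_ns() - start > timeout_ns)
                return false;   /* caller decides how to react */
        }
        return true;
    }

    static bool always_ready(void)
    {
        return true;
    }

    int main(void)
    {
        /* 10 second budget, matching the comment in the patch above */
        return wait_op(always_ready, 10ull * 1000 * 1000 * 1000) ? 0 : 1;
    }
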
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 72f7476930c..9d6fc8e6285 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -19,8 +19,31 @@
#include <linux/pci-acpi.h>
#include "pci.h"
-static u32 ctrlset_buf[3] = {0, 0, 0};
-static u32 global_ctrlsets = 0;
+struct acpi_osc_data {
+ acpi_handle handle;
+ u32 ctrlset_buf[3];
+ u32 global_ctrlsets;
+ struct list_head sibiling;
+};
+static LIST_HEAD(acpi_osc_data_list);
+
+static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
+{
+ struct acpi_osc_data *data;
+
+ list_for_each_entry(data, &acpi_osc_data_list, sibiling) {
+ if (data->handle == handle)
+ return data;
+ }
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+ INIT_LIST_HEAD(&data->sibiling);
+ data->handle = handle;
+ list_add_tail(&data->sibiling, &acpi_osc_data_list);
+ return data;
+}
+
static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
static acpi_status
@@ -37,8 +60,27 @@ acpi_query_osc (
union acpi_object *out_obj;
u32 osc_dw0;
acpi_status *ret_status = (acpi_status *)retval;
+ struct acpi_osc_data *osc_data;
+ u32 flags = (unsigned long)context, temp;
+ acpi_handle tmp;
+
+ status = acpi_get_handle(handle, "_OSC", &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ osc_data = acpi_get_osc_data(handle);
+ if (!osc_data) {
+ printk(KERN_ERR "acpi osc data array is full\n");
+ return AE_ERROR;
+ }
+
+ osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS);
+
+ /* do _OSC query for all possible controls */
+ temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE];
+ osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+ osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
-
/* Setting up input parameters */
input.count = 4;
input.pointer = in_params;
@@ -51,13 +93,11 @@ acpi_query_osc (
in_params[2].integer.value = 3;
in_params[3].type = ACPI_TYPE_BUFFER;
in_params[3].buffer.length = 12;
- in_params[3].buffer.pointer = (u8 *)context;
+ in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf;
status = acpi_evaluate_object(handle, "_OSC", &input, &output);
- if (ACPI_FAILURE (status)) {
- *ret_status = status;
- return status;
- }
+ if (ACPI_FAILURE(status))
+ goto out_nofree;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
@@ -76,7 +116,8 @@ acpi_query_osc (
printk(KERN_DEBUG "_OSC invalid revision\n");
if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
/* Update Global Control Set */
- global_ctrlsets = *((u32 *)(out_obj->buffer.pointer+8));
+ osc_data->global_ctrlsets =
+ *((u32 *)(out_obj->buffer.pointer + 8));
status = AE_OK;
goto query_osc_out;
}
@@ -85,12 +126,21 @@ acpi_query_osc (
}
/* Update Global Control Set */
- global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8));
+ osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8));
status = AE_OK;
query_osc_out:
kfree(output.pointer);
+out_nofree:
*ret_status = status;
+
+ osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
+ osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp;
+ if (ACPI_FAILURE(status)) {
+ /* no osc support at all */
+ osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
+ }
+
return status;
}
@@ -165,28 +215,15 @@ run_osc_out:
**/
acpi_status __pci_osc_support_set(u32 flags, const char *hid)
{
- u32 temp;
- acpi_status retval;
+ acpi_status retval = AE_NOT_FOUND;
if (!(flags & OSC_SUPPORT_MASKS)) {
return AE_TYPE;
}
- ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS);
-
- /* do _OSC query for all possible controls */
- temp = ctrlset_buf[OSC_CONTROL_TYPE];
- ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
acpi_get_devices(hid,
acpi_query_osc,
- ctrlset_buf,
+ (void *)(unsigned long)flags,
(void **) &retval );
- ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
- ctrlset_buf[OSC_CONTROL_TYPE] = temp;
- if (ACPI_FAILURE(retval)) {
- /* no osc support at all */
- ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
- }
return AE_OK;
}
@@ -201,19 +238,31 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
{
acpi_status status;
u32 ctrlset;
+ acpi_handle tmp;
+ struct acpi_osc_data *osc_data;
+
+ status = acpi_get_handle(handle, "_OSC", &tmp);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ osc_data = acpi_get_osc_data(handle);
+ if (!osc_data) {
+ printk(KERN_ERR "acpi osc data array is full\n");
+ return AE_ERROR;
+ }
ctrlset = (flags & OSC_CONTROL_MASKS);
if (!ctrlset) {
return AE_TYPE;
}
- if (ctrlset_buf[OSC_SUPPORT_TYPE] &&
- ((global_ctrlsets & ctrlset) != ctrlset)) {
+ if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] &&
+ ((osc_data->global_ctrlsets & ctrlset) != ctrlset)) {
return AE_SUPPORT;
}
- ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset;
- status = acpi_run_osc(handle, ctrlset_buf);
+ osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset;
+ status = acpi_run_osc(handle, osc_data->ctrlset_buf);
if (ACPI_FAILURE (status)) {
- ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset;
+ osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset;
}
return status;
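
The heart of the pci-acpi change is replacing a single global _OSC state with a per-handle record that is looked up, and allocated on first use, from a list. A compact sketch of that lookup-or-create pattern with a plain singly linked list in place of the kernel's list_head; every name here is illustrative.

    #include <stdio.h>
    #include <stdlib.h>

    struct osc_data {
        const void *handle;             /* key: one record per handle */
        unsigned int support;
        unsigned int control;
        struct osc_data *next;
    };

    static struct osc_data *osc_list;

    static struct osc_data *get_osc_data(const void *handle)
    {
        struct osc_data *d;

        for (d = osc_list; d; d = d->next)
            if (d->handle == handle)
                return d;               /* found an existing record */

        d = calloc(1, sizeof(*d));      /* otherwise allocate on first use */
        if (!d)
            return NULL;
        d->handle = handle;
        d->next = osc_list;
        osc_list = d;
        return d;
    }

    int main(void)
    {
        int root_a, root_b;             /* stand-ins for ACPI handles */
        struct osc_data *a = get_osc_data(&root_a);
        struct osc_data *b = get_osc_data(&root_b);

        if (!a || !b)
            return 1;
        a->support = 0x1;
        b->support = 0x2;
        printf("%u %u\n", get_osc_data(&root_a)->support,
               get_osc_data(&root_b)->support);
        return 0;
    }
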
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4a55bf38095..3706ce7972d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -842,13 +842,25 @@ static void set_pcie_port_type(struct pci_dev *pdev)
* reading the dword at 0x100 which must either be 0 or a valid extended
* capability header.
*/
-int pci_cfg_space_size_ext(struct pci_dev *dev, unsigned check_exp_pcix)
+int pci_cfg_space_size_ext(struct pci_dev *dev)
{
- int pos;
u32 status;
- if (!check_exp_pcix)
- goto skip;
+ if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
+ goto fail;
+ if (status == 0xffffffff)
+ goto fail;
+
+ return PCI_CFG_SPACE_EXP_SIZE;
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
+int pci_cfg_space_size(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (!pos) {
@@ -861,23 +873,12 @@ int pci_cfg_space_size_ext(struct pci_dev *dev, unsigned check_exp_pcix)
goto fail;
}
- skip:
- if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
- goto fail;
- if (status == 0xffffffff)
- goto fail;
-
- return PCI_CFG_SPACE_EXP_SIZE;
+ return pci_cfg_space_size_ext(dev);
fail:
return PCI_CFG_SPACE_SIZE;
}
-int pci_cfg_space_size(struct pci_dev *dev)
-{
- return pci_cfg_space_size_ext(dev, 1);
-}
-
static void pci_release_bus_bridge_dev(struct device *dev)
{
kfree(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index afd914ebe21..f2d9c770f51 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1826,6 +1826,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk);
static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 4fe7c58f57e..886dac823ed 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -19,6 +19,7 @@ void pnp_remove_card(struct pnp_card *card);
int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
void pnp_remove_card_device(struct pnp_dev *dev);
+struct pnp_option *pnp_build_option(int priority);
struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev);
struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
int priority);
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 5d9301de177..5695a79f3a5 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -424,7 +424,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
start = simple_strtoul(buf, &buf, 0);
pnp_res = pnp_add_irq_resource(dev, start, 0);
if (pnp_res)
- nirq++;
+ pnp_res->index = nirq++;
continue;
}
if (!strnicmp(buf, "dma", 3)) {
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index d049a2279fe..ffdb12a59c4 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -111,6 +111,113 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
dev_info(&dev->dev, "SB audio device quirk - increased port range\n");
}
+static struct pnp_option *quirk_isapnp_mpu_options(struct pnp_dev *dev)
+{
+ struct pnp_option *head = NULL;
+ struct pnp_option *prev = NULL;
+ struct pnp_option *res;
+
+ /*
+ * Build a functional IRQ-less variant of each MPU option.
+ */
+
+ for (res = dev->dependent; res; res = res->next) {
+ struct pnp_option *curr;
+ struct pnp_port *port;
+ struct pnp_port *copy;
+
+ port = res->port;
+ if (!port || !res->irq)
+ continue;
+
+ copy = pnp_alloc(sizeof *copy);
+ if (!copy)
+ break;
+
+ copy->min = port->min;
+ copy->max = port->max;
+ copy->align = port->align;
+ copy->size = port->size;
+ copy->flags = port->flags;
+
+ curr = pnp_build_option(PNP_RES_PRIORITY_FUNCTIONAL);
+ if (!curr) {
+ kfree(copy);
+ break;
+ }
+ curr->port = copy;
+
+ if (prev)
+ prev->next = curr;
+ else
+ head = curr;
+ prev = curr;
+ }
+ if (head)
+ dev_info(&dev->dev, "adding IRQ-less MPU options\n");
+
+ return head;
+}
+
+static void quirk_ad1815_mpu_resources(struct pnp_dev *dev)
+{
+ struct pnp_option *res;
+ struct pnp_irq *irq;
+
+ /*
+ * Distribute the independent IRQ over the dependent options
+ */
+
+ res = dev->independent;
+ if (!res)
+ return;
+
+ irq = res->irq;
+ if (!irq || irq->next)
+ return;
+
+ res = dev->dependent;
+ if (!res)
+ return;
+
+ while (1) {
+ struct pnp_irq *copy;
+
+ copy = pnp_alloc(sizeof *copy);
+ if (!copy)
+ break;
+
+ memcpy(copy->map, irq->map, sizeof copy->map);
+ copy->flags = irq->flags;
+
+ copy->next = res->irq; /* Yes, this is NULL */
+ res->irq = copy;
+
+ if (!res->next)
+ break;
+ res = res->next;
+ }
+ kfree(irq);
+
+ res->next = quirk_isapnp_mpu_options(dev);
+
+ res = dev->independent;
+ res->irq = NULL;
+}
+
+static void quirk_isapnp_mpu_resources(struct pnp_dev *dev)
+{
+ struct pnp_option *res;
+
+ res = dev->dependent;
+ if (!res)
+ return;
+
+ while (res->next)
+ res = res->next;
+
+ res->next = quirk_isapnp_mpu_options(dev);
+}
#include <linux/pci.h>
@@ -205,6 +312,11 @@ static struct pnp_fixup pnp_fixups[] = {
{"CTL0043", quirk_sb16audio_resources},
{"CTL0044", quirk_sb16audio_resources},
{"CTL0045", quirk_sb16audio_resources},
+ /* Add IRQ-less MPU options */
+ {"ADS7151", quirk_ad1815_mpu_resources},
+ {"ADS7181", quirk_isapnp_mpu_resources},
+ {"AZT0002", quirk_isapnp_mpu_resources},
+ /* PnP resources that might overlap PCI BARs */
{"PNP0c01", quirk_system_pci_resources},
{"PNP0c02", quirk_system_pci_resources},
{""}
@@ -212,20 +324,16 @@ static struct pnp_fixup pnp_fixups[] = {
void pnp_fixup_device(struct pnp_dev *dev)
{
- int i = 0;
- void (*quirk)(struct pnp_dev *);
-
- while (*pnp_fixups[i].id) {
- if (compare_pnp_id(dev->id, pnp_fixups[i].id)) {
- quirk = pnp_fixups[i].quirk_function;
+ struct pnp_fixup *f;
+ for (f = pnp_fixups; *f->id; f++) {
+ if (!compare_pnp_id(dev->id, f->id))
+ continue;
#ifdef DEBUG
- dev_dbg(&dev->dev, "calling ");
- print_fn_descriptor_symbol("%s()\n",
- (unsigned long) *quirk);
+ dev_dbg(&dev->dev, "%s: calling ", f->id);
+ print_fn_descriptor_symbol("%s\n",
+ (unsigned long) f->quirk_function);
#endif
- (*quirk)(dev);
- }
- i++;
+ f->quirk_function(dev);
}
}
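
The rewritten pnp_fixup_device() simply walks a sentinel-terminated table and runs every entry whose id matches. A minimal version of that table-driven dispatch; the ids and the single quirk handler below are made up.

    #include <stdio.h>
    #include <string.h>

    struct fixup {
        const char *id;
        void (*quirk)(const char *id);
    };

    static void quirk_example(const char *id)
    {
        printf("applying quirk for %s\n", id);
    }

    static const struct fixup fixups[] = {
        { "CTL0001", quirk_example },
        { "PNP0c01", quirk_example },
        { "", NULL }                    /* empty id terminates the table */
    };

    static void fixup_device(const char *dev_id)
    {
        const struct fixup *f;

        for (f = fixups; *f->id; f++) {
            if (strcmp(dev_id, f->id) != 0)
                continue;
            f->quirk(dev_id);           /* every matching entry is applied */
        }
    }

    int main(void)
    {
        fixup_device("PNP0c01");
        return 0;
    }
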
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index 2041620d568..390b50096e3 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -28,7 +28,7 @@ static int pnp_reserve_mem[16] = {[0 ... 15] = -1 }; /* reserve (don't use) some
* option registration
*/
-static struct pnp_option *pnp_build_option(int priority)
+struct pnp_option *pnp_build_option(int priority)
{
struct pnp_option *option = pnp_alloc(sizeof(struct pnp_option));
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index 3eba85ed729..95b076c18c0 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -45,10 +45,10 @@ void pnp_eisa_id_to_string(u32 id, char *str)
str[0] = 'A' + ((id >> 26) & 0x3f) - 1;
str[1] = 'A' + ((id >> 21) & 0x1f) - 1;
str[2] = 'A' + ((id >> 16) & 0x1f) - 1;
- str[3] = hex_asc((id >> 12) & 0xf);
- str[4] = hex_asc((id >> 8) & 0xf);
- str[5] = hex_asc((id >> 4) & 0xf);
- str[6] = hex_asc((id >> 0) & 0xf);
+ str[3] = hex_asc_hi(id >> 8);
+ str[4] = hex_asc_lo(id >> 8);
+ str[5] = hex_asc_hi(id);
+ str[6] = hex_asc_lo(id);
str[7] = '\0';
}
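
pnp_eisa_id_to_string() now formats the 16-bit product part of the id one byte at a time with hex_asc_hi()/hex_asc_lo() instead of shifting out nibbles by hand. A standalone approximation of those helpers; the kernel's own versions live in linux/kernel.h, and the local macros below are stand-ins.

    #include <stdio.h>

    static const char hex_digits[] = "0123456789abcdef";
    #define hex_hi(b) hex_digits[((b) >> 4) & 0x0f]
    #define hex_lo(b) hex_digits[(b) & 0x0f]

    int main(void)
    {
        unsigned int id = 0x41d0;       /* low 16 bits of an EISA id */
        char str[5];

        str[0] = hex_hi(id >> 8);       /* high byte, high nibble */
        str[1] = hex_lo(id >> 8);       /* high byte, low nibble */
        str[2] = hex_hi(id);            /* low byte, high nibble */
        str[3] = hex_lo(id);            /* low byte, low nibble */
        str[4] = '\0';
        printf("%s\n", str);            /* prints "41d0" */
        return 0;
    }
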
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index a83a40b3eba..0f0d27d1c4c 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -184,7 +184,7 @@ ds1511_wdog_disable(void)
static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
{
u8 mon, day, dow, hrs, min, sec, yrs, cen;
- unsigned int flags;
+ unsigned long flags;
/*
* won't have to change this for a while
@@ -247,7 +247,7 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
{
unsigned int century;
- unsigned int flags;
+ unsigned long flags;
spin_lock_irqsave(&ds1511_lock, flags);
rtc_disable_update();
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index ba795a4db1e..9f996ec881c 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(rtc_year_days);
*/
void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
{
- register int days, month, year;
+ unsigned int days, month, year;
days = time / 86400;
time -= days * 86400;
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 316bfaa8087..a3e0880b38f 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/i2c.h>
@@ -803,6 +804,7 @@ static int m41t80_probe(struct i2c_client *client,
#ifdef CONFIG_RTC_DRV_M41T80_WDT
if (clientdata->features & M41T80_FEATURE_HT) {
+ save_client = client;
rc = misc_register(&wdt_dev);
if (rc)
goto exit;
@@ -811,7 +813,6 @@ static int m41t80_probe(struct i2c_client *client,
misc_deregister(&wdt_dev);
goto exit;
}
- save_client = client;
}
#endif
return 0;
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 29f47bacfc7..a6fa1f2f2ca 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -227,7 +227,7 @@ static int s35390a_probe(struct i2c_client *client,
/* This chip uses multiple addresses, use dummy devices for them */
for (i = 1; i < 8; ++i) {
s35390a->client[i] = i2c_new_dummy(client->adapter,
- client->addr + i, "rtc-s35390a");
+ client->addr + i);
if (!s35390a->client[i]) {
dev_err(&client->dev, "Address %02x unavailable\n",
client->addr + i);
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 110699bb478..1f88e9e914e 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -616,7 +616,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
goto err_badres;
}
- rtc->regbase = (void __iomem *)rtc->res->start;
+ rtc->regbase = ioremap_nocache(rtc->res->start, rtc->regsize);
if (unlikely(!rtc->regbase)) {
ret = -EINVAL;
goto err_badmap;
@@ -626,7 +626,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
&sh_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc_dev)) {
ret = PTR_ERR(rtc->rtc_dev);
- goto err_badmap;
+ goto err_unmap;
}
rtc->capabilities = RTC_DEF_CAPABILITIES;
@@ -653,7 +653,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"request period IRQ failed with %d, IRQ %d\n", ret,
rtc->periodic_irq);
- goto err_badmap;
+ goto err_unmap;
}
ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, IRQF_DISABLED,
@@ -663,7 +663,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
"request carry IRQ failed with %d, IRQ %d\n", ret,
rtc->carry_irq);
free_irq(rtc->periodic_irq, rtc);
- goto err_badmap;
+ goto err_unmap;
}
ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, IRQF_DISABLED,
@@ -674,7 +674,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
rtc->alarm_irq);
free_irq(rtc->carry_irq, rtc);
free_irq(rtc->periodic_irq, rtc);
- goto err_badmap;
+ goto err_unmap;
}
tmp = readb(rtc->regbase + RCR1);
@@ -684,6 +684,8 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
return 0;
+err_unmap:
+ iounmap(rtc->regbase);
err_badmap:
release_resource(rtc->res);
err_badres:
@@ -708,6 +710,8 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev)
release_resource(rtc->res);
+ iounmap(rtc->regbase);
+
platform_set_drvdata(pdev, NULL);
kfree(rtc);
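
The rtc-sh fix adds a dedicated err_unmap label so that every failure after ioremap_nocache() also unmaps the registers before falling through to the older cleanup. The usual shape of that probe-style unwinding, sketched with malloc()/free() standing in for the mapping and the memory region; all names are illustrative, and on success both resources deliberately stay held, as they would for the lifetime of the device.

    #include <stdlib.h>

    static int probe(int fail_late)
    {
        char *region, *mapping;
        int ret;

        region = malloc(32);            /* request_mem_region() stand-in */
        if (!region)
            return -1;

        mapping = malloc(64);           /* ioremap_nocache() stand-in */
        if (!mapping) {
            ret = -1;
            goto err_region;
        }

        if (fail_late) {                /* any later setup step failing... */
            ret = -1;
            goto err_unmap;             /* ...must undo the mapping too */
        }
        return 0;

    err_unmap:
        free(mapping);                  /* iounmap() in the real driver */
    err_region:
        free(region);                   /* release_resource() in the real driver */
        return ret;
    }

    int main(void)
    {
        return probe(0) ? EXIT_FAILURE : EXIT_SUCCESS;
    }
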
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index c1f2adefad4..5043150019a 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -965,8 +965,7 @@ tty3270_write_room(struct tty_struct *tty)
* Insert character into the screen at the current position with the
* current color and highlight. This function does NOT do cursor movement.
*/
-static int
-tty3270_put_character(struct tty3270 *tp, char ch)
+static void tty3270_put_character(struct tty3270 *tp, char ch)
{
struct tty3270_line *line;
struct tty3270_cell *cell;
@@ -986,7 +985,6 @@ tty3270_put_character(struct tty3270 *tp, char ch)
cell->character = tp->view.ascebc[(unsigned int) ch];
cell->highlight = tp->highlight;
cell->f_color = tp->f_color;
- return 1;
}
/*
@@ -1612,16 +1610,15 @@ tty3270_write(struct tty_struct * tty,
/*
* Put single characters to the ttys character buffer
*/
-static void
-tty3270_put_char(struct tty_struct *tty, unsigned char ch)
+static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
{
struct tty3270 *tp;
tp = tty->driver_data;
- if (!tp)
- return;
- if (tp->char_count < TTY3270_CHAR_BUF_SIZE)
- tp->char_buf[tp->char_count++] = ch;
+ if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
+ return 0;
+ tp->char_buf[tp->char_count++] = ch;
+ return 1;
}
/*
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 40ef948fcb3..9c21b8f43f9 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -19,6 +19,7 @@
#include <asm/cio.h>
#include <asm/uaccess.h>
+#include <asm/cio.h>
#include "blacklist.h"
#include "cio.h"
@@ -43,164 +44,169 @@ typedef enum {add, free} range_action;
* Function: blacklist_range
* (Un-)blacklist the devices from-to
*/
-static void
-blacklist_range (range_action action, unsigned int from, unsigned int to,
- unsigned int ssid)
+static int blacklist_range(range_action action, unsigned int from_ssid,
+ unsigned int to_ssid, unsigned int from,
+ unsigned int to, int msgtrigger)
{
- if (!to)
- to = from;
-
- if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
- printk (KERN_WARNING "cio: Invalid blacklist range "
- "0.%x.%04x to 0.%x.%04x, skipping\n",
- ssid, from, ssid, to);
- return;
+ if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
+ if (msgtrigger)
+ printk(KERN_WARNING "cio: Invalid cio_ignore range "
+ "0.%x.%04x-0.%x.%04x\n", from_ssid, from,
+ to_ssid, to);
+ return 1;
}
- for (; from <= to; from++) {
+
+ while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
+ (from <= to))) {
if (action == add)
- set_bit (from, bl_dev[ssid]);
+ set_bit(from, bl_dev[from_ssid]);
else
- clear_bit (from, bl_dev[ssid]);
+ clear_bit(from, bl_dev[from_ssid]);
+ from++;
+ if (from > __MAX_SUBCHANNEL) {
+ from_ssid++;
+ from = 0;
+ }
}
+
+ return 0;
}
-/*
- * Function: blacklist_busid
- * Get devno/busid from given string.
- * Shamelessly grabbed from dasd_devmap.c.
- */
-static int
-blacklist_busid(char **str, int *id0, int *ssid, int *devno)
+static int pure_hex(char **cp, unsigned int *val, int min_digit,
+ int max_digit, int max_val)
{
- int val, old_style;
- char *sav;
+ int diff;
+ unsigned int value;
- sav = *str;
+ diff = 0;
+ *val = 0;
- /* check for leading '0x' */
- old_style = 0;
- if ((*str)[0] == '0' && (*str)[1] == 'x') {
- *str += 2;
- old_style = 1;
- }
- if (!isxdigit((*str)[0])) /* We require at least one hex digit */
- goto confused;
- val = simple_strtoul(*str, str, 16);
- if (old_style || (*str)[0] != '.') {
- *id0 = *ssid = 0;
- if (val < 0 || val > 0xffff)
- goto confused;
- *devno = val;
- if ((*str)[0] != ',' && (*str)[0] != '-' &&
- (*str)[0] != '\n' && (*str)[0] != '\0')
- goto confused;
- return 0;
+ while (isxdigit(**cp) && (diff <= max_digit)) {
+
+ if (isdigit(**cp))
+ value = **cp - '0';
+ else
+ value = tolower(**cp) - 'a' + 10;
+ *val = *val * 16 + value;
+ (*cp)++;
+ diff++;
}
- /* New style x.y.z busid */
- if (val < 0 || val > 0xff)
- goto confused;
- *id0 = val;
- (*str)++;
- if (!isxdigit((*str)[0])) /* We require at least one hex digit */
- goto confused;
- val = simple_strtoul(*str, str, 16);
- if (val < 0 || val > 0xff || (*str)++[0] != '.')
- goto confused;
- *ssid = val;
- if (!isxdigit((*str)[0])) /* We require at least one hex digit */
- goto confused;
- val = simple_strtoul(*str, str, 16);
- if (val < 0 || val > 0xffff)
- goto confused;
- *devno = val;
- if ((*str)[0] != ',' && (*str)[0] != '-' &&
- (*str)[0] != '\n' && (*str)[0] != '\0')
- goto confused;
+
+ if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
+ return 1;
+
return 0;
-confused:
- strsep(str, ",\n");
- printk(KERN_WARNING "cio: Invalid cio_ignore parameter '%s'\n", sav);
- return 1;
}
-static int
-blacklist_parse_parameters (char *str, range_action action)
+static int parse_busid(char *str, int *cssid, int *ssid, int *devno,
+ int msgtrigger)
{
- int from, to, from_id0, to_id0, from_ssid, to_ssid;
-
- while (*str != 0 && *str != '\n') {
- range_action ra = action;
- while(*str == ',')
- str++;
- if (*str == '!') {
- ra = !action;
- ++str;
+ char *str_work;
+ int val, rc, ret;
+
+ rc = 1;
+
+ if (*str == '\0')
+ goto out;
+
+ /* old style */
+ str_work = str;
+ val = simple_strtoul(str, &str_work, 16);
+
+ if (*str_work == '\0') {
+ if (val <= __MAX_SUBCHANNEL) {
+ *devno = val;
+ *ssid = 0;
+ *cssid = 0;
+ rc = 0;
}
+ goto out;
+ }
- /*
- * Since we have to parse the proc commands and the
- * kernel arguments we have to check four cases
- */
- if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
- strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
- int j;
-
- str += 3;
- for (j=0; j <= __MAX_SSID; j++)
- blacklist_range(ra, 0, __MAX_SUBCHANNEL, j);
- } else {
- int rc;
+ /* new style */
+ str_work = str;
+ ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
+ if (ret || (str_work[0] != '.'))
+ goto out;
+ str_work++;
+ ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
+ if (ret || (str_work[0] != '\0'))
+ goto out;
+
+ rc = 0;
+out:
+ if (rc && msgtrigger)
+ printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n",
+ str);
+
+ return rc;
+}
- rc = blacklist_busid(&str, &from_id0,
- &from_ssid, &from);
- if (rc)
- continue;
- to = from;
- to_id0 = from_id0;
- to_ssid = from_ssid;
- if (*str == '-') {
- str++;
- rc = blacklist_busid(&str, &to_id0,
- &to_ssid, &to);
- if (rc)
- continue;
- }
- if (*str == '-') {
- printk(KERN_WARNING "cio: invalid cio_ignore "
- "parameter '%s'\n",
- strsep(&str, ",\n"));
- continue;
- }
- if ((from_id0 != to_id0) ||
- (from_ssid != to_ssid)) {
- printk(KERN_WARNING "cio: invalid cio_ignore "
- "range %x.%x.%04x-%x.%x.%04x\n",
- from_id0, from_ssid, from,
- to_id0, to_ssid, to);
- continue;
+static int blacklist_parse_parameters(char *str, range_action action,
+ int msgtrigger)
+{
+ int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+ int rc, totalrc;
+ char *parm;
+ range_action ra;
+
+ totalrc = 0;
+
+ while ((parm = strsep(&str, ","))) {
+ rc = 0;
+ ra = action;
+ if (*parm == '!') {
+ if (ra == add)
+ ra = free;
+ else
+ ra = add;
+ parm++;
+ }
+ if (strcmp(parm, "all") == 0) {
+ from_cssid = 0;
+ from_ssid = 0;
+ from = 0;
+ to_cssid = __MAX_CSSID;
+ to_ssid = __MAX_SSID;
+ to = __MAX_SUBCHANNEL;
+ } else {
+ rc = parse_busid(strsep(&parm, "-"), &from_cssid,
+ &from_ssid, &from, msgtrigger);
+ if (!rc) {
+ if (parm != NULL)
+ rc = parse_busid(parm, &to_cssid,
+ &to_ssid, &to,
+ msgtrigger);
+ else {
+ to_cssid = from_cssid;
+ to_ssid = from_ssid;
+ to = from;
+ }
}
- blacklist_range (ra, from, to, to_ssid);
}
+ if (!rc) {
+ rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
+ msgtrigger);
+ if (rc)
+ totalrc = 1;
+ } else
+ totalrc = 1;
}
- return 1;
+
+ return totalrc;
}
-/* Parsing the commandline for blacklist parameters, e.g. to blacklist
- * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of:
- * - cio_ignore=1234-1236
- * - cio_ignore=0x1234-0x1235,1236
- * - cio_ignore=0x1234,1235-1236
- * - cio_ignore=1236 cio_ignore=1234-0x1236
- * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235
- * - cio_ignore=0.0.1234-0.0.1236
- * - cio_ignore=0.0.1234,0x1235,1236
- * - ...
- */
static int __init
blacklist_setup (char *str)
{
CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
- return blacklist_parse_parameters (str, add);
+ if (blacklist_parse_parameters(str, add, 1))
+ return 0;
+ return 1;
}
__setup ("cio_ignore=", blacklist_setup);
@@ -224,27 +230,23 @@ is_blacklisted (int ssid, int devno)
* Function: blacklist_parse_proc_parameters
* parse the stuff which is piped to /proc/cio_ignore
*/
-static void
-blacklist_parse_proc_parameters (char *buf)
+static int blacklist_parse_proc_parameters(char *buf)
{
- if (strncmp (buf, "free ", 5) == 0) {
- blacklist_parse_parameters (buf + 5, free);
- } else if (strncmp (buf, "add ", 4) == 0) {
- /*
- * We don't need to check for known devices since
- * css_probe_device will handle this correctly.
- */
- blacklist_parse_parameters (buf + 4, add);
- } else {
- printk (KERN_WARNING "cio: cio_ignore: Parse error; \n"
- KERN_WARNING "try using 'free all|<devno-range>,"
- "<devno-range>,...'\n"
- KERN_WARNING "or 'add <devno-range>,"
- "<devno-range>,...'\n");
- return;
- }
+ int rc;
+ char *parm;
+
+ parm = strsep(&buf, " ");
+
+ if (strcmp("free", parm) == 0)
+ rc = blacklist_parse_parameters(buf, free, 0);
+ else if (strcmp("add", parm) == 0)
+ rc = blacklist_parse_parameters(buf, add, 0);
+ else
+ return 1;
css_schedule_reprobe();
+
+ return rc;
}
/* Iterator struct for all devices. */
@@ -328,6 +330,8 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
char *buf;
+ size_t i;
+ ssize_t rc, ret;
if (*offset)
return -EINVAL;
@@ -336,16 +340,27 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
buf = vmalloc (user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
+ memset(buf, 0, user_len + 1);
+
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
- vfree (buf);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out_free;
}
- buf[user_len] = '\0';
- blacklist_parse_proc_parameters (buf);
+ i = user_len - 1;
+ while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
+ buf[i] = '\0';
+ i--;
+ }
+ ret = blacklist_parse_proc_parameters(buf);
+ if (ret)
+ rc = -EINVAL;
+ else
+ rc = user_len;
+out_free:
vfree (buf);
- return user_len;
+ return rc;
}
static const struct seq_operations cio_ignore_proc_seq_ops = {
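
The reworked cio_ignore parser leans on strsep(): one pass splits the parameter string on commas, and each entry is optionally split once more on '-' into a from/to pair. A small glibc userspace sketch of that two-level tokenizing; parse_ranges() and the fixed hex base are simplifications, not the kernel code.

    #define _DEFAULT_SOURCE             /* for strsep() on glibc */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_ranges(char *str)
    {
        char *entry;

        while ((entry = strsep(&str, ","))) {
            char *from_s = strsep(&entry, "-");
            unsigned long from = strtoul(from_s, NULL, 16);
            /* if no '-' was present, entry is now NULL and to == from */
            unsigned long to = entry ? strtoul(entry, NULL, 16) : from;

            if (from > to)
                return 1;               /* reject inverted ranges */
            printf("range 0x%lx-0x%lx\n", from, to);
        }
        return 0;
    }

    int main(void)
    {
        char buf[] = "1234,0x2000-0x20ff";

        return parse_ranges(buf);
    }
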
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 08a57816130..82c6a2d4512 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -39,23 +39,6 @@ debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;
-int cio_show_msg;
-
-static int __init
-cio_setup (char *parm)
-{
- if (!strcmp (parm, "yes"))
- cio_show_msg = 1;
- else if (!strcmp (parm, "no"))
- cio_show_msg = 0;
- else
- printk(KERN_ERR "cio: cio_setup: "
- "invalid cio_msg parameter '%s'", parm);
- return 1;
-}
-
-__setup ("cio_msg=", cio_setup);
-
/*
* Function: cio_debug_init
* Initializes three debug logs for common I/O:
@@ -166,7 +149,7 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
stsch (sch->schid, &sch->schib);
- CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
+ CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
"subchannel 0.%x.%04x!\n", sch->schid.ssid,
sch->schid.sch_no);
sprintf(dbf_text, "no%s", sch->dev.bus_id);
@@ -567,10 +550,9 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
* ... just being curious we check for non I/O subchannels
*/
if (sch->st != 0) {
- CIO_DEBUG(KERN_INFO, 0,
- "Subchannel 0.%x.%04x reports "
- "non-I/O subchannel type %04X\n",
- sch->schid.ssid, sch->schid.sch_no, sch->st);
+ CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports "
+ "non-I/O subchannel type %04X\n",
+ sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
err = sch->st;
goto out;
@@ -588,7 +570,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
* This device must not be known to Linux. So we simply
* say that there is no device and return ENODEV.
*/
- CIO_MSG_EVENT(4, "Blacklisted device detected "
+ CIO_MSG_EVENT(6, "Blacklisted device detected "
"at devno %04X, subchannel set %x\n",
sch->schib.pmcw.dev, sch->schid.ssid);
err = -ENODEV;
@@ -601,12 +583,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
sch->lpm = sch->schib.pmcw.pam & sch->opm;
sch->isc = 3;
- CIO_DEBUG(KERN_INFO, 0,
- "Detected device %04x on subchannel 0.%x.%04X"
- " - PIM = %02X, PAM = %02X, POM = %02X\n",
- sch->schib.pmcw.dev, sch->schid.ssid,
- sch->schid.sch_no, sch->schib.pmcw.pim,
- sch->schib.pmcw.pam, sch->schib.pmcw.pom);
+ CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
+ "- PIM = %02X, PAM = %02X, POM = %02X\n",
+ sch->schib.pmcw.dev, sch->schid.ssid,
+ sch->schid.sch_no, sch->schib.pmcw.pim,
+ sch->schib.pmcw.pam, sch->schib.pmcw.pom);
/*
* We now have to initially ...
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 3c75412904d..6e933aebe01 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -118,6 +118,4 @@ extern void *cio_get_console_priv(void);
#define cio_get_console_priv() NULL
#endif
-extern int cio_show_msg;
-
#endif
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index d7429ef6c66..e64e8278c42 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -31,10 +31,4 @@ static inline void CIO_HEX_EVENT(int level, void *data, int length)
}
}
-#define CIO_DEBUG(printk_level, event_level, msg...) do { \
- if (cio_show_msg) \
- printk(printk_level "cio: " msg); \
- CIO_MSG_EVENT(event_level, msg); \
- } while (0)
-
#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 595e327d2f7..a76956512b2 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -570,7 +570,7 @@ static void reprobe_all(struct work_struct *unused)
{
int ret;
- CIO_MSG_EVENT(2, "reprobe start\n");
+ CIO_MSG_EVENT(4, "reprobe start\n");
need_reprobe = 0;
/* Make sure initial subchannel scan is done. */
@@ -578,7 +578,7 @@ static void reprobe_all(struct work_struct *unused)
atomic_read(&ccw_device_init_count) == 0);
ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
- CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
+ CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
need_reprobe);
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index abfd601d237..e22813db74a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -341,7 +341,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
rc = device_schedule_callback(&cdev->dev,
ccw_device_remove_orphan_cb);
if (rc)
- CIO_MSG_EVENT(2, "Couldn't unregister orphan "
+ CIO_MSG_EVENT(0, "Couldn't unregister orphan "
"0.%x.%04x\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -351,7 +351,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
rc = device_schedule_callback(cdev->dev.parent,
ccw_device_remove_sch_cb);
if (rc)
- CIO_MSG_EVENT(2, "Couldn't unregister disconnected device "
+ CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
"0.%x.%04x\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -397,7 +397,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
- CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -433,7 +433,7 @@ int ccw_device_set_online(struct ccw_device *cdev)
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
- CIO_MSG_EVENT(2, "ccw_device_online returned %d, "
+ CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -451,7 +451,7 @@ int ccw_device_set_online(struct ccw_device *cdev)
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else
- CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -803,7 +803,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
other_sch = to_subchannel(get_device(cdev->dev.parent));
ret = device_move(&cdev->dev, &sch->dev);
if (ret) {
- CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed "
+ CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
"(ret=%d)!\n", cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
put_device(&other_sch->dev);
@@ -933,7 +933,7 @@ io_subchannel_register(struct work_struct *work)
ret = device_reprobe(&cdev->dev);
if (ret)
/* We can't do much here. */
- CIO_MSG_EVENT(2, "device_reprobe() returned"
+ CIO_MSG_EVENT(0, "device_reprobe() returned"
" %d for 0.%x.%04x\n", ret,
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -1086,7 +1086,7 @@ static void ccw_device_move_to_sch(struct work_struct *work)
rc = device_move(&cdev->dev, &sch->dev);
mutex_unlock(&sch->reg_mutex);
if (rc) {
- CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel "
+ CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
"0.%x.%04x failed (ret=%d)!\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, sch->schid.ssid,
@@ -1446,8 +1446,7 @@ ccw_device_remove (struct device *dev)
wait_event(cdev->private->wait_q,
dev_fsm_final_state(cdev));
else
- //FIXME: we can't fail!
- CIO_MSG_EVENT(2, "ccw_device_offline returned %d, "
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -1524,7 +1523,7 @@ static int recovery_check(struct device *dev, void *data)
spin_lock_irq(cdev->ccwlock);
switch (cdev->private->state) {
case DEV_STATE_DISCONNECTED:
- CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
+ CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
@@ -1554,7 +1553,7 @@ static void recovery_work_func(struct work_struct *unused)
}
spin_unlock_irq(&recovery_lock);
} else
- CIO_MSG_EVENT(2, "recovery: end\n");
+ CIO_MSG_EVENT(4, "recovery: end\n");
}
static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1572,7 +1571,7 @@ void ccw_device_schedule_recovery(void)
{
unsigned long flags;
- CIO_MSG_EVENT(2, "recovery: schedule\n");
+ CIO_MSG_EVENT(4, "recovery: schedule\n");
spin_lock_irqsave(&recovery_lock, flags);
if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
recovery_phase = 0;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 99403b0a97a..e268d5a77c1 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -322,10 +322,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
same_dev = 0; /* Keep the compiler quiet... */
switch (state) {
case DEV_STATE_NOT_OPER:
- CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : unknown device %04x on subchannel "
- "0.%x.%04x\n", cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
+ CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
+ "subchannel 0.%x.%04x\n",
+ cdev->private->dev_id.devno,
+ sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -348,20 +348,19 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
return;
}
/* Issue device info message. */
- CIO_DEBUG(KERN_INFO, 2,
- "SenseID : device 0.%x.%04x reports: "
- "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
- "%04X/%02X\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno,
- cdev->id.cu_type, cdev->id.cu_model,
- cdev->id.dev_type, cdev->id.dev_model);
+ CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
+ "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
+ "%04X/%02X\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ cdev->id.cu_type, cdev->id.cu_model,
+ cdev->id.dev_type, cdev->id.dev_model);
break;
case DEV_STATE_BOXED:
- CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : boxed device %04x on subchannel "
- "0.%x.%04x\n", cdev->private->dev_id.devno,
- sch->schid.ssid, sch->schid.sch_no);
+ CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
+ " subchannel 0.%x.%04x\n",
+ cdev->private->dev_id.devno,
+ sch->schid.ssid, sch->schid.sch_no);
break;
}
cdev->private->state = state;
@@ -443,9 +442,8 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED)
- CIO_DEBUG(KERN_WARNING, 2,
- "Boxed device %04x on subchannel %04x\n",
- cdev->private->dev_id.devno, sch->schid.sch_no);
+ CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
@@ -900,7 +898,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
- CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited "
+ CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
"interrupt during w4sense...\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
@@ -1169,8 +1167,10 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
- CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n",
- cdev->private->state, dev_event);
+ CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
+ "0.%x.%04x\n", cdev->private->state, dev_event,
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
BUG();
}
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index dc4d87f77f6..cba7020517e 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -214,7 +214,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
* sense id information. So, for intervention required,
* we use the "whack it until it talks" strategy...
*/
- CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
+ CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
"0.%x.%04x reports cmd reject\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no);
@@ -239,7 +239,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
lpm = to_io_private(sch)->orb.lpm;
if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
- CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
+ CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
"on subchannel 0.%x.%04x is "
"'not operational'\n", lpm,
cdev->private->dev_id.devno,
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index c52449a1f9f..ba559053402 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -79,7 +79,7 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
/* ret is 0, -EBUSY, -EACCES or -ENODEV */
if (ret != -EACCES)
return ret;
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
+ CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not "
"operational'\n",
cdev->private->dev_id.devno,
@@ -159,7 +159,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
u8 lpm;
lpm = to_io_private(sch)->orb.lpm;
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
+ CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, lpm);
@@ -275,7 +275,7 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
return ret;
}
/* PGID command failed on this path. */
- CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
+ CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
@@ -317,7 +317,7 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
return ret;
}
/* nop command failed on this path. */
- CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
+ CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
@@ -362,7 +362,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
- CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
+ CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
@@ -391,7 +391,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
return -ETIME;
}
if (irb->scsw.cc == 3) {
- CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
+ CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
" lpm %02X, became 'not operational'\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no, cdev->private->imask);
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 4d4b54277c4..5080f343ad7 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -48,10 +48,11 @@ s390_collect_crw_info(void *param)
int ccode;
struct semaphore *sem;
unsigned int chain;
+ int ignore;
sem = (struct semaphore *)param;
repeat:
- down_interruptible(sem);
+ ignore = down_interruptible(sem);
chain = 0;
while (1) {
if (unlikely(chain > 1)) {
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 4fab0c23814..b87037ec980 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -41,7 +41,7 @@
#define BPP_DELAY 100
static const unsigned BPP_MAJOR = LP_MAJOR;
-static const char* dev_name = "bpp";
+static const char *bpp_dev_name = "bpp";
/* When switching from compatibility to a mode where I can read, try
the following mode first. */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 46d7e400c8b..81ccbd7f9e3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1679,6 +1679,7 @@ config MAC_SCSI
config SCSI_MAC_ESP
tristate "Macintosh NCR53c9[46] SCSI"
depends on MAC && SCSI
+ select SCSI_SPI_ATTRS
help
This is the NCR 53c9x SCSI controller found on most of the 68040
based Macintoshes.
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index f5215fd4b73..1dca1775f4b 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -3830,7 +3830,7 @@ static int __init aha152x_init(void)
iounmap(p);
}
if (!ok && setup_count == 0)
- return 0;
+ return -ENODEV;
printk(KERN_INFO "aha152x: BIOS test: passed, ");
#else
@@ -3909,14 +3909,14 @@ static int __init aha152x_init(void)
#endif
}
- return 1;
+ return 0;
}
static void __exit aha152x_exit(void)
{
- struct aha152x_hostdata *hd;
+ struct aha152x_hostdata *hd, *tmp;
- list_for_each_entry(hd, &aha152x_host_list, host_list) {
+ list_for_each_entry_safe(hd, tmp, &aha152x_host_list, host_list) {
struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
aha152x_release(shost);
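
Switching the aha152x exit loop to list_for_each_entry_safe() matters because aha152x_release() frees the host, and with it the node the loop is standing on. The underlying rule, shown on a plain singly linked list: fetch the next pointer before the current node is released.

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int value;
        struct node *next;
    };

    static void free_all(struct node **head)
    {
        struct node *n = *head, *tmp;

        while (n) {
            tmp = n->next;              /* saved before n is freed */
            free(n);
            n = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                break;
            n->value = i;
            n->next = head;
            head = n;
        }
        free_all(&head);
        printf("list released\n");
        return 0;
    }
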
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 0fb5bf4c43a..8508816f303 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1967,45 +1967,6 @@ cleanup:
return rcode;
}
-
-/*
- * This routine returns information about the system. This does not effect
- * any logic and if the info is wrong - it doesn't matter.
- */
-
-/* Get all the info we can not get from kernel services */
-static int adpt_system_info(void __user *buffer)
-{
- sysInfo_S si;
-
- memset(&si, 0, sizeof(si));
-
- si.osType = OS_LINUX;
- si.osMajorVersion = 0;
- si.osMinorVersion = 0;
- si.osRevision = 0;
- si.busType = SI_PCI_BUS;
- si.processorFamily = DPTI_sig.dsProcessorFamily;
-
-#if defined __i386__
- adpt_i386_info(&si);
-#elif defined (__ia64__)
- adpt_ia64_info(&si);
-#elif defined(__sparc__)
- adpt_sparc_info(&si);
-#elif defined (__alpha__)
- adpt_alpha_info(&si);
-#else
- si.processorType = 0xff ;
-#endif
- if(copy_to_user(buffer, &si, sizeof(si))){
- printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
@@ -2016,7 +1977,6 @@ static void adpt_ia64_info(sysInfo_S* si)
}
#endif
-
#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
@@ -2026,7 +1986,6 @@ static void adpt_sparc_info(sysInfo_S* si)
si->processorType = PROC_ULTRASPARC;
}
#endif
-
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
@@ -2038,7 +1997,6 @@ static void adpt_alpha_info(sysInfo_S* si)
#endif
#if defined __i386__
-
static void adpt_i386_info(sysInfo_S* si)
{
// This is all the info we need for now
@@ -2059,9 +2017,45 @@ static void adpt_i386_info(sysInfo_S* si)
break;
}
}
+#endif
+
+/*
+ * This routine returns information about the system. This does not affect
+ * any logic and if the info is wrong - it doesn't matter.
+ */
+/* Get all the info we can not get from kernel services */
+static int adpt_system_info(void __user *buffer)
+{
+ sysInfo_S si;
+
+ memset(&si, 0, sizeof(si));
+
+ si.osType = OS_LINUX;
+ si.osMajorVersion = 0;
+ si.osMinorVersion = 0;
+ si.osRevision = 0;
+ si.busType = SI_PCI_BUS;
+ si.processorFamily = DPTI_sig.dsProcessorFamily;
+
+#if defined __i386__
+ adpt_i386_info(&si);
+#elif defined (__ia64__)
+ adpt_ia64_info(&si);
+#elif defined(__sparc__)
+ adpt_sparc_info(&si);
+#elif defined (__alpha__)
+ adpt_alpha_info(&si);
+#else
+ si.processorType = 0xff ;
#endif
+ if (copy_to_user(buffer, &si, sizeof(si))){
+ printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
+ return -EFAULT;
+ }
+ return 0;
+}
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
ulong arg)
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index 924cd5a5167..337746d4604 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -316,19 +316,6 @@ static int adpt_close(struct inode *inode, struct file *file);
static void adpt_delay(int millisec);
#endif
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si);
-#endif
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si);
-#endif
-#if defined __alpha__
-static void adpt_sparc_info(sysInfo_S* si);
-#endif
-#if defined __i386__
-static void adpt_i386_info(sysInfo_S* si);
-#endif
-
#define PRINT_BUFFER_SIZE 512
#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 8e2e964af66..46771d4c81b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -550,7 +550,6 @@ static int __init gdth_search_isa(ulong32 bios_adr)
#endif /* CONFIG_ISA */
#ifdef CONFIG_PCI
-static bool gdth_pci_registered;
static bool gdth_search_vortex(ushort device)
{
@@ -3724,6 +3723,8 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
}
#ifdef GDTH_STATISTICS
+static unchar gdth_timer_running;
+
static void gdth_timeout(ulong data)
{
ulong32 i;
@@ -3731,7 +3732,10 @@ static void gdth_timeout(ulong data)
gdth_ha_str *ha;
ulong flags;
- BUG_ON(list_empty(&gdth_instances));
+ if(unlikely(list_empty(&gdth_instances))) {
+ gdth_timer_running = 0;
+ return;
+ }
ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
spin_lock_irqsave(&ha->smp_lock, flags);
@@ -3751,6 +3755,22 @@ static void gdth_timeout(ulong data)
add_timer(&gdth_timer);
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
+
+static void gdth_timer_init(void)
+{
+ if (gdth_timer_running)
+ return;
+ gdth_timer_running = 1;
+ TRACE2(("gdth_detect(): Initializing timer !\n"));
+ gdth_timer.expires = jiffies + HZ;
+ gdth_timer.data = 0L;
+ gdth_timer.function = gdth_timeout;
+ add_timer(&gdth_timer);
+}
+#else
+static inline void gdth_timer_init(void)
+{
+}
#endif
static void __init internal_setup(char *str,int *ints)
@@ -4735,6 +4755,7 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios)
if (error)
goto out_free_coal_stat;
list_add_tail(&ha->list, &gdth_instances);
+ gdth_timer_init();
scsi_scan_host(shp);
@@ -4865,6 +4886,7 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot)
if (error)
goto out_free_coal_stat;
list_add_tail(&ha->list, &gdth_instances);
+ gdth_timer_init();
scsi_scan_host(shp);
@@ -5011,6 +5033,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr,
list_add_tail(&ha->list, &gdth_instances);
pci_set_drvdata(ha->pdev, ha);
+ gdth_timer_init();
scsi_scan_host(shp);
@@ -5110,6 +5133,7 @@ static int __init gdth_init(void)
/* initializations */
gdth_polling = TRUE;
gdth_clear_events();
+ init_timer(&gdth_timer);
/* As default we do not probe for EISA or ISA controllers */
if (probe_eisa_isa) {
@@ -5132,23 +5156,17 @@ static int __init gdth_init(void)
#ifdef CONFIG_PCI
/* scanning for PCI controllers */
- if (pci_register_driver(&gdth_pci_driver) == 0)
- gdth_pci_registered = true;
+ if (pci_register_driver(&gdth_pci_driver)) {
+ gdth_ha_str *ha;
+
+ list_for_each_entry(ha, &gdth_instances, list)
+ gdth_remove_one(ha);
+ return -ENODEV;
+ }
#endif /* CONFIG_PCI */
TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
- if (list_empty(&gdth_instances))
- return -ENODEV;
-
-#ifdef GDTH_STATISTICS
- TRACE2(("gdth_detect(): Initializing timer !\n"));
- init_timer(&gdth_timer);
- gdth_timer.expires = jiffies + HZ;
- gdth_timer.data = 0L;
- gdth_timer.function = gdth_timeout;
- add_timer(&gdth_timer);
-#endif
major = register_chrdev(0,"gdth", &gdth_fops);
register_reboot_notifier(&gdth_notifier);
gdth_polling = FALSE;
@@ -5167,8 +5185,7 @@ static void __exit gdth_exit(void)
#endif
#ifdef CONFIG_PCI
- if (gdth_pci_registered)
- pci_unregister_driver(&gdth_pci_driver);
+ pci_unregister_driver(&gdth_pci_driver);
#endif
list_for_each_entry(ha, &gdth_instances, list)
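
gdth_timer_init() above arms the statistics timer lazily, the first time a controller is registered, and a flag keeps later probe calls from re-arming it. A toy version of that one-shot guard; the real driver relies on probe code running serially, and concurrent callers would additionally need a lock.

    #include <stdbool.h>
    #include <stdio.h>

    static bool timer_running;

    static void timer_init(void)
    {
        if (timer_running)
            return;                     /* already armed by an earlier probe */
        timer_running = true;
        printf("timer armed\n");
    }

    int main(void)
    {
        timer_init();                   /* first probe arms the timer */
        timer_init();                   /* later probes are no-ops */
        return 0;
    }
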
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 010c1b9b178..b43bf1d60da 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -730,7 +730,9 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
datalen))
rc = ISCSI_ERR_CONN_FAILED;
- }
+ } else
+ mod_timer(&conn->transport_timer,
+ jiffies + conn->recv_timeout);
iscsi_free_mgmt_task(conn, mtask);
break;
default:
@@ -1453,19 +1455,20 @@ static void iscsi_check_transport_timeouts(unsigned long data)
{
struct iscsi_conn *conn = (struct iscsi_conn *)data;
struct iscsi_session *session = conn->session;
- unsigned long timeout, next_timeout = 0, last_recv;
+ unsigned long recv_timeout, next_timeout = 0, last_recv;
spin_lock(&session->lock);
if (session->state != ISCSI_STATE_LOGGED_IN)
goto done;
- timeout = conn->recv_timeout;
- if (!timeout)
+ recv_timeout = conn->recv_timeout;
+ if (!recv_timeout)
goto done;
- timeout *= HZ;
+ recv_timeout *= HZ;
last_recv = conn->last_recv;
- if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
+ if (conn->ping_mtask &&
+ time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
jiffies)) {
iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
"expired, last rx %lu, last ping %lu, "
@@ -1476,15 +1479,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
return;
}
- if (time_before_eq(last_recv + timeout, jiffies)) {
- if (time_before_eq(conn->last_ping, last_recv)) {
- /* send a ping to try to provoke some traffic */
- debug_scsi("Sending nopout as ping on conn %p\n", conn);
- iscsi_send_nopout(conn, NULL);
- }
- next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
+ if (time_before_eq(last_recv + recv_timeout, jiffies)) {
+ /* send a ping to try to provoke some traffic */
+ debug_scsi("Sending nopout as ping on conn %p\n", conn);
+ iscsi_send_nopout(conn, NULL);
+ next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
} else
- next_timeout = last_recv + timeout;
+ next_timeout = last_recv + recv_timeout;
debug_scsi("Setting next tmo %lu\n", next_timeout);
mod_timer(&conn->transport_timer, next_timeout);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index fa060932d2b..51e2f299dbb 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2007,7 +2007,7 @@ qla1280_set_defaults(struct scsi_qla_host *ha)
nv->bus[bus].config_2.req_ack_active_negation = 1;
nv->bus[bus].config_2.data_line_active_negation = 1;
nv->bus[bus].selection_timeout = 250;
- nv->bus[bus].max_queue_depth = 256;
+ nv->bus[bus].max_queue_depth = 32;
if (IS_ISP1040(ha)) {
nv->bus[bus].bus_reset_delay = 3;
@@ -2051,7 +2051,7 @@ qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
status = qla1280_mailbox_command(ha, 0x0f, mb);
/* Save Tag queuing enable flag. */
- flag = (BIT_0 << target) & mb[0];
+ flag = (BIT_0 << target);
if (nv->bus[bus].target[target].parameter.tag_queuing)
ha->bus_settings[bus].qtag_enables |= flag;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 36acbcca2d4..62e6eb136a3 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -976,11 +976,15 @@ config SERIAL_68328_RTS_CTS
depends on SERIAL_68328
config SERIAL_COLDFIRE
- bool "ColdFire serial support"
+ bool "ColdFire serial support (DEPRECATED)"
depends on COLDFIRE
help
This driver supports the built-in serial ports of the Motorola ColdFire
family of CPUs.
+ This driver is deprecated because it supports only the old interface
+	  for serial drivers, and features like magic keys do not work.
+ Please switch to the new style driver because this driver will be
+ removed soon.
config SERIAL_MCF
bool "Coldfire serial support (new style driver)"
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 8a2f6a1baa7..d6b4ead693b 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -65,9 +65,6 @@ static void bfin_serial_stop_tx(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
struct circ_buf *xmit = &uart->port.info->xmit;
-#if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA)
- unsigned short ier;
-#endif
while (!(UART_GET_LSR(uart) & TEMT))
cpu_relax();
@@ -82,12 +79,8 @@ static void bfin_serial_stop_tx(struct uart_port *port)
#ifdef CONFIG_BF54x
/* Clear TFI bit */
UART_PUT_LSR(uart, TFI);
- UART_CLEAR_IER(uart, ETBEI);
-#else
- ier = UART_GET_IER(uart);
- ier &= ~ETBEI;
- UART_PUT_IER(uart, ier);
#endif
+ UART_CLEAR_IER(uart, ETBEI);
#endif
}
@@ -102,14 +95,7 @@ static void bfin_serial_start_tx(struct uart_port *port)
if (uart->tx_done)
bfin_serial_dma_tx_chars(uart);
#else
-#ifdef CONFIG_BF54x
UART_SET_IER(uart, ETBEI);
-#else
- unsigned short ier;
- ier = UART_GET_IER(uart);
- ier |= ETBEI;
- UART_PUT_IER(uart, ier);
-#endif
bfin_serial_tx_chars(uart);
#endif
}
@@ -120,21 +106,10 @@ static void bfin_serial_start_tx(struct uart_port *port)
static void bfin_serial_stop_rx(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-#ifdef CONFIG_KGDB_UART
- if (uart->port.line != CONFIG_KGDB_UART_PORT) {
+#ifdef CONFIG_KGDB_UART
+ if (uart->port.line != CONFIG_KGDB_UART_PORT)
#endif
-#ifdef CONFIG_BF54x
UART_CLEAR_IER(uart, ERBFI);
-#else
- unsigned short ier;
-
- ier = UART_GET_IER(uart);
- ier &= ~ERBFI;
- UART_PUT_IER(uart, ier);
-#endif
-#ifdef CONFIG_KGDB_UART
- }
-#endif
}
/*
@@ -161,10 +136,7 @@ void kgdb_put_debug_char(int chr)
SSYNC();
}
-#ifndef CONFIG_BF54x
- UART_PUT_LCR(uart, UART_GET_LCR(uart)&(~DLAB));
- SSYNC();
-#endif
+ UART_CLEAR_DLAB(uart);
UART_PUT_CHAR(uart, (unsigned char)chr);
SSYNC();
}
@@ -183,10 +155,7 @@ int kgdb_get_debug_char(void)
while(!(UART_GET_LSR(uart) & DR)) {
SSYNC();
}
-#ifndef CONFIG_BF54x
- UART_PUT_LCR(uart, UART_GET_LCR(uart)&(~DLAB));
- SSYNC();
-#endif
+ UART_CLEAR_DLAB(uart);
chr = UART_GET_CHAR(uart);
SSYNC();
@@ -208,9 +177,6 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
struct tty_struct *tty = uart->port.info->tty;
unsigned int status, ch, flg;
static struct timeval anomaly_start = { .tv_sec = 0 };
-#ifdef CONFIG_KGDB_UART
- struct pt_regs *regs = get_irq_regs();
-#endif
status = UART_GET_LSR(uart);
UART_CLEAR_LSR(uart);
@@ -220,6 +186,7 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
#ifdef CONFIG_KGDB_UART
if (uart->port.line == CONFIG_KGDB_UART_PORT) {
+ struct pt_regs *regs = get_irq_regs();
if (uart->port.cons->index == CONFIG_KGDB_UART_PORT && ch == 0x1) { /* Ctrl + A */
kgdb_breakkey_pressed(regs);
return;
@@ -391,7 +358,6 @@ static void bfin_serial_do_work(struct work_struct *work)
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
struct circ_buf *xmit = &uart->port.info->xmit;
- unsigned short ier;
uart->tx_done = 0;
@@ -429,13 +395,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
set_dma_x_modify(uart->tx_dma_channel, 1);
enable_dma(uart->tx_dma_channel);
-#ifdef CONFIG_BF54x
UART_SET_IER(uart, ETBEI);
-#else
- ier = UART_GET_IER(uart);
- ier |= ETBEI;
- UART_PUT_IER(uart, ier);
-#endif
}
static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
@@ -513,19 +473,12 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
struct circ_buf *xmit = &uart->port.info->xmit;
- unsigned short ier;
spin_lock(&uart->port.lock);
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
disable_dma(uart->tx_dma_channel);
clear_dma_irqstat(uart->tx_dma_channel);
-#ifdef CONFIG_BF54x
UART_CLEAR_IER(uart, ETBEI);
-#else
- ier = UART_GET_IER(uart);
- ier &= ~ETBEI;
- UART_PUT_IER(uart, ier);
-#endif
xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx += uart->tx_count;
@@ -701,7 +654,6 @@ static int bfin_serial_startup(struct uart_port *port)
# endif
}
-
if (request_irq
(uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
"BFIN_UART_TX", uart)) {
@@ -710,11 +662,7 @@ static int bfin_serial_startup(struct uart_port *port)
return -EBUSY;
}
#endif
-#ifdef CONFIG_BF54x
UART_SET_IER(uart, ERBFI);
-#else
- UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI);
-#endif
return 0;
}
@@ -810,26 +758,15 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
UART_PUT_IER(uart, 0);
#endif
-#ifndef CONFIG_BF54x
/* Set DLAB in LCR to Access DLL and DLH */
- val = UART_GET_LCR(uart);
- val |= DLAB;
- UART_PUT_LCR(uart, val);
- SSYNC();
-#endif
+ UART_SET_DLAB(uart);
UART_PUT_DLL(uart, quot & 0xFF);
- SSYNC();
UART_PUT_DLH(uart, (quot >> 8) & 0xFF);
SSYNC();
-#ifndef CONFIG_BF54x
/* Clear DLAB in LCR to Access THR RBR IER */
- val = UART_GET_LCR(uart);
- val &= ~DLAB;
- UART_PUT_LCR(uart, val);
- SSYNC();
-#endif
+ UART_CLEAR_DLAB(uart);
UART_PUT_LCR(uart, lcr);
@@ -992,8 +929,7 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
status = UART_GET_IER(uart) & (ERBFI | ETBEI);
if (status == (ERBFI | ETBEI)) {
/* ok, the port was enabled */
- unsigned short lcr, val;
- unsigned short dlh, dll;
+ u16 lcr, dlh, dll;
lcr = UART_GET_LCR(uart);
@@ -1010,22 +946,14 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
case 2: *bits = 7; break;
case 3: *bits = 8; break;
}
-#ifndef CONFIG_BF54x
/* Set DLAB in LCR to Access DLL and DLH */
- val = UART_GET_LCR(uart);
- val |= DLAB;
- UART_PUT_LCR(uart, val);
-#endif
+ UART_SET_DLAB(uart);
dll = UART_GET_DLL(uart);
dlh = UART_GET_DLH(uart);
-#ifndef CONFIG_BF54x
/* Clear DLAB in LCR to Access THR RBR IER */
- val = UART_GET_LCR(uart);
- val &= ~DLAB;
- UART_PUT_LCR(uart, val);
-#endif
+ UART_CLEAR_DLAB(uart);
*baud = get_sclk() / (16*(dll | dlh << 8));
}
@@ -1290,11 +1218,7 @@ static int __init bfin_serial_init(void)
request_irq(uart->port.irq, bfin_serial_rx_int,
IRQF_DISABLED, "BFIN_UART_RX", uart);
pr_info("Request irq for kgdb uart port\n");
-#ifdef CONFIG_BF54x
UART_SET_IER(uart, ERBFI);
-#else
- UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI);
-#endif
SSYNC();
t.c_cflag = CS8|B57600;
t.c_iflag = 0;
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index f9fa237aa94..3e0366eab41 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -3808,7 +3808,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
shutdown(info);
rs_flush_buffer(tty);
- tty_ldisc_flush_buffer(tty);
+ tty_ldisc_flush(tty);
tty->closing = 0;
info->event = 0;
info->tty = 0;
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index 43af40d59b8..56007cc8a9b 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -1,3 +1,4 @@
+#warning This driver is deprecated. Check Kconfig for details.
/*
* mcfserial.c -- serial driver for ColdFire internal UARTS.
*
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 1e2b9d826f6..eab03273379 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -556,7 +556,7 @@ static int uart_chars_in_buffer(struct tty_struct *tty)
static void uart_flush_buffer(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->port;
+ struct uart_port *port;
unsigned long flags;
/*
@@ -568,6 +568,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
return;
}
+ port = state->port;
pr_debug("uart_flush_buffer(%d) called\n", tty->index);
spin_lock_irqsave(&port->lock, flags);
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 96910618771..8fdafc27fce 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -42,14 +42,12 @@
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/serial_sci.h>
-
-#ifdef CONFIG_CPU_FREQ
#include <linux/notifier.h>
#include <linux/cpufreq.h>
-#endif
-
-#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
+#include <linux/clk.h>
#include <linux/ctype.h>
+
+#ifdef CONFIG_SUPERH
#include <asm/clock.h>
#include <asm/sh_bios.h>
#include <asm/kgdb.h>
@@ -80,7 +78,7 @@ struct sci_port {
struct timer_list break_timer;
int break_flag;
-#if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
+#ifdef CONFIG_SUPERH
/* Port clock */
struct clk *clk;
#endif
@@ -365,21 +363,19 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
{
unsigned int fcr_val = 0;
+ unsigned short data;
- if (cflag & CRTSCTS) {
- fcr_val |= SCFCR_MCE;
-
- ctrl_outw(0x0000, PORT_PSCR);
- } else {
- unsigned short data;
-
- data = ctrl_inw(PORT_PSCR);
- data &= 0x033f;
- data |= 0x0400;
- ctrl_outw(data, PORT_PSCR);
+ if (port->mapbase == 0xffe00000) {
+ data = ctrl_inw(PSCR);
+ data &= ~0x03cf;
+ if (cflag & CRTSCTS)
+ fcr_val |= SCFCR_MCE;
+ else
+ data |= 0x0340;
- ctrl_outw(ctrl_inw(SCSPTR0) & 0x17, SCSPTR0);
+ ctrl_outw(data, PSCR);
}
+	/* SCIF1 and SCIF2 should be set up by board code */
sci_out(port, SCFCR, fcr_val);
}
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index fa8700a968f..eb84833233f 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -76,12 +76,13 @@
# define SCSCR_INIT(port) 0x32 /* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */
# define SCIF_ONLY
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
-# define SCSPTR0 SCPDR0
+# define PADR 0xA4050120
+# define PSDR 0xA405013e
+# define PWDR 0xA4050166
+# define PSCR 0xA405011E
# define SCIF_ORER 0x0001 /* overrun error bit */
# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
# define SCIF_ONLY
-# define PORT_PSCR 0xA405011E
#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
# define SCSPTR0 SCPDR0
@@ -320,7 +321,7 @@
unsigned int addr = port->mapbase + (offset); \
if ((size) == 8) { \
ctrl_outb(value, addr); \
- } else { \
+ } else if ((size) == 16) { \
ctrl_outw(value, addr); \
}
@@ -451,7 +452,11 @@ SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
#else
SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
+#if defined(CONFIG_CPU_SUBTYPE_SH7722)
+SCIF_FNS(SCSPTR, 0, 0, 0, 0)
+#else
SCIF_FNS(SCSPTR, 0, 0, 0x20, 16)
+#endif
SCIF_FNS(SCLSR, 0, 0, 0x24, 16)
#endif
#endif
@@ -593,13 +598,25 @@ static inline int sci_rxd_in(struct uart_port *port)
return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return ctrl_inb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
return 1;
}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ if (port->mapbase == 0xffe00000)
+ return ctrl_inb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
+ if (port->mapbase == 0xffe10000)
+ return ctrl_inb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
+ if (port->mapbase == 0xffe20000)
+ return ctrl_inb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
+
+ return 1;
+}
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
static inline int sci_rxd_in(struct uart_port *port)
{
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index fae9e8f3d09..66ec5d8808d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -126,7 +126,6 @@ config SPI_MPC52xx_PSC
config SPI_MPC83xx
tristate "Freescale MPC83xx/QUICC Engine SPI controller"
depends on SPI_MASTER && (PPC_83xx || QUICC_ENGINE) && EXPERIMENTAL
- select SPI_BITBANG
help
This enables using the Freescale MPC83xx and QUICC Engine SPI
controllers in master mode.
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 90729469d48..681d62325d3 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -1,5 +1,5 @@
/*
- * MPC52xx SPC in SPI mode driver.
+ * MPC52xx PSC in SPI mode driver.
*
* Maintainer: Dragos Carp
*
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 654bb58be63..0c452c46ab0 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1567,7 +1567,7 @@ static int pxa2xx_spi_resume(struct platform_device *pdev)
int status = 0;
/* Enable the SSP clock */
- clk_disable(ssp->clk);
+ clk_enable(ssp->clk);
/* Start the queue running */
status = start_queue(drv_data);
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index 189f706b9e4..6832da6f710 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -49,6 +49,7 @@ struct mpc83xx_spi_reg {
#define SPMODE_LEN(x) ((x) << 20)
#define SPMODE_PM(x) ((x) << 16)
#define SPMODE_OP (1 << 14)
+#define SPMODE_CG(x) ((x) << 7)
/*
* Default for SPI Mode:
@@ -67,10 +68,6 @@ struct mpc83xx_spi_reg {
/* SPI Controller driver's private data. */
struct mpc83xx_spi {
- /* bitbang has to be first */
- struct spi_bitbang bitbang;
- struct completion done;
-
struct mpc83xx_spi_reg __iomem *base;
/* rx & tx bufs from the spi_transfer */
@@ -82,7 +79,7 @@ struct mpc83xx_spi {
u32(*get_tx) (struct mpc83xx_spi *);
unsigned int count;
- u32 irq;
+ int irq;
unsigned nsecs; /* (clock cycle time)/2 */
@@ -94,6 +91,25 @@ struct mpc83xx_spi {
void (*activate_cs) (u8 cs, u8 polarity);
void (*deactivate_cs) (u8 cs, u8 polarity);
+
+ u8 busy;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ struct list_head queue;
+ spinlock_t lock;
+
+ struct completion done;
+};
+
+struct spi_mpc83xx_cs {
+ /* functions to deal with different sized buffers */
+ void (*get_rx) (u32 rx_data, struct mpc83xx_spi *);
+ u32 (*get_tx) (struct mpc83xx_spi *);
+ u32 rx_shift; /* RX data reg shift when in qe mode */
+ u32 tx_shift; /* TX data reg shift when in qe mode */
+ u32 hw_mode; /* Holds HW mode register settings */
};
static inline void mpc83xx_spi_write_reg(__be32 __iomem * reg, u32 val)
@@ -137,6 +153,7 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc83xx_spi *mpc83xx_spi;
u8 pol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ struct spi_mpc83xx_cs *cs = spi->controller_state;
mpc83xx_spi = spi_master_get_devdata(spi->master);
@@ -147,50 +164,26 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
if (value == BITBANG_CS_ACTIVE) {
u32 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
- u32 len = spi->bits_per_word;
- u8 pm;
- if (len == 32)
- len = 0;
- else
- len = len - 1;
-
- /* mask out bits we are going to set */
- regval &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
- | SPMODE_LEN(0xF) | SPMODE_DIV16
- | SPMODE_PM(0xF) | SPMODE_REV | SPMODE_LOOP);
-
- if (spi->mode & SPI_CPHA)
- regval |= SPMODE_CP_BEGIN_EDGECLK;
- if (spi->mode & SPI_CPOL)
- regval |= SPMODE_CI_INACTIVEHIGH;
- if (!(spi->mode & SPI_LSB_FIRST))
- regval |= SPMODE_REV;
- if (spi->mode & SPI_LOOP)
- regval |= SPMODE_LOOP;
-
- regval |= SPMODE_LEN(len);
-
- if ((mpc83xx_spi->spibrg / spi->max_speed_hz) >= 64) {
- pm = mpc83xx_spi->spibrg / (spi->max_speed_hz * 64) - 1;
- if (pm > 0x0f) {
- dev_err(&spi->dev, "Requested speed is too "
- "low: %d Hz. Will use %d Hz instead.\n",
- spi->max_speed_hz,
- mpc83xx_spi->spibrg / 1024);
- pm = 0x0f;
- }
- regval |= SPMODE_PM(pm) | SPMODE_DIV16;
- } else {
- pm = mpc83xx_spi->spibrg / (spi->max_speed_hz * 4);
- if (pm)
- pm--;
- regval |= SPMODE_PM(pm);
+ mpc83xx_spi->rx_shift = cs->rx_shift;
+ mpc83xx_spi->tx_shift = cs->tx_shift;
+ mpc83xx_spi->get_rx = cs->get_rx;
+ mpc83xx_spi->get_tx = cs->get_tx;
+
+ if (cs->hw_mode != regval) {
+ unsigned long flags;
+ void *tmp_ptr = &mpc83xx_spi->base->mode;
+
+ regval = cs->hw_mode;
+ /* Turn off IRQs locally to minimize time that
+ * SPI is disabled
+ */
+ local_irq_save(flags);
+			/* Turn off SPI unit prior to changing mode */
+ mpc83xx_spi_write_reg(tmp_ptr, regval & ~SPMODE_ENABLE);
+ mpc83xx_spi_write_reg(tmp_ptr, regval);
+ local_irq_restore(flags);
}
-
- /* Turn off SPI unit prior changing mode */
- mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, 0);
- mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
if (mpc83xx_spi->activate_cs)
mpc83xx_spi->activate_cs(spi->chip_select, pol);
}
@@ -201,8 +194,9 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct mpc83xx_spi *mpc83xx_spi;
u32 regval;
- u8 bits_per_word;
+ u8 bits_per_word, pm;
u32 hz;
+ struct spi_mpc83xx_cs *cs = spi->controller_state;
mpc83xx_spi = spi_master_get_devdata(spi->master);
@@ -223,61 +217,191 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
|| ((bits_per_word > 16) && (bits_per_word != 32)))
return -EINVAL;
- mpc83xx_spi->rx_shift = 0;
- mpc83xx_spi->tx_shift = 0;
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ cs->rx_shift = 0;
+ cs->tx_shift = 0;
if (bits_per_word <= 8) {
- mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
- mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+ cs->get_rx = mpc83xx_spi_rx_buf_u8;
+ cs->get_tx = mpc83xx_spi_tx_buf_u8;
if (mpc83xx_spi->qe_mode) {
- mpc83xx_spi->rx_shift = 16;
- mpc83xx_spi->tx_shift = 24;
+ cs->rx_shift = 16;
+ cs->tx_shift = 24;
}
} else if (bits_per_word <= 16) {
- mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16;
- mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16;
+ cs->get_rx = mpc83xx_spi_rx_buf_u16;
+ cs->get_tx = mpc83xx_spi_tx_buf_u16;
if (mpc83xx_spi->qe_mode) {
- mpc83xx_spi->rx_shift = 16;
- mpc83xx_spi->tx_shift = 16;
+ cs->rx_shift = 16;
+ cs->tx_shift = 16;
}
} else if (bits_per_word <= 32) {
- mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32;
- mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32;
+ cs->get_rx = mpc83xx_spi_rx_buf_u32;
+ cs->get_tx = mpc83xx_spi_tx_buf_u32;
} else
return -EINVAL;
if (mpc83xx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) {
- mpc83xx_spi->tx_shift = 0;
+ cs->tx_shift = 0;
if (bits_per_word <= 8)
- mpc83xx_spi->rx_shift = 8;
+ cs->rx_shift = 8;
else
- mpc83xx_spi->rx_shift = 0;
+ cs->rx_shift = 0;
}
- /* nsecs = (clock period)/2 */
- if (!hz)
- hz = spi->max_speed_hz;
- mpc83xx_spi->nsecs = (1000000000 / 2) / hz;
- if (mpc83xx_spi->nsecs > MAX_UDELAY_MS * 1000)
- return -EINVAL;
+ mpc83xx_spi->rx_shift = cs->rx_shift;
+ mpc83xx_spi->tx_shift = cs->tx_shift;
+ mpc83xx_spi->get_rx = cs->get_rx;
+ mpc83xx_spi->get_tx = cs->get_tx;
if (bits_per_word == 32)
bits_per_word = 0;
else
bits_per_word = bits_per_word - 1;
- regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
-
/* mask out bits we are going to set */
- regval &= ~(SPMODE_LEN(0xF) | SPMODE_REV);
- regval |= SPMODE_LEN(bits_per_word);
- if (!(spi->mode & SPI_LSB_FIRST))
- regval |= SPMODE_REV;
+ cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
+ | SPMODE_PM(0xF));
+
+ cs->hw_mode |= SPMODE_LEN(bits_per_word);
+
+ if ((mpc83xx_spi->spibrg / hz) >= 64) {
+ pm = mpc83xx_spi->spibrg / (hz * 64) - 1;
+ if (pm > 0x0f) {
+ dev_err(&spi->dev, "Requested speed is too "
+ "low: %d Hz. Will use %d Hz instead.\n",
+ hz, mpc83xx_spi->spibrg / 1024);
+ pm = 0x0f;
+ }
+ cs->hw_mode |= SPMODE_PM(pm) | SPMODE_DIV16;
+ } else {
+ pm = mpc83xx_spi->spibrg / (hz * 4);
+ if (pm)
+ pm--;
+ cs->hw_mode |= SPMODE_PM(pm);
+ }
+ regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
+ if (cs->hw_mode != regval) {
+ unsigned long flags;
+ void *tmp_ptr = &mpc83xx_spi->base->mode;
+
+ regval = cs->hw_mode;
+ /* Turn off IRQs locally to minimize time
+ * that SPI is disabled
+ */
+ local_irq_save(flags);
+		/* Turn off SPI unit prior to changing mode */
+ mpc83xx_spi_write_reg(tmp_ptr, regval & ~SPMODE_ENABLE);
+ mpc83xx_spi_write_reg(tmp_ptr, regval);
+ local_irq_restore(flags);
+ }
+ return 0;
+}
- /* Turn off SPI unit prior changing mode */
- mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, 0);
- mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
+static int mpc83xx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct mpc83xx_spi *mpc83xx_spi;
+ u32 word, len, bits_per_word;
- return 0;
+ mpc83xx_spi = spi_master_get_devdata(spi->master);
+
+ mpc83xx_spi->tx = t->tx_buf;
+ mpc83xx_spi->rx = t->rx_buf;
+ bits_per_word = spi->bits_per_word;
+ if (t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+ len = t->len;
+ if (bits_per_word > 8)
+ len /= 2;
+ if (bits_per_word > 16)
+ len /= 2;
+ mpc83xx_spi->count = len;
+ INIT_COMPLETION(mpc83xx_spi->done);
+
+ /* enable rx ints */
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, SPIM_NE);
+
+ /* transmit word */
+ word = mpc83xx_spi->get_tx(mpc83xx_spi);
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, word);
+
+ wait_for_completion(&mpc83xx_spi->done);
+
+ /* disable rx ints */
+ mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0);
+
+ return mpc83xx_spi->count;
+}
+
+static void mpc83xx_spi_work(struct work_struct *work)
+{
+ struct mpc83xx_spi *mpc83xx_spi =
+ container_of(work, struct mpc83xx_spi, work);
+
+ spin_lock_irq(&mpc83xx_spi->lock);
+ mpc83xx_spi->busy = 1;
+ while (!list_empty(&mpc83xx_spi->queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status, nsecs = 50;
+
+ m = container_of(mpc83xx_spi->queue.next,
+ struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irq(&mpc83xx_spi->lock);
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ /* Don't allow changes if CS is active */
+ status = -EINVAL;
+
+ if (cs_change)
+ status = mpc83xx_spi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (cs_change)
+ mpc83xx_spi_chipselect(spi, BITBANG_CS_ACTIVE);
+ cs_change = t->cs_change;
+ if (t->len)
+ status = mpc83xx_spi_bufs(spi, t);
+ if (status) {
+ status = -EMSGSIZE;
+ break;
+ }
+ m->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change) {
+ ndelay(nsecs);
+ mpc83xx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
+ }
+ }
+
+ m->status = status;
+ m->complete(m->context);
+
+ if (status || !cs_change) {
+ ndelay(nsecs);
+ mpc83xx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
+ }
+
+ mpc83xx_spi_setup_transfer(spi, NULL);
+
+ spin_lock_irq(&mpc83xx_spi->lock);
+ }
+ mpc83xx_spi->busy = 0;
+ spin_unlock_irq(&mpc83xx_spi->lock);
}
/* the spi->mode bits understood by this driver: */
@@ -286,9 +410,10 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static int mpc83xx_spi_setup(struct spi_device *spi)
{
- struct spi_bitbang *bitbang;
struct mpc83xx_spi *mpc83xx_spi;
int retval;
+ u32 hw_mode;
+ struct spi_mpc83xx_cs *cs = spi->controller_state;
if (spi->mode & ~MODEBITS) {
dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
@@ -299,63 +424,56 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
- bitbang = spi_master_get_devdata(spi->master);
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
mpc83xx_spi = spi_master_get_devdata(spi->master);
if (!spi->bits_per_word)
spi->bits_per_word = 8;
+	hw_mode = cs->hw_mode; /* Save original settings */
+ cs->hw_mode = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
+ | SPMODE_REV | SPMODE_LOOP);
+
+ if (spi->mode & SPI_CPHA)
+ cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
+ if (spi->mode & SPI_CPOL)
+ cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
+ if (!(spi->mode & SPI_LSB_FIRST))
+ cs->hw_mode |= SPMODE_REV;
+ if (spi->mode & SPI_LOOP)
+ cs->hw_mode |= SPMODE_LOOP;
+
retval = mpc83xx_spi_setup_transfer(spi, NULL);
- if (retval < 0)
+ if (retval < 0) {
+ cs->hw_mode = hw_mode; /* Restore settings */
return retval;
+ }
- dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u Hz\n",
__func__, spi->mode & (SPI_CPOL | SPI_CPHA),
- spi->bits_per_word, 2 * mpc83xx_spi->nsecs);
-
+ spi->bits_per_word, spi->max_speed_hz);
+#if 0 /* Don't think this is needed */
/* NOTE we _need_ to call chipselect() early, ideally with adapter
* setup, unless the hardware defaults cooperate to avoid confusion
* between normal (active low) and inverted chipselects.
*/
/* deselect chip (low or high) */
- spin_lock(&bitbang->lock);
- if (!bitbang->busy) {
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(mpc83xx_spi->nsecs);
- }
- spin_unlock(&bitbang->lock);
-
+ spin_lock(&mpc83xx_spi->lock);
+ if (!mpc83xx_spi->busy)
+ mpc83xx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
+ spin_unlock(&mpc83xx_spi->lock);
+#endif
return 0;
}
-static int mpc83xx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
-{
- struct mpc83xx_spi *mpc83xx_spi;
- u32 word;
-
- mpc83xx_spi = spi_master_get_devdata(spi->master);
-
- mpc83xx_spi->tx = t->tx_buf;
- mpc83xx_spi->rx = t->rx_buf;
- mpc83xx_spi->count = t->len;
- INIT_COMPLETION(mpc83xx_spi->done);
-
- /* enable rx ints */
- mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, SPIM_NE);
-
- /* transmit word */
- word = mpc83xx_spi->get_tx(mpc83xx_spi);
- mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, word);
-
- wait_for_completion(&mpc83xx_spi->done);
-
- /* disable rx ints */
- mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0);
-
- return t->len - mpc83xx_spi->count;
-}
-
irqreturn_t mpc83xx_spi_irq(s32 irq, void *context_data)
{
struct mpc83xx_spi *mpc83xx_spi = context_data;
@@ -395,6 +513,28 @@ irqreturn_t mpc83xx_spi_irq(s32 irq, void *context_data)
return ret;
}
+static int mpc83xx_spi_transfer(struct spi_device *spi,
+ struct spi_message *m)
+{
+ struct mpc83xx_spi *mpc83xx_spi = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spin_lock_irqsave(&mpc83xx_spi->lock, flags);
+ list_add_tail(&m->queue, &mpc83xx_spi->queue);
+ queue_work(mpc83xx_spi->workqueue, &mpc83xx_spi->work);
+ spin_unlock_irqrestore(&mpc83xx_spi->lock, flags);
+
+ return 0;
+}
+
+
+static void mpc83xx_spi_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
static int __init mpc83xx_spi_probe(struct platform_device *dev)
{
@@ -426,11 +566,11 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
ret = -ENODEV;
goto free_master;
}
+ master->setup = mpc83xx_spi_setup;
+ master->transfer = mpc83xx_spi_transfer;
+ master->cleanup = mpc83xx_spi_cleanup;
+
mpc83xx_spi = spi_master_get_devdata(master);
- mpc83xx_spi->bitbang.master = spi_master_get(master);
- mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect;
- mpc83xx_spi->bitbang.setup_transfer = mpc83xx_spi_setup_transfer;
- mpc83xx_spi->bitbang.txrx_bufs = mpc83xx_spi_bufs;
mpc83xx_spi->activate_cs = pdata->activate_cs;
mpc83xx_spi->deactivate_cs = pdata->deactivate_cs;
mpc83xx_spi->qe_mode = pdata->qe_mode;
@@ -445,7 +585,6 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
mpc83xx_spi->tx_shift = 24;
}
- mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup;
init_completion(&mpc83xx_spi->done);
mpc83xx_spi->base = ioremap(r->start, r->end - r->start + 1);
@@ -483,11 +622,21 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
regval |= SPMODE_OP;
mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
+ spin_lock_init(&mpc83xx_spi->lock);
+ init_completion(&mpc83xx_spi->done);
+ INIT_WORK(&mpc83xx_spi->work, mpc83xx_spi_work);
+ INIT_LIST_HEAD(&mpc83xx_spi->queue);
- ret = spi_bitbang_start(&mpc83xx_spi->bitbang);
-
- if (ret != 0)
+ mpc83xx_spi->workqueue = create_singlethread_workqueue(
+ master->dev.parent->bus_id);
+ if (mpc83xx_spi->workqueue == NULL) {
+ ret = -EBUSY;
goto free_irq;
+ }
+
+ ret = spi_register_master(master);
+ if (ret < 0)
+ goto unreg_master;
printk(KERN_INFO
"%s: MPC83xx SPI Controller driver at 0x%p (irq = %d)\n",
@@ -495,6 +644,8 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
return ret;
+unreg_master:
+ destroy_workqueue(mpc83xx_spi->workqueue);
free_irq:
free_irq(mpc83xx_spi->irq, mpc83xx_spi);
unmap_io:
@@ -515,10 +666,12 @@ static int __exit mpc83xx_spi_remove(struct platform_device *dev)
master = platform_get_drvdata(dev);
mpc83xx_spi = spi_master_get_devdata(master);
- spi_bitbang_stop(&mpc83xx_spi->bitbang);
+ flush_workqueue(mpc83xx_spi->workqueue);
+ destroy_workqueue(mpc83xx_spi->workqueue);
+ spi_unregister_master(master);
+
free_irq(mpc83xx_spi->irq, mpc83xx_spi);
iounmap(mpc83xx_spi->base);
- spi_master_put(mpc83xx_spi->bitbang.master);
return 0;
}
diff --git a/drivers/usb/c67x00/c67x00-ll-hpi.c b/drivers/usb/c67x00/c67x00-ll-hpi.c
index f3430b372f0..5100fbbf6cb 100644
--- a/drivers/usb/c67x00/c67x00-ll-hpi.c
+++ b/drivers/usb/c67x00/c67x00-ll-hpi.c
@@ -23,6 +23,7 @@
#include <asm/byteorder.h>
#include <linux/io.h>
+#include <linux/jiffies.h>
#include <linux/usb/c67x00.h>
#include "c67x00.h"
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index cefe7f2c6f7..63c34043b4d 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1248,6 +1248,9 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 99e5a68a3f1..fae55a31e26 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -156,6 +156,10 @@ static struct attribute *ep_dev_attrs[] = {
static struct attribute_group ep_dev_attr_grp = {
.attrs = ep_dev_attrs,
};
+static struct attribute_group *ep_dev_groups[] = {
+ &ep_dev_attr_grp,
+ NULL
+};
static int usb_endpoint_major_init(void)
{
@@ -298,6 +302,7 @@ int usb_create_ep_files(struct device *parent,
ep_dev->desc = &endpoint->desc;
ep_dev->udev = udev;
+ ep_dev->dev.groups = ep_dev_groups;
ep_dev->dev.devt = MKDEV(usb_endpoint_major, ep_dev->minor);
ep_dev->dev.class = ep_class->class;
ep_dev->dev.parent = parent;
@@ -309,9 +314,6 @@ int usb_create_ep_files(struct device *parent,
retval = device_register(&ep_dev->dev);
if (retval)
goto error_chrdev;
- retval = sysfs_create_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
- if (retval)
- goto error_group;
/* create the symlink to the old-style "ep_XX" directory */
sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
@@ -322,8 +324,6 @@ int usb_create_ep_files(struct device *parent,
return retval;
error_link:
- sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
-error_group:
device_unregister(&ep_dev->dev);
destroy_endpoint_class();
return retval;
@@ -348,7 +348,6 @@ void usb_remove_ep_files(struct usb_host_endpoint *endpoint)
sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
- sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
device_unregister(&ep_dev->dev);
endpoint->ep_dev = NULL;
destroy_endpoint_class();
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 3e69266e1f4..fe47d145255 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1607,6 +1607,7 @@ free_interfaces:
intf->dev.driver = NULL;
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
+ intf->dev.groups = usb_interface_groups;
intf->dev.dma_mask = dev->dev.dma_mask;
device_initialize(&intf->dev);
mark_quiesced(intf);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5b20a60de8b..c783cb11184 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -538,6 +538,46 @@ static struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
+/* When modifying this list, be sure to modify dev_string_attrs_are_visible()
+ * accordingly.
+ */
+static struct attribute *dev_string_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_product.attr,
+ &dev_attr_serial.attr,
+ NULL
+};
+
+static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct usb_device *udev = to_usb_device(
+ container_of(kobj, struct device, kobj));
+
+ if (a == &dev_attr_manufacturer.attr) {
+ if (udev->manufacturer == NULL)
+ return 0;
+ } else if (a == &dev_attr_product.attr) {
+ if (udev->product == NULL)
+ return 0;
+ } else if (a == &dev_attr_serial.attr) {
+ if (udev->serial == NULL)
+ return 0;
+ }
+ return a->mode;
+}
+
+static struct attribute_group dev_string_attr_grp = {
+ .attrs = dev_string_attrs,
+ .is_visible = dev_string_attrs_are_visible,
+};
+
+struct attribute_group *usb_device_groups[] = {
+ &dev_attr_grp,
+ &dev_string_attr_grp,
+ NULL
+};
+
/* Binary descriptors */
static ssize_t
@@ -591,10 +631,9 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
struct device *dev = &udev->dev;
int retval;
- retval = sysfs_create_group(&dev->kobj, &dev_attr_grp);
- if (retval)
- return retval;
-
+	/* Unfortunately these attributes cannot be created before
+ * the uevent is broadcast.
+ */
retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
if (retval)
goto error;
@@ -607,21 +646,6 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
if (retval)
goto error;
- if (udev->manufacturer) {
- retval = device_create_file(dev, &dev_attr_manufacturer);
- if (retval)
- goto error;
- }
- if (udev->product) {
- retval = device_create_file(dev, &dev_attr_product);
- if (retval)
- goto error;
- }
- if (udev->serial) {
- retval = device_create_file(dev, &dev_attr_serial);
- if (retval)
- goto error;
- }
retval = usb_create_ep_files(dev, &udev->ep0, udev);
if (retval)
goto error;
@@ -636,13 +660,9 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
struct device *dev = &udev->dev;
usb_remove_ep_files(&udev->ep0);
- device_remove_file(dev, &dev_attr_manufacturer);
- device_remove_file(dev, &dev_attr_product);
- device_remove_file(dev, &dev_attr_serial);
remove_power_attributes(dev);
remove_persist_attributes(dev);
device_remove_bin_file(dev, &dev_bin_attr_descriptors);
- sysfs_remove_group(&dev->kobj, &dev_attr_grp);
}
/* Interface Association Descriptor fields */
@@ -688,17 +708,15 @@ static ssize_t show_interface_string(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf;
- struct usb_device *udev;
- int len;
+ char *string;
intf = to_usb_interface(dev);
- udev = interface_to_usbdev(intf);
- len = snprintf(buf, 256, "%s", intf->cur_altsetting->string);
- if (len < 0)
+ string = intf->cur_altsetting->string;
+ barrier(); /* The altsetting might change! */
+
+ if (!string)
return 0;
- buf[len] = '\n';
- buf[len+1] = 0;
- return len+1;
+ return sprintf(buf, "%s\n", string);
}
static DEVICE_ATTR(interface, S_IRUGO, show_interface_string, NULL);
@@ -727,18 +745,6 @@ static ssize_t show_modalias(struct device *dev,
}
static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
-static struct attribute *intf_assoc_attrs[] = {
- &dev_attr_iad_bFirstInterface.attr,
- &dev_attr_iad_bInterfaceCount.attr,
- &dev_attr_iad_bFunctionClass.attr,
- &dev_attr_iad_bFunctionSubClass.attr,
- &dev_attr_iad_bFunctionProtocol.attr,
- NULL,
-};
-static struct attribute_group intf_assoc_attr_grp = {
- .attrs = intf_assoc_attrs,
-};
-
static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceNumber.attr,
&dev_attr_bAlternateSetting.attr,
@@ -753,6 +759,37 @@ static struct attribute_group intf_attr_grp = {
.attrs = intf_attrs,
};
+static struct attribute *intf_assoc_attrs[] = {
+ &dev_attr_iad_bFirstInterface.attr,
+ &dev_attr_iad_bInterfaceCount.attr,
+ &dev_attr_iad_bFunctionClass.attr,
+ &dev_attr_iad_bFunctionSubClass.attr,
+ &dev_attr_iad_bFunctionProtocol.attr,
+ NULL,
+};
+
+static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct usb_interface *intf = to_usb_interface(
+ container_of(kobj, struct device, kobj));
+
+ if (intf->intf_assoc == NULL)
+ return 0;
+ return a->mode;
+}
+
+static struct attribute_group intf_assoc_attr_grp = {
+ .attrs = intf_assoc_attrs,
+ .is_visible = intf_assoc_attrs_are_visible,
+};
+
+struct attribute_group *usb_interface_groups[] = {
+ &intf_attr_grp,
+ &intf_assoc_attr_grp,
+ NULL
+};
+
static inline void usb_create_intf_ep_files(struct usb_interface *intf,
struct usb_device *udev)
{
@@ -777,23 +814,21 @@ static inline void usb_remove_intf_ep_files(struct usb_interface *intf)
int usb_create_sysfs_intf_files(struct usb_interface *intf)
{
- struct device *dev = &intf->dev;
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *alt = intf->cur_altsetting;
int retval;
if (intf->sysfs_files_created)
return 0;
- retval = sysfs_create_group(&dev->kobj, &intf_attr_grp);
- if (retval)
- return retval;
+ /* The interface string may be present in some altsettings
+ * and missing in others. Hence its attribute cannot be created
+ * before the uevent is broadcast.
+ */
if (alt->string == NULL)
alt->string = usb_cache_string(udev, alt->desc.iInterface);
if (alt->string)
- retval = device_create_file(dev, &dev_attr_interface);
- if (intf->intf_assoc)
- retval = sysfs_create_group(&dev->kobj, &intf_assoc_attr_grp);
+ retval = device_create_file(&intf->dev, &dev_attr_interface);
usb_create_intf_ep_files(intf, udev);
intf->sysfs_files_created = 1;
return 0;
@@ -807,7 +842,5 @@ void usb_remove_sysfs_intf_files(struct usb_interface *intf)
return;
usb_remove_intf_ep_files(intf);
device_remove_file(dev, &dev_attr_interface);
- sysfs_remove_group(&dev->kobj, &intf_attr_grp);
- sysfs_remove_group(&intf->dev.kobj, &intf_assoc_attr_grp);
intf->sysfs_files_created = 0;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 1f0db51190c..32577437583 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -291,6 +291,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
device_initialize(&dev->dev);
dev->dev.bus = &usb_bus_type;
dev->dev.type = &usb_device_type;
+ dev->dev.groups = usb_device_groups;
dev->dev.dma_mask = bus->controller->dma_mask;
set_dev_node(&dev->dev, dev_to_node(bus->controller));
dev->state = USB_STATE_ATTACHED;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 1bf8ccb9c58..1a8bc21c335 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -130,6 +130,10 @@ static inline int is_active(const struct usb_interface *f)
/* for labeling diagnostics */
extern const char *usbcore_name;
+/* sysfs stuff */
+extern struct attribute_group *usb_device_groups[];
+extern struct attribute_group *usb_interface_groups[];
+
/* usbfs stuff */
extern struct mutex usbfs_mutex;
extern struct usb_driver usbfs_driver;
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index ce337cb5d13..f261d2a9a5f 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -3251,7 +3251,7 @@ static int udc_pci_probe(
/* pci setup */
if (pci_enable_device(pdev) < 0) {
kfree(dev);
- dev = 0;
+ dev = NULL;
retval = -ENODEV;
goto finished;
}
@@ -3264,7 +3264,7 @@ static int udc_pci_probe(
if (!request_mem_region(resource, len, name)) {
dev_dbg(&pdev->dev, "pci device used already\n");
kfree(dev);
- dev = 0;
+ dev = NULL;
retval = -EBUSY;
goto finished;
}
@@ -3274,7 +3274,7 @@ static int udc_pci_probe(
if (dev->virt_addr == NULL) {
dev_dbg(&pdev->dev, "start address cannot be mapped\n");
kfree(dev);
- dev = 0;
+ dev = NULL;
retval = -EFAULT;
goto finished;
}
@@ -3282,7 +3282,7 @@ static int udc_pci_probe(
if (!pdev->irq) {
dev_err(&dev->pdev->dev, "irq not set\n");
kfree(dev);
- dev = 0;
+ dev = NULL;
retval = -ENODEV;
goto finished;
}
@@ -3290,7 +3290,7 @@ static int udc_pci_probe(
if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
kfree(dev);
- dev = 0;
+ dev = NULL;
retval = -EBUSY;
goto finished;
}
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e756023362c..07e5a0b5dcd 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -649,7 +649,13 @@ static int usba_ep_disable(struct usb_ep *_ep)
if (!ep->desc) {
spin_unlock_irqrestore(&udc->lock, flags);
- DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
+ /* REVISIT because this driver disables endpoints in
+ * reset_all_endpoints() before calling disconnect(),
+ * most gadget drivers would trigger this non-error ...
+ */
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN)
+ DBG(DBG_ERR, "ep_disable: %s not enabled\n",
+ ep->ep.name);
return -EINVAL;
}
ep->desc = NULL;
@@ -1032,8 +1038,6 @@ static struct usba_udc the_udc = {
.release = nop_release,
},
},
-
- .lock = SPIN_LOCK_UNLOCKED,
};
/*
@@ -1052,6 +1056,12 @@ static void reset_all_endpoints(struct usba_udc *udc)
request_complete(ep, req, -ECONNRESET);
}
+ /* NOTE: normally, the next call to the gadget driver is in
+ * charge of disabling endpoints... usually disconnect().
+ * The exception would be entering a high speed test mode.
+ *
+ * FIXME remove this code ... and retest thoroughly.
+ */
list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
if (ep->desc) {
spin_unlock(&udc->lock);
@@ -1219,7 +1229,7 @@ static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
struct usb_ctrlrequest *crq)
{
- int retval = 0;;
+ int retval = 0;
switch (crq->bRequest) {
case USB_REQ_GET_STATUS: {
@@ -1693,6 +1703,14 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
reset_all_endpoints(udc);
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN
+ && udc->driver->disconnect) {
+ udc->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock(&udc->lock);
+ udc->driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+
if (status & USBA_HIGH_SPEED) {
DBG(DBG_BUS, "High-speed bus reset detected\n");
udc->gadget.speed = USB_SPEED_HIGH;
@@ -1716,9 +1734,13 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
| USBA_DET_SUSPEND
| USBA_END_OF_RESUME));
+ /*
+ * Unclear why we hit this irregularly, e.g. in usbtest,
+ * but it's clearly harmless...
+ */
if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
- dev_warn(&udc->pdev->dev,
- "WARNING: EP0 configuration is invalid!\n");
+ dev_dbg(&udc->pdev->dev,
+ "ODD: EP0 configuration is invalid!\n");
}
spin_unlock(&udc->lock);
@@ -1751,9 +1773,11 @@ static irqreturn_t usba_vbus_irq(int irq, void *devid)
reset_all_endpoints(udc);
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
- spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
- spin_lock(&udc->lock);
+ if (udc->driver->disconnect) {
+ spin_unlock(&udc->lock);
+ udc->driver->disconnect(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
}
udc->vbus_prev = vbus;
}
@@ -1825,7 +1849,7 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
if (!udc->pdev)
return -ENODEV;
- if (driver != udc->driver)
+ if (driver != udc->driver || !driver->unbind)
return -EINVAL;
if (udc->vbus_pin != -1)
@@ -1840,6 +1864,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
+ if (udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
+
driver->unbind(&udc->gadget);
udc->gadget.dev.driver = NULL;
udc->driver = NULL;
@@ -1879,6 +1906,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
goto err_get_hclk;
}
+ spin_lock_init(&udc->lock);
udc->pdev = pdev;
udc->pclk = pclk;
udc->hclk = hclk;
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 75eba202f73..499b7a23f35 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1546,7 +1546,6 @@ static __init void udc_init_data(struct pxa_udc *dev)
INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
ep0_idle(dev);
- strcpy(dev->dev->bus_id, "");
/* PXA endpoints init */
for (i = 0; i < NR_PXA_ENDPOINTS; i++) {
@@ -1746,13 +1745,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
ep_err(ep, "wrong to have extra bytes for setup : 0x%08x\n", i);
}
- le16_to_cpus(&u.r.wValue);
- le16_to_cpus(&u.r.wIndex);
- le16_to_cpus(&u.r.wLength);
-
ep_dbg(ep, "SETUP %02x.%02x v%04x i%04x l%04x\n",
u.r.bRequestType, u.r.bRequest,
- u.r.wValue, u.r.wIndex, u.r.wLength);
+ le16_to_cpu(u.r.wValue), le16_to_cpu(u.r.wIndex),
+ le16_to_cpu(u.r.wLength));
if (unlikely(have_extrabytes))
goto stall;
@@ -2296,7 +2292,8 @@ static void pxa_udc_shutdown(struct platform_device *_dev)
{
struct pxa_udc *udc = platform_get_drvdata(_dev);
- udc_disable(udc);
+ if (udc_readl(udc, UDCCR) & UDCCR_UDE)
+ udc_disable(udc);
}
#ifdef CONFIG_PM
@@ -2361,9 +2358,8 @@ static int pxa_udc_resume(struct platform_device *_dev)
* Upon exit from sleep mode and before clearing OTGPH,
* Software must configure the USB OTG pad, UDC, and UHC
* to the state they were in before entering sleep mode.
- *
- * Should be : PSSR |= PSSR_OTGPH;
*/
+ PSSR |= PSSR_OTGPH;
return 0;
}
@@ -2387,6 +2383,9 @@ static struct platform_driver udc_driver = {
static int __init udc_init(void)
{
+ if (!cpu_is_pxa27x())
+ return -ENODEV;
+
printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
return platform_driver_probe(&udc_driver, pxa_udc_probe);
}
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index 1d1b7936ee1..97453db924f 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -484,4 +484,12 @@ static inline struct pxa_udc *to_gadget_udc(struct usb_gadget *gadget)
#define ep_warn(ep, fmt, arg...) \
dev_warn(ep->dev->dev, "%s:%s:" fmt, EPNAME(ep), __func__, ## arg)
+/*
+ * Cannot include pxa-regs.h, as register names are similar.
+ * So PSSR is redefined here. This should be removed once the UDC registers
+ * are gone from pxa-regs.h.
+ */
+#define PSSR __REG(0x40F00004) /* Power Manager Sleep Status */
+#define PSSR_OTGPH (1 << 6) /* OTG Peripheral Hold */
+
#endif /* __LINUX_USB_GADGET_PXA27X_H */
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 54cdd6f9403..fa019fa7333 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -14,7 +14,6 @@
* This software is distributed under the terms of the GNU General
* Public License ("GPL") as published by the Free Software Foundation,
* either version 2 of that License or (at your option) any later version.
- *
*/
#include <linux/kernel.h>
@@ -33,7 +32,7 @@
/* Defines */
#define GS_VERSION_STR "v2.2"
-#define GS_VERSION_NUM 0x0202
+#define GS_VERSION_NUM 0x2200
#define GS_LONG_NAME "Gadget Serial"
#define GS_SHORT_NAME "g_serial"
@@ -41,7 +40,11 @@
#define GS_MAJOR 127
#define GS_MINOR_START 0
-#define GS_NUM_PORTS 16
+/* REVISIT only one port is supported for now;
+ * see gs_{send,recv}_packet() ... no multiplexing,
+ * and no support for multiple ACM devices.
+ */
+#define GS_NUM_PORTS 1
#define GS_NUM_CONFIGS 1
#define GS_NO_CONFIG_ID 0
@@ -65,6 +68,9 @@
#define GS_DEFAULT_USE_ACM 0
+/* 9600-8-N-1 ... matches init_termios.c_cflag and defaults
+ * expected by "usbser.sys" on MS-Windows.
+ */
#define GS_DEFAULT_DTE_RATE 9600
#define GS_DEFAULT_DATA_BITS 8
#define GS_DEFAULT_PARITY USB_CDC_NO_PARITY
@@ -107,10 +113,6 @@ static int debug = 1;
#define GS_NOTIFY_MAXPACKET 8
-/* Structures */
-
-struct gs_dev;
-
/* circular buffer */
struct gs_buf {
unsigned int buf_size;
@@ -119,12 +121,6 @@ struct gs_buf {
char *buf_put;
};
-/* list of requests */
-struct gs_req_entry {
- struct list_head re_entry;
- struct usb_request *re_req;
-};
-
/* the port structure holds info for each port, one for each minor number */
struct gs_port {
struct gs_dev *port_dev; /* pointer to device struct */
@@ -164,26 +160,7 @@ struct gs_dev {
/* Functions */
-/* module */
-static int __init gs_module_init(void);
-static void __exit gs_module_exit(void);
-
-/* tty driver */
-static int gs_open(struct tty_struct *tty, struct file *file);
-static void gs_close(struct tty_struct *tty, struct file *file);
-static int gs_write(struct tty_struct *tty,
- const unsigned char *buf, int count);
-static int gs_put_char(struct tty_struct *tty, unsigned char ch);
-static void gs_flush_chars(struct tty_struct *tty);
-static int gs_write_room(struct tty_struct *tty);
-static int gs_chars_in_buffer(struct tty_struct *tty);
-static void gs_throttle(struct tty_struct * tty);
-static void gs_unthrottle(struct tty_struct * tty);
-static void gs_break(struct tty_struct *tty, int break_state);
-static int gs_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
-static void gs_set_termios(struct tty_struct *tty, struct ktermios *old);
-
+/* tty driver internals */
static int gs_send(struct gs_dev *dev);
static int gs_send_packet(struct gs_dev *dev, char *packet,
unsigned int size);
@@ -192,19 +169,7 @@ static int gs_recv_packet(struct gs_dev *dev, char *packet,
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req);
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req);
-/* gadget driver */
-static int gs_bind(struct usb_gadget *gadget);
-static void gs_unbind(struct usb_gadget *gadget);
-static int gs_setup(struct usb_gadget *gadget,
- const struct usb_ctrlrequest *ctrl);
-static int gs_setup_standard(struct usb_gadget *gadget,
- const struct usb_ctrlrequest *ctrl);
-static int gs_setup_class(struct usb_gadget *gadget,
- const struct usb_ctrlrequest *ctrl);
-static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req);
-static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
- struct usb_request *req);
-static void gs_disconnect(struct usb_gadget *gadget);
+/* gadget driver internals */
static int gs_set_config(struct gs_dev *dev, unsigned config);
static void gs_reset_config(struct gs_dev *dev);
static int gs_build_config_buf(u8 *buf, struct usb_gadget *g,
@@ -214,10 +179,6 @@ static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len,
gfp_t kmalloc_flags);
static void gs_free_req(struct usb_ep *ep, struct usb_request *req);
-static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len,
- gfp_t kmalloc_flags);
-static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req);
-
static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags);
static void gs_free_ports(struct gs_dev *dev);
@@ -232,62 +193,15 @@ static unsigned int gs_buf_put(struct gs_buf *gb, const char *buf,
static unsigned int gs_buf_get(struct gs_buf *gb, char *buf,
unsigned int count);
-/* external functions */
-extern int net2280_set_fifo_mode(struct usb_gadget *gadget, int mode);
-
/* Globals */
static struct gs_dev *gs_device;
-static const char *EP_IN_NAME;
-static const char *EP_OUT_NAME;
-static const char *EP_NOTIFY_NAME;
-
static struct mutex gs_open_close_lock[GS_NUM_PORTS];
-static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
-static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
-
-static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
-
-static unsigned int use_acm = GS_DEFAULT_USE_ACM;
-
-
-/* tty driver struct */
-static const struct tty_operations gs_tty_ops = {
- .open = gs_open,
- .close = gs_close,
- .write = gs_write,
- .put_char = gs_put_char,
- .flush_chars = gs_flush_chars,
- .write_room = gs_write_room,
- .ioctl = gs_ioctl,
- .set_termios = gs_set_termios,
- .throttle = gs_throttle,
- .unthrottle = gs_unthrottle,
- .break_ctl = gs_break,
- .chars_in_buffer = gs_chars_in_buffer,
-};
-static struct tty_driver *gs_tty_driver;
-
-/* gadget driver struct */
-static struct usb_gadget_driver gs_gadget_driver = {
-#ifdef CONFIG_USB_GADGET_DUALSPEED
- .speed = USB_SPEED_HIGH,
-#else
- .speed = USB_SPEED_FULL,
-#endif /* CONFIG_USB_GADGET_DUALSPEED */
- .function = GS_LONG_NAME,
- .bind = gs_bind,
- .unbind = gs_unbind,
- .setup = gs_setup,
- .disconnect = gs_disconnect,
- .driver = {
- .name = GS_SHORT_NAME,
- },
-};
+/*-------------------------------------------------------------------------*/
/* USB descriptors */
@@ -304,7 +218,6 @@ static char manufacturer[50];
static struct usb_string gs_strings[] = {
{ GS_MANUFACTURER_STR_ID, manufacturer },
{ GS_PRODUCT_STR_ID, GS_LONG_NAME },
- { GS_SERIAL_STR_ID, "0" },
{ GS_BULK_CONFIG_STR_ID, "Gadget Serial Bulk" },
{ GS_ACM_CONFIG_STR_ID, "Gadget Serial CDC ACM" },
{ GS_CONTROL_STR_ID, "Gadget Serial Control" },
@@ -327,7 +240,6 @@ static struct usb_device_descriptor gs_device_desc = {
.idProduct = __constant_cpu_to_le16(GS_PRODUCT_ID),
.iManufacturer = GS_MANUFACTURER_STR_ID,
.iProduct = GS_PRODUCT_STR_ID,
- .iSerialNumber = GS_SERIAL_STR_ID,
.bNumConfigurations = GS_NUM_CONFIGS,
};
@@ -364,7 +276,7 @@ static const struct usb_interface_descriptor gs_bulk_interface_desc = {
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = GS_BULK_INTERFACE_ID,
.bNumEndpoints = 2,
- .bInterfaceClass = USB_CLASS_CDC_DATA,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 0,
.iInterface = GS_DATA_STR_ID,
@@ -521,6 +433,8 @@ static const struct usb_descriptor_header *gs_acm_highspeed_function[] = {
};
+/*-------------------------------------------------------------------------*/
+
/* Module */
MODULE_DESCRIPTION(GS_LONG_NAME);
MODULE_AUTHOR("Al Borchers");
@@ -531,84 +445,23 @@ module_param(debug, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging, 0=off, 1=on");
#endif
+static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
module_param(read_q_size, uint, S_IRUGO);
MODULE_PARM_DESC(read_q_size, "Read request queue size, default=32");
+static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
module_param(write_q_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_q_size, "Write request queue size, default=32");
+static unsigned int write_buf_size = GS_DEFAULT_WRITE_BUF_SIZE;
module_param(write_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_buf_size, "Write buffer size, default=8192");
+static unsigned int use_acm = GS_DEFAULT_USE_ACM;
module_param(use_acm, uint, S_IRUGO);
MODULE_PARM_DESC(use_acm, "Use CDC ACM, 0=no, 1=yes, default=no");
-module_init(gs_module_init);
-module_exit(gs_module_exit);
-
-/*
-* gs_module_init
-*
-* Register as a USB gadget driver and a tty driver.
-*/
-static int __init gs_module_init(void)
-{
- int i;
- int retval;
-
- retval = usb_gadget_register_driver(&gs_gadget_driver);
- if (retval) {
- pr_err("gs_module_init: cannot register gadget driver, "
- "ret=%d\n", retval);
- return retval;
- }
-
- gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
- if (!gs_tty_driver)
- return -ENOMEM;
- gs_tty_driver->owner = THIS_MODULE;
- gs_tty_driver->driver_name = GS_SHORT_NAME;
- gs_tty_driver->name = "ttygs";
- gs_tty_driver->major = GS_MAJOR;
- gs_tty_driver->minor_start = GS_MINOR_START;
- gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- gs_tty_driver->init_termios = tty_std_termios;
- gs_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- tty_set_operations(gs_tty_driver, &gs_tty_ops);
-
- for (i=0; i < GS_NUM_PORTS; i++)
- mutex_init(&gs_open_close_lock[i]);
-
- retval = tty_register_driver(gs_tty_driver);
- if (retval) {
- usb_gadget_unregister_driver(&gs_gadget_driver);
- put_tty_driver(gs_tty_driver);
- pr_err("gs_module_init: cannot register tty driver, "
- "ret=%d\n", retval);
- return retval;
- }
-
- pr_info("gs_module_init: %s %s loaded\n",
- GS_LONG_NAME, GS_VERSION_STR);
- return 0;
-}
-
-/*
-* gs_module_exit
-*
-* Unregister as a tty driver and a USB gadget driver.
-*/
-static void __exit gs_module_exit(void)
-{
- tty_unregister_driver(gs_tty_driver);
- put_tty_driver(gs_tty_driver);
- usb_gadget_unregister_driver(&gs_gadget_driver);
-
- pr_info("gs_module_exit: %s %s unloaded\n",
- GS_LONG_NAME, GS_VERSION_STR);
-}
+/*-------------------------------------------------------------------------*/
/* TTY Driver */
@@ -753,15 +606,15 @@ exit_unlock_dev:
* gs_close
*/
-#define GS_WRITE_FINISHED_EVENT_SAFELY(p) \
-({ \
- int cond; \
- \
- spin_lock_irq(&(p)->port_lock); \
- cond = !(p)->port_dev || !gs_buf_data_avail((p)->port_write_buf); \
- spin_unlock_irq(&(p)->port_lock); \
- cond; \
-})
+static int gs_write_finished_event_safely(struct gs_port *p)
+{
+ int cond;
+
+ spin_lock_irq(&(p)->port_lock);
+ cond = !(p)->port_dev || !gs_buf_data_avail((p)->port_write_buf);
+ spin_unlock_irq(&(p)->port_lock);
+ return cond;
+}
static void gs_close(struct tty_struct *tty, struct file *file)
{
@@ -807,7 +660,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
if (gs_buf_data_avail(port->port_write_buf) > 0) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->port_write_wait,
- GS_WRITE_FINISHED_EVENT_SAFELY(port),
+ gs_write_finished_event_safely(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
}
@@ -1065,6 +918,23 @@ static void gs_set_termios(struct tty_struct *tty, struct ktermios *old)
{
}
+static const struct tty_operations gs_tty_ops = {
+ .open = gs_open,
+ .close = gs_close,
+ .write = gs_write,
+ .put_char = gs_put_char,
+ .flush_chars = gs_flush_chars,
+ .write_room = gs_write_room,
+ .ioctl = gs_ioctl,
+ .set_termios = gs_set_termios,
+ .throttle = gs_throttle,
+ .unthrottle = gs_unthrottle,
+ .break_ctl = gs_break,
+ .chars_in_buffer = gs_chars_in_buffer,
+};
+
+/*-------------------------------------------------------------------------*/
+
/*
* gs_send
*
@@ -1080,7 +950,6 @@ static int gs_send(struct gs_dev *dev)
unsigned long flags;
struct usb_ep *ep;
struct usb_request *req;
- struct gs_req_entry *req_entry;
if (dev == NULL) {
pr_err("gs_send: NULL device pointer\n");
@@ -1093,10 +962,8 @@ static int gs_send(struct gs_dev *dev)
while(!list_empty(&dev->dev_req_list)) {
- req_entry = list_entry(dev->dev_req_list.next,
- struct gs_req_entry, re_entry);
-
- req = req_entry->re_req;
+ req = list_entry(dev->dev_req_list.next,
+ struct usb_request, list);
len = gs_send_packet(dev, req->buf, ep->maxpacket);
@@ -1106,7 +973,7 @@ static int gs_send(struct gs_dev *dev)
*((unsigned char *)req->buf),
*((unsigned char *)req->buf+1),
*((unsigned char *)req->buf+2));
- list_del(&req_entry->re_entry);
+ list_del(&req->list);
req->length = len;
spin_unlock_irqrestore(&dev->dev_lock, flags);
if ((ret=usb_ep_queue(ep, req, GFP_ATOMIC))) {
@@ -1289,7 +1156,6 @@ requeue:
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
struct gs_dev *dev = ep->driver_data;
- struct gs_req_entry *gs_req = req->context;
if (dev == NULL) {
pr_err("gs_write_complete: NULL device pointer\n");
@@ -1300,13 +1166,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
case 0:
/* normal completion */
requeue:
- if (gs_req == NULL) {
- pr_err("gs_write_complete: NULL request pointer\n");
- return;
- }
-
spin_lock(&dev->dev_lock);
- list_add(&gs_req->re_entry, &dev->dev_req_list);
+ list_add(&req->list, &dev->dev_req_list);
spin_unlock(&dev->dev_lock);
gs_send(dev);
@@ -1328,9 +1189,39 @@ requeue:
}
}
+/*-------------------------------------------------------------------------*/
+
/* Gadget Driver */
/*
+ * gs_unbind
+ *
+ * Called on module unload. Frees the control request and device
+ * structure.
+ */
+static void /* __init_or_exit */ gs_unbind(struct usb_gadget *gadget)
+{
+ struct gs_dev *dev = get_gadget_data(gadget);
+
+ gs_device = NULL;
+
+ /* read/write requests already freed, only control request remains */
+ if (dev != NULL) {
+ if (dev->dev_ctrl_req != NULL) {
+ gs_free_req(gadget->ep0, dev->dev_ctrl_req);
+ dev->dev_ctrl_req = NULL;
+ }
+ gs_reset_config(dev);
+ gs_free_ports(dev);
+ kfree(dev);
+ set_gadget_data(gadget, NULL);
+ }
+
+ pr_info("gs_unbind: %s %s unbound\n", GS_LONG_NAME,
+ GS_VERSION_STR);
+}
+
+/*
* gs_bind
*
* Called on module load. Allocates and initializes the device
@@ -1362,19 +1253,23 @@ static int __init gs_bind(struct usb_gadget *gadget)
__constant_cpu_to_le16(GS_VERSION_NUM|0x0099);
}
+ dev = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
usb_ep_autoconfig_reset(gadget);
ep = usb_ep_autoconfig(gadget, &gs_fullspeed_in_desc);
if (!ep)
goto autoconf_fail;
- EP_IN_NAME = ep->name;
- ep->driver_data = ep; /* claim the endpoint */
+ dev->dev_in_ep = ep;
+ ep->driver_data = dev; /* claim the endpoint */
ep = usb_ep_autoconfig(gadget, &gs_fullspeed_out_desc);
if (!ep)
goto autoconf_fail;
- EP_OUT_NAME = ep->name;
- ep->driver_data = ep; /* claim the endpoint */
+ dev->dev_out_ep = ep;
+ ep->driver_data = dev; /* claim the endpoint */
if (use_acm) {
ep = usb_ep_autoconfig(gadget, &gs_fullspeed_notify_desc);
@@ -1384,8 +1279,8 @@ static int __init gs_bind(struct usb_gadget *gadget)
}
gs_device_desc.idProduct = __constant_cpu_to_le16(
GS_CDC_PRODUCT_ID),
- EP_NOTIFY_NAME = ep->name;
- ep->driver_data = ep; /* claim the endpoint */
+ dev->dev_notify_ep = ep;
+ ep->driver_data = dev; /* claim the endpoint */
}
gs_device_desc.bDeviceClass = use_acm
@@ -1415,9 +1310,7 @@ static int __init gs_bind(struct usb_gadget *gadget)
gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- gs_device = dev = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
- if (dev == NULL)
- return -ENOMEM;
+ gs_device = dev;
snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s",
init_utsname()->sysname, init_utsname()->release,
@@ -1441,8 +1334,6 @@ static int __init gs_bind(struct usb_gadget *gadget)
gs_unbind(gadget);
return -ENOMEM;
}
- dev->dev_ctrl_req->complete = gs_setup_complete;
-
gadget->ep0->driver_data = dev;
pr_info("gs_bind: %s %s bound\n",
@@ -1451,99 +1342,11 @@ static int __init gs_bind(struct usb_gadget *gadget)
return 0;
autoconf_fail:
+ kfree(dev);
pr_err("gs_bind: cannot autoconfigure on %s\n", gadget->name);
return -ENODEV;
}
-/*
- * gs_unbind
- *
- * Called on module unload. Frees the control request and device
- * structure.
- */
-static void /* __init_or_exit */ gs_unbind(struct usb_gadget *gadget)
-{
- struct gs_dev *dev = get_gadget_data(gadget);
-
- gs_device = NULL;
-
- /* read/write requests already freed, only control request remains */
- if (dev != NULL) {
- if (dev->dev_ctrl_req != NULL) {
- gs_free_req(gadget->ep0, dev->dev_ctrl_req);
- dev->dev_ctrl_req = NULL;
- }
- gs_free_ports(dev);
- if (dev->dev_notify_ep)
- usb_ep_disable(dev->dev_notify_ep);
- if (dev->dev_in_ep)
- usb_ep_disable(dev->dev_in_ep);
- if (dev->dev_out_ep)
- usb_ep_disable(dev->dev_out_ep);
- kfree(dev);
- set_gadget_data(gadget, NULL);
- }
-
- pr_info("gs_unbind: %s %s unbound\n", GS_LONG_NAME,
- GS_VERSION_STR);
-}
-
-/*
- * gs_setup
- *
- * Implements all the control endpoint functionality that's not
- * handled in hardware or the hardware driver.
- *
- * Returns the size of the data sent to the host, or a negative
- * error number.
- */
-static int gs_setup(struct usb_gadget *gadget,
- const struct usb_ctrlrequest *ctrl)
-{
- int ret = -EOPNOTSUPP;
- struct gs_dev *dev = get_gadget_data(gadget);
- struct usb_request *req = dev->dev_ctrl_req;
- u16 wIndex = le16_to_cpu(ctrl->wIndex);
- u16 wValue = le16_to_cpu(ctrl->wValue);
- u16 wLength = le16_to_cpu(ctrl->wLength);
-
- req->complete = gs_setup_complete;
-
- switch (ctrl->bRequestType & USB_TYPE_MASK) {
- case USB_TYPE_STANDARD:
- ret = gs_setup_standard(gadget,ctrl);
- break;
-
- case USB_TYPE_CLASS:
- ret = gs_setup_class(gadget,ctrl);
- break;
-
- default:
- pr_err("gs_setup: unknown request, type=%02x, request=%02x, "
- "value=%04x, index=%04x, length=%d\n",
- ctrl->bRequestType, ctrl->bRequest,
- wValue, wIndex, wLength);
- break;
- }
-
- /* respond with data transfer before status phase? */
- if (ret >= 0) {
- req->length = ret;
- req->zero = ret < wLength
- && (ret % gadget->ep0->maxpacket) == 0;
- ret = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
- if (ret < 0) {
- pr_err("gs_setup: cannot queue response, ret=%d\n",
- ret);
- req->status = 0;
- gs_setup_complete(gadget->ep0, req);
- }
- }
-
- /* device either stalls (ret < 0) or reports success */
- return ret;
-}
-
static int gs_setup_standard(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl)
{
@@ -1673,6 +1476,42 @@ set_interface_done:
return ret;
}
+static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct gs_dev *dev = ep->driver_data;
+ struct gs_port *port = dev->dev_port[0]; /* ACM only has one port */
+
+ switch (req->status) {
+ case 0:
+ /* normal completion */
+ if (req->actual != sizeof(port->port_line_coding))
+ usb_ep_set_halt(ep);
+ else if (port) {
+ struct usb_cdc_line_coding *value = req->buf;
+
+ /* REVISIT: we currently just remember this data.
+ * If we change that, (a) validate it first, then
+ * (b) update whatever hardware needs updating.
+ */
+ spin_lock(&port->port_lock);
+ port->port_line_coding = *value;
+ spin_unlock(&port->port_lock);
+ }
+ break;
+
+ case -ESHUTDOWN:
+ /* disconnect */
+ gs_free_req(ep, req);
+ break;
+
+ default:
+ /* unexpected */
+ break;
+ }
+ return;
+}
+
static int gs_setup_class(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl)
{
@@ -1734,52 +1573,72 @@ static int gs_setup_class(struct usb_gadget *gadget,
return ret;
}
-static void gs_setup_complete_set_line_coding(struct usb_ep *ep,
- struct usb_request *req)
+/*
+ * gs_setup_complete
+ */
+static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
- struct gs_dev *dev = ep->driver_data;
- struct gs_port *port = dev->dev_port[0]; /* ACM only has one port */
+ if (req->status || req->actual != req->length) {
+ pr_err("gs_setup_complete: status error, status=%d, "
+ "actual=%d, length=%d\n",
+ req->status, req->actual, req->length);
+ }
+}
- switch (req->status) {
- case 0:
- /* normal completion */
- if (req->actual != sizeof(port->port_line_coding))
- usb_ep_set_halt(ep);
- else if (port) {
- struct usb_cdc_line_coding *value = req->buf;
+/*
+ * gs_setup
+ *
+ * Implements all the control endpoint functionality that's not
+ * handled in hardware or the hardware driver.
+ *
+ * Returns the size of the data sent to the host, or a negative
+ * error number.
+ */
+static int gs_setup(struct usb_gadget *gadget,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int ret = -EOPNOTSUPP;
+ struct gs_dev *dev = get_gadget_data(gadget);
+ struct usb_request *req = dev->dev_ctrl_req;
+ u16 wIndex = le16_to_cpu(ctrl->wIndex);
+ u16 wValue = le16_to_cpu(ctrl->wValue);
+ u16 wLength = le16_to_cpu(ctrl->wLength);
- /* REVISIT: we currently just remember this data.
- * If we change that, (a) validate it first, then
- * (b) update whatever hardware needs updating.
- */
- spin_lock(&port->port_lock);
- port->port_line_coding = *value;
- spin_unlock(&port->port_lock);
- }
+ req->complete = gs_setup_complete;
+
+ switch (ctrl->bRequestType & USB_TYPE_MASK) {
+ case USB_TYPE_STANDARD:
+ ret = gs_setup_standard(gadget, ctrl);
break;
- case -ESHUTDOWN:
- /* disconnect */
- gs_free_req(ep, req);
+ case USB_TYPE_CLASS:
+ ret = gs_setup_class(gadget, ctrl);
break;
default:
- /* unexpected */
+ pr_err("gs_setup: unknown request, type=%02x, request=%02x, "
+ "value=%04x, index=%04x, length=%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ wValue, wIndex, wLength);
break;
}
- return;
-}
-/*
- * gs_setup_complete
- */
-static void gs_setup_complete(struct usb_ep *ep, struct usb_request *req)
-{
- if (req->status || req->actual != req->length) {
- pr_err("gs_setup_complete: status error, status=%d, "
- "actual=%d, length=%d\n",
- req->status, req->actual, req->length);
+ /* respond with data transfer before status phase? */
+ if (ret >= 0) {
+ req->length = ret;
+ req->zero = ret < wLength
+ && (ret % gadget->ep0->maxpacket) == 0;
+ ret = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0) {
+ pr_err("gs_setup: cannot queue response, ret=%d\n",
+ ret);
+ req->status = 0;
+ gs_setup_complete(gadget->ep0, req);
+ }
}
+
+ /* device either stalls (ret < 0) or reports success */
+ return ret;
}
/*
@@ -1811,6 +1670,23 @@ static void gs_disconnect(struct usb_gadget *gadget)
pr_info("gs_disconnect: %s disconnected\n", GS_LONG_NAME);
}
+static struct usb_gadget_driver gs_gadget_driver = {
+#ifdef CONFIG_USB_GADGET_DUALSPEED
+ .speed = USB_SPEED_HIGH,
+#else
+ .speed = USB_SPEED_FULL,
+#endif /* CONFIG_USB_GADGET_DUALSPEED */
+ .function = GS_LONG_NAME,
+ .bind = gs_bind,
+ .unbind = gs_unbind,
+ .setup = gs_setup,
+ .disconnect = gs_disconnect,
+ .driver = {
+ .name = GS_SHORT_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
/*
* gs_set_config
*
@@ -1826,9 +1702,8 @@ static int gs_set_config(struct gs_dev *dev, unsigned config)
int ret = 0;
struct usb_gadget *gadget = dev->dev_gadget;
struct usb_ep *ep;
- struct usb_endpoint_descriptor *ep_desc;
+ struct usb_endpoint_descriptor *out, *in, *notify;
struct usb_request *req;
- struct gs_req_entry *req_entry;
if (dev == NULL) {
pr_err("gs_set_config: NULL device pointer\n");
@@ -1846,86 +1721,62 @@ static int gs_set_config(struct gs_dev *dev, unsigned config)
case GS_BULK_CONFIG_ID:
if (use_acm)
return -EINVAL;
- /* device specific optimizations */
- if (gadget_is_net2280(gadget))
- net2280_set_fifo_mode(gadget, 1);
break;
case GS_ACM_CONFIG_ID:
if (!use_acm)
return -EINVAL;
- /* device specific optimizations */
- if (gadget_is_net2280(gadget))
- net2280_set_fifo_mode(gadget, 1);
break;
default:
return -EINVAL;
}
- dev->dev_config = config;
-
- gadget_for_each_ep(ep, gadget) {
-
- if (EP_NOTIFY_NAME
- && strcmp(ep->name, EP_NOTIFY_NAME) == 0) {
- ep_desc = choose_ep_desc(gadget,
+ in = choose_ep_desc(gadget,
+ &gs_highspeed_in_desc,
+ &gs_fullspeed_in_desc);
+ out = choose_ep_desc(gadget,
+ &gs_highspeed_out_desc,
+ &gs_fullspeed_out_desc);
+ notify = dev->dev_notify_ep
+ ? choose_ep_desc(gadget,
&gs_highspeed_notify_desc,
- &gs_fullspeed_notify_desc);
- ret = usb_ep_enable(ep,ep_desc);
- if (ret == 0) {
- ep->driver_data = dev;
- dev->dev_notify_ep = ep;
- dev->dev_notify_ep_desc = ep_desc;
- } else {
- pr_err("gs_set_config: cannot enable NOTIFY "
- "endpoint %s, ret=%d\n",
- ep->name, ret);
- goto exit_reset_config;
- }
- }
-
- else if (strcmp(ep->name, EP_IN_NAME) == 0) {
- ep_desc = choose_ep_desc(gadget,
- &gs_highspeed_in_desc,
- &gs_fullspeed_in_desc);
- ret = usb_ep_enable(ep,ep_desc);
- if (ret == 0) {
- ep->driver_data = dev;
- dev->dev_in_ep = ep;
- dev->dev_in_ep_desc = ep_desc;
- } else {
- pr_err("gs_set_config: cannot enable IN "
- "endpoint %s, ret=%d\n",
- ep->name, ret);
- goto exit_reset_config;
- }
- }
+ &gs_fullspeed_notify_desc)
+ : NULL;
- else if (strcmp(ep->name, EP_OUT_NAME) == 0) {
- ep_desc = choose_ep_desc(gadget,
- &gs_highspeed_out_desc,
- &gs_fullspeed_out_desc);
- ret = usb_ep_enable(ep,ep_desc);
- if (ret == 0) {
- ep->driver_data = dev;
- dev->dev_out_ep = ep;
- dev->dev_out_ep_desc = ep_desc;
- } else {
- pr_err("gs_set_config: cannot enable OUT "
- "endpoint %s, ret=%d\n",
- ep->name, ret);
- goto exit_reset_config;
- }
- }
+ ret = usb_ep_enable(dev->dev_in_ep, in);
+ if (ret == 0) {
+ dev->dev_in_ep_desc = in;
+ } else {
+ pr_debug("%s: cannot enable %s %s, ret=%d\n",
+ __func__, "IN", dev->dev_in_ep->name, ret);
+ return ret;
+ }
+ ret = usb_ep_enable(dev->dev_out_ep, out);
+ if (ret == 0) {
+ dev->dev_out_ep_desc = out;
+ } else {
+ pr_debug("%s: cannot enable %s %s, ret=%d\n",
+ __func__, "OUT", dev->dev_out_ep->name, ret);
+fail0:
+ usb_ep_disable(dev->dev_in_ep);
+ return ret;
}
- if (dev->dev_in_ep == NULL || dev->dev_out_ep == NULL
- || (config != GS_BULK_CONFIG_ID && dev->dev_notify_ep == NULL)) {
- pr_err("gs_set_config: cannot find endpoints\n");
- ret = -ENODEV;
- goto exit_reset_config;
+ if (notify) {
+ ret = usb_ep_enable(dev->dev_notify_ep, notify);
+ if (ret == 0) {
+ dev->dev_notify_ep_desc = notify;
+ } else {
+ pr_debug("%s: cannot enable %s %s, ret=%d\n",
+ __func__, "NOTIFY",
+ dev->dev_notify_ep->name, ret);
+ usb_ep_disable(dev->dev_out_ep);
+ goto fail0;
+ }
}
+ dev->dev_config = config;
+
/* allocate and queue read requests */
ep = dev->dev_out_ep;
for (i=0; i<read_q_size && ret == 0; i++) {
@@ -1946,9 +1797,10 @@ static int gs_set_config(struct gs_dev *dev, unsigned config)
/* allocate write requests, and put on free list */
ep = dev->dev_in_ep;
for (i=0; i<write_q_size; i++) {
- if ((req_entry=gs_alloc_req_entry(ep, ep->maxpacket, GFP_ATOMIC))) {
- req_entry->re_req->complete = gs_write_complete;
- list_add(&req_entry->re_entry, &dev->dev_req_list);
+ req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ if (req) {
+ req->complete = gs_write_complete;
+ list_add(&req->list, &dev->dev_req_list);
} else {
pr_err("gs_set_config: cannot allocate "
"write requests\n");
@@ -1986,7 +1838,7 @@ exit_reset_config:
*/
static void gs_reset_config(struct gs_dev *dev)
{
- struct gs_req_entry *req_entry;
+ struct usb_request *req;
if (dev == NULL) {
pr_err("gs_reset_config: NULL device pointer\n");
@@ -2000,26 +1852,18 @@ static void gs_reset_config(struct gs_dev *dev)
/* free write requests on the free list */
while(!list_empty(&dev->dev_req_list)) {
- req_entry = list_entry(dev->dev_req_list.next,
- struct gs_req_entry, re_entry);
- list_del(&req_entry->re_entry);
- gs_free_req_entry(dev->dev_in_ep, req_entry);
+ req = list_entry(dev->dev_req_list.next,
+ struct usb_request, list);
+ list_del(&req->list);
+ gs_free_req(dev->dev_in_ep, req);
}
/* disable endpoints, forcing completion of pending i/o; */
/* completion handlers free their requests in this case */
- if (dev->dev_notify_ep) {
+ if (dev->dev_notify_ep)
usb_ep_disable(dev->dev_notify_ep);
- dev->dev_notify_ep = NULL;
- }
- if (dev->dev_in_ep) {
- usb_ep_disable(dev->dev_in_ep);
- dev->dev_in_ep = NULL;
- }
- if (dev->dev_out_ep) {
- usb_ep_disable(dev->dev_out_ep);
- dev->dev_out_ep = NULL;
- }
+ usb_ep_disable(dev->dev_in_ep);
+ usb_ep_disable(dev->dev_out_ep);
}
/*
@@ -2113,46 +1957,6 @@ static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
}
/*
- * gs_alloc_req_entry
- *
- * Allocates a request and its buffer, using the given
- * endpoint, buffer len, and kmalloc flags.
- */
-static struct gs_req_entry *
-gs_alloc_req_entry(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
-{
- struct gs_req_entry *req;
-
- req = kmalloc(sizeof(struct gs_req_entry), kmalloc_flags);
- if (req == NULL)
- return NULL;
-
- req->re_req = gs_alloc_req(ep, len, kmalloc_flags);
- if (req->re_req == NULL) {
- kfree(req);
- return NULL;
- }
-
- req->re_req->context = req;
-
- return req;
-}
-
-/*
- * gs_free_req_entry
- *
- * Frees a request and its buffer.
- */
-static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req)
-{
- if (ep != NULL && req != NULL) {
- if (req->re_req != NULL)
- gs_free_req(ep, req->re_req);
- kfree(req);
- }
-}
-
-/*
* gs_alloc_ports
*
* Allocate all ports and set the gs_dev struct to point to them.
@@ -2233,6 +2037,8 @@ static void gs_free_ports(struct gs_dev *dev)
}
}
+/*-------------------------------------------------------------------------*/
+
/* Circular Buffer */
/*
@@ -2393,3 +2199,77 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
return count;
}
+
+/*-------------------------------------------------------------------------*/
+
+static struct tty_driver *gs_tty_driver;
+
+/*
+ * gs_module_init
+ *
+ * Register as a USB gadget driver and a tty driver.
+ */
+static int __init gs_module_init(void)
+{
+ int i;
+ int retval;
+
+ retval = usb_gadget_register_driver(&gs_gadget_driver);
+ if (retval) {
+ pr_err("gs_module_init: cannot register gadget driver, "
+ "ret=%d\n", retval);
+ return retval;
+ }
+
+ gs_tty_driver = alloc_tty_driver(GS_NUM_PORTS);
+ if (!gs_tty_driver)
+ return -ENOMEM;
+ gs_tty_driver->owner = THIS_MODULE;
+ gs_tty_driver->driver_name = GS_SHORT_NAME;
+ gs_tty_driver->name = "ttygs";
+ gs_tty_driver->major = GS_MAJOR;
+ gs_tty_driver->minor_start = GS_MINOR_START;
+ gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ gs_tty_driver->init_termios = tty_std_termios;
+ /* must match GS_DEFAULT_DTE_RATE and friends */
+ gs_tty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ gs_tty_driver->init_termios.c_ispeed = GS_DEFAULT_DTE_RATE;
+ gs_tty_driver->init_termios.c_ospeed = GS_DEFAULT_DTE_RATE;
+ tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+ for (i = 0; i < GS_NUM_PORTS; i++)
+ mutex_init(&gs_open_close_lock[i]);
+
+ retval = tty_register_driver(gs_tty_driver);
+ if (retval) {
+ usb_gadget_unregister_driver(&gs_gadget_driver);
+ put_tty_driver(gs_tty_driver);
+ pr_err("gs_module_init: cannot register tty driver, "
+ "ret=%d\n", retval);
+ return retval;
+ }
+
+ pr_info("gs_module_init: %s %s loaded\n",
+ GS_LONG_NAME, GS_VERSION_STR);
+ return 0;
+}
+module_init(gs_module_init);
+
+/*
+ * gs_module_exit
+ *
+ * Unregister as a tty driver and a USB gadget driver.
+ */
+static void __exit gs_module_exit(void)
+{
+ tty_unregister_driver(gs_tty_driver);
+ put_tty_driver(gs_tty_driver);
+ usb_gadget_unregister_driver(&gs_gadget_driver);
+
+ pr_info("gs_module_exit: %s %s unloaded\n",
+ GS_LONG_NAME, GS_VERSION_STR);
+}
+module_exit(gs_module_exit);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 33b467a8352..1ef6df395e0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -129,7 +129,7 @@ config USB_ISP1760_PCI
config USB_ISP1760_OF
bool "Support for the OF platform bus"
- depends on USB_ISP1760_HCD && OF
+ depends on USB_ISP1760_HCD && PPC_OF
---help---
Enables support for the device present on the PowerPC
OpenFirmware platform bus.
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 4ba96c1e060..c9cec873826 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -988,7 +988,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
* This did not trigger for a long time now.
*/
printk(KERN_ERR "Reloading ptd %p/%p... qh %p readed: "
- "%d of %d done: %08x cur: %08x\n", qtd,
+ "%d of %zu done: %08x cur: %08x\n", qtd,
urb, qh, PTD_XFERRED_LENGTH(dw3),
qtd->length, done_map,
(1 << queue_entry));
@@ -1088,7 +1088,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
} else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
/* short BULK received */
- printk(KERN_ERR "short bulk, %d instead %d\n", length,
+ printk(KERN_ERR "short bulk, %d instead %zu\n", length,
qtd->length);
if (urb->transfer_flags & URB_SHORT_NOT_OK) {
urb->status = -EREMOTEIO;
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 73fb2a38f1e..440bf94f0d4 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -256,7 +256,7 @@ static struct pci_driver isp1761_pci_driver = {
static int __init isp1760_init(void)
{
- int ret;
+ int ret = -ENODEV;
init_kmem_once();
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 77204f001b9..e899a77dfb8 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -90,7 +90,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res, *mem;
int retval, irq;
- struct usb_hcd *hcd = 0;
+ struct usb_hcd *hcd = NULL;
irq = retval = platform_get_irq(pdev, 0);
if (retval < 0)
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 7aafd53fbca..189a9db0350 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -63,9 +63,6 @@
#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
-#define USB_VENDOR_ID_MICROCHIP 0x04d8
-#define USB_DEVICE_ID_PICDEM 0x000c
-
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define USB_LD_MINOR_BASE 0
#else
@@ -92,7 +89,6 @@ static struct usb_device_id ld_usb_table [] = {
{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
- { USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICDEM) },
{ USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 742be3c3594..054dedd2812 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -856,6 +856,11 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
struct urb *u;
struct usb_ctrlrequest req;
struct subcase *reqp;
+
+
+	/* the sign of this variable means:
+	 *  -: tested code must return this (negative) error code
+	 *  +: tested code may also return this (negative) error code
+	 */
int expected = 0;
/* requests here are mostly expected to succeed on any
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 2cffec85ee7..9ba64ccc135 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -447,6 +447,15 @@ config USB_SERIAL_MOS7840
To compile this driver as a module, choose M here: the
module will be called mos7840. If unsure, choose N.
+config USB_SERIAL_MOTOROLA
+ tristate "USB Motorola Phone modem driver"
+ ---help---
+ Say Y here if you want to use a Motorola phone with a USB
+ connector as a modem link.
+
+ To compile this driver as a module, choose M here: the
+ module will be called moto_modem. If unsure, choose N.
+
config USB_SERIAL_NAVMAN
tristate "USB Navman GPS device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 756859510d8..17a762ab676 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_USB_SERIAL_KOBIL_SCT) += kobil_sct.o
obj-$(CONFIG_USB_SERIAL_MCT_U232) += mct_u232.o
obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
+obj-$(CONFIG_USB_SERIAL_MOTOROLA) += moto_modem.o
obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
obj-$(CONFIG_USB_SERIAL_OPTION) += option.o
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index dc0ea08ed23..f5b57b196c5 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -73,6 +73,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 8a217648b25..a01e987c7d3 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -643,7 +643,7 @@ static void read_buf_callback(struct urb *urb)
static int iuu_bulk_write(struct usb_serial_port *port)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
- unsigned int flags;
+ unsigned long flags;
int result;
int i;
char *buf_ptr = port->write_urb->transfer_buffer;
@@ -694,7 +694,7 @@ static void iuu_uart_read_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct iuu_private *priv = usb_get_serial_port_data(port);
- unsigned int flags;
+ unsigned long flags;
int status;
int error = 0;
int len = 0;
@@ -759,7 +759,7 @@ static int iuu_uart_write(struct usb_serial_port *port, const u8 *buf,
int count)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
- unsigned int flags;
+ unsigned long flags;
dbg("%s - enter", __func__);
if (count > 256)
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
new file mode 100644
index 00000000000..2e8e05462ef
--- /dev/null
+++ b/drivers/usb/serial/moto_modem.c
@@ -0,0 +1,70 @@
+/*
+ * Motorola USB Phone driver
+ *
+ * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * {sigh}
+ * Motorola should be using the CDC ACM USB spec, but instead
+ * they try to just "do their own thing"...  This driver should handle a
+ * few phones for which a basic "dumb serial connection" is all that is
+ * needed to get a connection through to them.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+static struct usb_device_id id_table [] = {
+ { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
+	{ USB_DEVICE(0x0c44, 0x0022) },	/* unknown Motorola phone */
+ { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver moto_driver = {
+ .name = "moto-modem",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver moto_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "moto-modem",
+ },
+ .id_table = id_table,
+ .num_ports = 1,
+};
+
+static int __init moto_init(void)
+{
+ int retval;
+
+ retval = usb_serial_register(&moto_device);
+ if (retval)
+ return retval;
+ retval = usb_register(&moto_driver);
+ if (retval)
+ usb_serial_deregister(&moto_device);
+ return retval;
+}
+
+static void __exit moto_exit(void)
+{
+ usb_deregister(&moto_driver);
+ usb_serial_deregister(&moto_device);
+}
+
+module_init(moto_init);
+module_exit(moto_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4be2d442b1..e7e016e6033 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -154,8 +154,6 @@ static int option_send_setup(struct usb_serial_port *port);
#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
-#define NOVATELWIRELESS_PRODUCT_U727 0x5010
-
/* FUTURE NOVATEL PRODUCTS */
#define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000
#define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000
@@ -184,6 +182,9 @@ static int option_send_setup(struct usb_serial_port *port);
#define AXESSTEL_VENDOR_ID 0x1726
#define AXESSTEL_PRODUCT_MV110H 0x1000
+#define ONDA_VENDOR_ID 0x19d2
+#define ONDA_PRODUCT_ET502HS 0x0002
+
#define BANDRICH_VENDOR_ID 0x1A8D
#define BANDRICH_PRODUCT_C100_1 0x1002
#define BANDRICH_PRODUCT_C100_2 0x1003
@@ -269,7 +270,6 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel U727 */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */
@@ -293,14 +293,17 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(DELL_VENDOR_ID, 0x8133) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8136) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
{ USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */
+ { USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
+ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
+ { USB_DEVICE(0x19d2, 0x0001) }, /* Telstra NextG CDMA */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index a0ed889230a..1b09578cbb1 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -401,6 +401,14 @@ UNUSUAL_DEV( 0x04a5, 0x3010, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
+UNUSUAL_DEV( 0x04b4, 0x6830, 0x0000, 0x9999,
+ "Cypress",
+ "Cypress AT2LP",
+ US_SC_CYP_ATACB, US_PR_BULK, NULL,
+ 0),
+#endif
+
/* Reported by Simon Levitt <simon@whattf.com>
* This entry needs Sub and Proto fields */
UNUSUAL_DEV( 0x04b8, 0x0601, 0x0100, 0x0100,
@@ -539,17 +547,6 @@ UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
"CD-RW Device",
US_SC_8020, US_PR_CB, NULL, 0),
-/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
- * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
- * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
- */
-
-UNUSUAL_DEV( 0x04fc, 0x80c2, 0x0100, 0x0100,
- "Kobian Mercury",
- "Binocam DCB-132",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_BULK32),
-
#ifdef CONFIG_USB_STORAGE_USBAT
UNUSUAL_DEV( 0x04e6, 0x1010, 0x0000, 0x9999,
"Shuttle/SCM",
@@ -565,6 +562,16 @@ UNUSUAL_DEV( 0x04e8, 0x507c, 0x0220, 0x0220,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64),
+/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+ * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
+ * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
+ */
+UNUSUAL_DEV( 0x04fc, 0x80c2, 0x0100, 0x0100,
+ "Kobian Mercury",
+ "Binocam DCB-132",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BULK32),
+
/* Reported by Bob Sass <rls@vectordb.com> -- only rev 1.33 tested */
UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133,
"Belkin",
@@ -1304,6 +1311,16 @@ UNUSUAL_DEV( 0x0ace, 0x20ff, 0x0101, 0x0101,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_DEVICE ),
+/* Reported by F. Aben <f.aben@option.com>
+ * This device (wrongly) has a vendor-specific device descriptor.
+ * The entry is needed so usb-storage can bind to its mass-storage
+ * interface as an interface driver */
+UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
+ "Option",
+ "GI 0401 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
#ifdef CONFIG_USB_STORAGE_ISD200
UNUSUAL_DEV( 0x0bf6, 0xa001, 0x0100, 0x0110,
"ATI",
@@ -1361,13 +1378,6 @@ UNUSUAL_DEV( 0x0d96, 0x410a, 0x0001, 0xffff,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY),
-/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
-UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
- "INTOVA",
- "Pixtreme",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
/*
* Entry for Jenoptik JD 5200z3
*
@@ -1684,6 +1694,16 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Mauro Andreolini <andreoli@weblab.ing.unimo.it>
+ * This entry is needed to bypass the ZeroCD mechanism
+ * and to properly load as a modem device.
+ */
+UNUSUAL_DEV( 0x19d2, 0x2000, 0x0000, 0x0000,
+ "Onda ET502HS",
+ "USB MMC Storage",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE),
+
/* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
* and Renato Perini <rperini@email.it>
*/
@@ -1721,6 +1741,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_GO_SLOW ),
+/* Reported by Rohan Hart <rohan.hart17@gmail.com> */
+UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
+ "INTOVA",
+ "Pixtreme",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/*
* David Härdeman <david@2gen.com>
* The key makes the SCSI stack print confusing (but harmless) messages
@@ -1745,14 +1772,6 @@ UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_CAPACITY_HEURISTICS),
-#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
-UNUSUAL_DEV( 0x04b4, 0x6830, 0x0000, 0x9999,
- "Cypress",
- "Cypress AT2LP",
- US_SC_CYP_ATACB, US_PR_BULK, NULL,
- 0),
-#endif
-
/* Control/Bulk transport for all SubClass values */
USUAL_DEV(US_SC_RBC, US_PR_CB, USB_US_TYPE_STOR),
USUAL_DEV(US_SC_8020, US_PR_CB, USB_US_TYPE_STOR),
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index bb1dadaa4a2..2cdaf1ff831 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -171,7 +171,6 @@ config FB_SYS_FOPS
config FB_DEFERRED_IO
bool
depends on FB
- default y
config FB_METRONOME
tristate
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8ffdf357876..b004036d408 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -441,14 +441,15 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
value = DIV_ROUND_UP(clk_value_khz, PICOS2KHZ(info->var.pixclock));
- value = (value / 2) - 1;
- dev_dbg(info->device, " * programming CLKVAL = 0x%08lx\n", value);
-
- if (value <= 0) {
+ if (value < 2) {
dev_notice(info->device, "Bypassing pixel clock divider\n");
lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, ATMEL_LCDC_BYPASS);
} else {
- lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, value << ATMEL_LCDC_CLKVAL_OFFSET);
+ value = (value / 2) - 1;
+ dev_dbg(info->device, " * programming CLKVAL = 0x%08lx\n",
+ value);
+ lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1,
+ value << ATMEL_LCDC_CLKVAL_OFFSET);
info->var.pixclock = KHZ2PICOS(clk_value_khz / (2 * (value + 1)));
dev_dbg(info->device, " updated pixclk: %lu KHz\n",
PICOS2KHZ(info->var.pixclock));
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 275d9dab0c6..e721644bad7 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -17,11 +17,9 @@
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/oplib.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -299,7 +297,7 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *
par->physbase = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
- sbusfb_fill_var(&info->var, dp->node, 1);
+ sbusfb_fill_var(&info->var, dp, 1);
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
@@ -329,7 +327,7 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *
if (!info->screen_base)
goto out_unmap_regs;
- bw2_blank(0, info);
+ bw2_blank(FB_BLANK_UNBLANK, info);
bw2_init_fix(info, linebytes);
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index 0db0fecba93..b17e7467177 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -17,10 +17,9 @@
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -482,7 +481,7 @@ static int __devinit cg14_probe(struct of_device *op, const struct of_device_id
spin_lock_init(&par->lock);
- sbusfb_fill_var(&info->var, dp->node, 8);
+ sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 010ea53978f..3aa7b6cb026 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -17,11 +17,9 @@
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/oplib.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -373,7 +371,7 @@ static int __devinit cg3_probe(struct of_device *op,
par->physbase = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
- sbusfb_fill_var(&info->var, dp->node, 8);
+ sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
@@ -398,7 +396,7 @@ static int __devinit cg3_probe(struct of_device *op,
if (!info->screen_base)
goto out_unmap_regs;
- cg3_blank(0, info);
+ cg3_blank(FB_BLANK_UNBLANK, info);
if (!of_find_property(dp, "width", NULL)) {
err = cg3_do_default_mode(par);
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index fc90db6da65..2f64bb3bd25 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -17,9 +17,9 @@
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -728,7 +728,7 @@ static int __devinit cg6_probe(struct of_device *op,
par->physbase = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
- sbusfb_fill_var(&info->var, dp->node, 8);
+ sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
@@ -767,7 +767,7 @@ static int __devinit cg6_probe(struct of_device *op,
cg6_bt_init(par);
cg6_chip_init(info);
- cg6_blank(0, info);
+ cg6_blank(FB_BLANK_UNBLANK, info);
if (fb_alloc_cmap(&info->cmap, 256, 0))
goto out_unmap_regs;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index ad31983b43e..5fa8b76673c 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1853,6 +1853,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
+ unsigned short saved_ec;
+ int ret;
if (fbcon_is_inactive(vc, info))
return -EINVAL;
@@ -1865,6 +1867,11 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
* whole screen (prevents flicker).
*/
+ saved_ec = vc->vc_video_erase_char;
+ vc->vc_video_erase_char = vc->vc_scrl_erase_char;
+
+ ret = 0;
+
switch (dir) {
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
@@ -1883,7 +1890,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
(b - count)),
vc->vc_scrl_erase_char,
vc->vc_size_row * count);
- return 1;
+ ret = 1;
break;
case SCROLL_WRAP_MOVE:
@@ -1955,7 +1962,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
(b - count)),
vc->vc_scrl_erase_char,
vc->vc_size_row * count);
- return 1;
+ ret = 1;
+ break;
}
break;
@@ -1974,7 +1982,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
t),
vc->vc_scrl_erase_char,
vc->vc_size_row * count);
- return 1;
+ ret = 1;
break;
case SCROLL_WRAP_MOVE:
@@ -2044,10 +2052,13 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
t),
vc->vc_scrl_erase_char,
vc->vc_size_row * count);
- return 1;
+ ret = 1;
+ break;
}
+ break;
}
- return 0;
+ vc->vc_video_erase_char = saved_ec;
+ return ret;
}
@@ -2507,6 +2518,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
c = vc->vc_video_erase_char;
vc->vc_video_erase_char =
((c & 0xfe00) >> 1) | (c & 0xff);
+ c = vc->vc_def_color;
+ vc->vc_scrl_erase_char =
+ ((c & 0xFE00) >> 1) | (c & 0xFF);
vc->vc_attr >>= 1;
}
} else if (!vc->vc_hi_font_mask && cnt == 512) {
@@ -2537,9 +2551,14 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
if (vc->vc_can_do_color) {
vc->vc_video_erase_char =
((c & 0xff00) << 1) | (c & 0xff);
+ c = vc->vc_def_color;
+ vc->vc_scrl_erase_char =
+ ((c & 0xFF00) << 1) | (c & 0xFF);
vc->vc_attr <<= 1;
- } else
+ } else {
vc->vc_video_erase_char = c & ~0x100;
+ vc->vc_scrl_erase_char = c & ~0x100;
+ }
}
}
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 93dca3e2aa5..7992b13ee68 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -16,11 +16,10 @@
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/timer.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/upa.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -941,7 +940,7 @@ static int __devinit ffb_probe(struct of_device *op,
info->screen_base = (char *) par->physbase + FFB_DFB24_POFF;
info->pseudo_palette = par->pseudo_palette;
- sbusfb_fill_var(&info->var, dp->node, 32);
+ sbusfb_fill_var(&info->var, dp, 32);
par->fbsize = PAGE_ALIGN(info->var.xres * info->var.yres * 4);
ffb_fixup_var_rgb(&info->var);
@@ -987,7 +986,7 @@ static int __devinit ffb_probe(struct of_device *op,
* chosen console, it will have video outputs off in
* the DAC.
*/
- ffb_blank(0, info);
+ ffb_blank(FB_BLANK_UNBLANK, info);
if (fb_alloc_cmap(&info->cmap, 256, 0))
goto out_unmap_dac;
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index cd9d4cc2695..aaef9165ec9 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -63,54 +63,32 @@ static const struct {
{ 0x00014284, 19688 },
{ 0x00011104, 20400 },
{ 0x00016363, 23625 },
- { 0x00015303, 24380 },
{ 0x000031AC, 24923 },
{ 0x0000215D, 25175 },
{ 0x00001087, 27000 },
{ 0x0000216C, 28322 },
{ 0x0000218D, 28560 },
- { 0x00010041, 29913 },
{ 0x000010C9, 31200 },
{ 0x00003147, 31500 },
- { 0x000141A1, 32400 },
{ 0x000010A7, 33032 },
- { 0x00012182, 33375 },
- { 0x000141B1, 33750 },
{ 0x00002159, 35112 },
{ 0x00004249, 35500 },
{ 0x00000057, 36000 },
- { 0x000141E1, 37125 },
{ 0x0000219A, 37889 },
{ 0x00002158, 39168 },
{ 0x00000045, 40000 },
- { 0x000131A1, 40500 },
- { 0x00010061, 42301 },
{ 0x00000089, 43163 },
- { 0x00012151, 43875 },
{ 0x000010E7, 44900 },
{ 0x00002136, 45720 },
- { 0x000152E1, 47250 },
- { 0x00010071, 48000 },
{ 0x00003207, 49500 },
{ 0x00002187, 50000 },
- { 0x00014291, 50625 },
- { 0x00011101, 51188 },
- { 0x00017481, 54563 },
{ 0x00004286, 56250 },
- { 0x00014170, 57375 },
- { 0x00016210, 58500 },
{ 0x000010E5, 60065 },
- { 0x00013140, 62796 },
{ 0x00004214, 65000 },
- { 0x00016250, 65250 },
{ 0x00001105, 68179 },
- { 0x000141C0, 69600 },
- { 0x00015220, 70160 },
- { 0x00010050, 72000 },
{ 0x000031E4, 74250 },
{ 0x00003183, 75000 },
{ 0x00004284, 78750 },
- { 0x00012130, 80052 },
{ 0x00001104, 81600 },
{ 0x00006363, 94500 },
{ 0x00005303, 97520 },
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index f3160fc2979..8bc46e93034 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -16,10 +16,9 @@
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -562,7 +561,7 @@ static int __devinit leo_probe(struct of_device *op, const struct of_device_id *
par->physbase = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
- sbusfb_fill_var(&info->var, dp->node, 32);
+ sbusfb_fill_var(&info->var, dp, 32);
leo_fixup_var_rgb(&info->var);
linebytes = of_getintprop_default(dp, "linebytes",
@@ -601,7 +600,7 @@ static int __devinit leo_probe(struct of_device *op, const struct of_device_id *
leo_init_wids(info);
leo_init_hw(info);
- leo_blank(0, info);
+ leo_blank(FB_BLANK_UNBLANK, info);
if (fb_alloc_cmap(&info->cmap, 256, 0))
goto out_unmap_regs;
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 9de1c114f80..39ac49e0682 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -27,6 +27,16 @@ config LOGO_LINUX_CLUT224
bool "Standard 224-color Linux logo"
default y
+config LOGO_BLACKFIN_VGA16
+ bool "16-colour Blackfin Processor Linux logo"
+ depends on BLACKFIN
+ default y
+
+config LOGO_BLACKFIN_CLUT224
+ bool "224-colour Blackfin Processor Linux logo"
+ depends on BLACKFIN
+ default y
+
config LOGO_DEC_CLUT224
bool "224-color Digital Equipment Corporation Linux logo"
depends on MACH_DECSTATION || ALPHA
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index a5fc4edf84e..b91251d1fe4 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -4,6 +4,8 @@ obj-$(CONFIG_LOGO) += logo.o
obj-$(CONFIG_LOGO_LINUX_MONO) += logo_linux_mono.o
obj-$(CONFIG_LOGO_LINUX_VGA16) += logo_linux_vga16.o
obj-$(CONFIG_LOGO_LINUX_CLUT224) += logo_linux_clut224.o
+obj-$(CONFIG_LOGO_BLACKFIN_CLUT224) += logo_blackfin_clut224.o
+obj-$(CONFIG_LOGO_BLACKFIN_VGA16) += logo_blackfin_vga16.o
obj-$(CONFIG_LOGO_DEC_CLUT224) += logo_dec_clut224.o
obj-$(CONFIG_LOGO_MAC_CLUT224) += logo_mac_clut224.o
obj-$(CONFIG_LOGO_PARISC_CLUT224) += logo_parisc_clut224.o
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index fc72684aae5..2e85a2b52d0 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -24,6 +24,8 @@
extern const struct linux_logo logo_linux_mono;
extern const struct linux_logo logo_linux_vga16;
extern const struct linux_logo logo_linux_clut224;
+extern const struct linux_logo logo_blackfin_vga16;
+extern const struct linux_logo logo_blackfin_clut224;
extern const struct linux_logo logo_dec_clut224;
extern const struct linux_logo logo_mac_clut224;
extern const struct linux_logo logo_parisc_clut224;
@@ -65,6 +67,10 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
/* Generic Linux logo */
logo = &logo_linux_vga16;
#endif
+#ifdef CONFIG_LOGO_BLACKFIN_VGA16
+ /* Blackfin processor logo */
+ logo = &logo_blackfin_vga16;
+#endif
#ifdef CONFIG_LOGO_SUPERH_VGA16
/* SuperH Linux logo */
logo = &logo_superh_vga16;
@@ -76,6 +82,10 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
/* Generic Linux logo */
logo = &logo_linux_clut224;
#endif
+#ifdef CONFIG_LOGO_BLACKFIN_CLUT224
+ /* Blackfin Linux logo */
+ logo = &logo_blackfin_clut224;
+#endif
#ifdef CONFIG_LOGO_DEC_CLUT224
/* DEC Linux logo on MIPS/MIPS64 or ALPHA */
logo = &logo_dec_clut224;
diff --git a/drivers/video/logo/logo_blackfin_clut224.ppm b/drivers/video/logo/logo_blackfin_clut224.ppm
new file mode 100644
index 00000000000..dc9a50a1447
--- /dev/null
+++ b/drivers/video/logo/logo_blackfin_clut224.ppm
@@ -0,0 +1,1127 @@
+P3
+# This was generated by the GIMP & Netpbm tools
+# gimp linux_bf.svg (create 80x80 save as linux_bf.ppm)
+# pnmquant 224 linux_bf.ppm | pnmnoraw > logo_blackfin_clut224.ppm
+#
+80 80
+255
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+1 1 1 3 3 3 4 6 6 6 6 6 4 6 6 3 3 3
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 2 2 2 10 10 10 26 26 27
+44 44 45 66 66 66 78 81 81 78 81 81 75 75 76 60 60 60
+39 39 39 20 20 20 6 6 6 1 1 1 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 2 2 2 14 14 14 47 47 47 84 84 84 75 75 76
+47 47 47 12 12 12 0 0 0 0 0 0 0 0 0 20 20 20
+53 54 54 81 81 82 74 74 74 31 31 31 6 6 6 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+4 4 4 34 34 35 84 84 84 60 60 60 4 4 4 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 17 18 18 75 75 76 66 66 66 17 18 18
+1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3 3
+42 42 43 84 84 84 8 8 8 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 3 3 36 40 40 10 16 16 0 0 0 31 31 31 84 84 84
+29 29 30 2 2 2 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 26 27 27
+84 84 84 3 3 3 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+15 19 19 114 115 115 110 114 114 44 46 46 0 0 0 12 12 12
+90 87 86 24 24 24 1 1 1 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 8 8 8 75 75 76
+14 14 14 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+30 40 40 133 133 133 129 130 130 78 85 85 23 31 30 0 0 0
+19 19 19 78 81 81 13 13 13 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 26 27 27 81 81 82
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+36 40 40 89 90 91 55 63 63 23 31 30 4 6 6 0 0 0
+0 0 0 60 60 60 47 47 47 2 2 2 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 2 2 2 53 54 54 34 34 35
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+4 10 10 7 9 9 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 1 1 1 84 84 84 13 13 13 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 4 6 6 78 81 81 2 2 2
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 65 64 64 36 36 36 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 10 11 11 81 81 82 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 12 12 12 67 70 70 4 4 4 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 16 16 16 81 81 82 0 0 0
+0 0 0 0 0 0 4 10 10 44 50 50 18 21 21 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 1 1 78 85 85 120 121 122 7 9 9 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 82 82 81 12 12 12 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 19 19 19 81 81 82 0 0 0
+0 0 0 2 2 2 8 8 8 55 63 63 108 110 110 52 58 58
+0 0 0 0 0 0 0 0 0 0 0 0 42 42 43 129 130 130
+140 142 143 114 115 115 110 114 114 129 130 130 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 75 75 76 24 24 24 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 19 19 19 74 74 74 0 0 0
+4 6 6 167 168 167 196 196 197 196 196 197 61 65 66 78 85 85
+0 0 0 0 0 0 0 0 0 118 118 118 202 202 203 219 219 219
+219 219 219 214 214 215 187 187 188 78 85 85 29 33 34 0 0 0
+0 0 0 0 0 0 0 0 0 60 60 60 39 39 39 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 19 19 19 72 71 71 0 0 0
+185 185 184 244 245 245 250 251 252 251 251 252 247 248 249 36 36 36
+0 0 0 0 0 0 13 13 13 243 243 241 252 252 252 253 253 253
+253 253 253 252 252 252 247 247 246 193 193 194 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 42 42 43 50 51 51 1 1 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 19 19 19 78 81 81 0 0 0
+247 247 246 193 193 194 95 97 97 193 193 194 255 255 255 237 237 238
+0 0 0 0 0 0 202 202 203 255 255 255 247 247 246 108 107 107
+82 85 86 167 168 167 255 255 255 248 248 249 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 34 34 35 56 56 56 2 2 2
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 19 19 19 78 81 81 0 0 0
+250 250 251 50 51 51 153 154 155 150 151 151 244 245 245 244 245 245
+44 50 50 84 89 89 153 154 155 255 255 255 140 142 143 0 0 0
+149 149 150 156 155 156 237 237 238 254 254 254 67 70 70 0 0 0
+0 0 0 0 0 0 0 0 0 39 39 39 47 47 47 1 1 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 19 19 19 81 81 82 0 0 0
+248 248 249 34 34 35 72 71 71 165 165 165 202 202 203 244 245 245
+10 16 16 82 85 86 89 90 91 255 255 255 95 97 97 0 0 0
+0 0 0 53 54 54 177 177 174 255 255 255 127 127 126 0 0 0
+0 0 0 0 0 0 0 0 0 39 39 39 36 36 36 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 14 14 14 78 81 81 0 0 0
+243 243 243 89 90 91 0 0 0 36 40 40 201 147 55 241 205 27
+241 205 27 241 205 27 241 205 27 238 192 33 108 110 110 0 0 0
+0 0 0 0 0 0 191 190 190 254 254 254 34 34 35 0 0 0
+0 0 0 0 0 0 0 0 0 42 42 43 42 42 43 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 10 10 10 75 75 76 0 0 0
+202 202 203 218 217 217 21 19 17 230 165 41 199 129 48 213 157 40
+244 212 23 243 206 27 180 121 62 243 206 27 244 209 25 226 179 40
+15 10 7 103 103 103 254 254 254 251 251 252 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 17 18 18 58 58 58 2 2 2
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 9 9 9 84 84 84 0 0 0
+0 0 0 226 226 219 213 157 40 244 209 25 245 211 23 245 211 23
+245 214 38 245 214 38 245 211 23 245 211 23 245 211 23 244 212 23
+244 212 23 241 205 27 226 179 40 196 196 197 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 74 74 74 4 6 6
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 7 7 7 84 84 84 0 0 0
+54 42 32 213 157 40 243 206 27 245 211 23 245 211 23 245 211 23
+245 215 41 245 214 35 245 211 23 245 211 23 245 214 35 245 215 41
+245 214 35 245 211 23 245 211 23 238 204 29 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 81 81 82 12 12 12
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 4 6 6 74 74 74 0 0 0
+201 147 55 241 205 27 245 211 23 245 211 23 245 211 23 245 213 29
+245 214 38 245 211 23 245 211 23 245 214 35 245 215 41 245 215 41
+245 213 29 142 83 36 142 83 36 244 209 25 1 1 1 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 74 74 74 25 25 26
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 4 4 4 72 71 71 6 6 6
+213 157 40 244 209 25 245 211 23 245 211 23 245 211 23 245 213 29
+244 212 23 245 211 23 245 214 35 245 215 41 245 215 41 245 213 29
+142 83 36 142 83 36 238 192 33 241 205 27 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 44 44 44 49 50 50
+2 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 3 3 3 65 64 64 17 18 18
+199 129 48 199 129 48 245 211 23 245 211 23 245 211 23 245 211 23
+245 211 23 244 212 23 245 214 38 245 214 38 142 83 36 142 83 36
+142 83 36 245 211 23 244 210 23 230 165 41 0 0 0 0 0 0
+78 81 81 114 115 115 73 79 79 0 0 0 3 3 3 81 81 82
+9 9 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 1 1 1 49 50 50 29 29 30
+90 87 86 199 129 48 173 101 51 173 101 51 245 211 23 245 211 23
+245 211 23 230 165 41 142 83 36 142 83 36 142 83 36 245 211 23
+244 210 23 241 205 27 230 165 41 175 173 165 3 3 3 0 0 0
+44 46 46 118 118 118 118 118 118 108 110 110 0 0 0 75 75 76
+28 28 28 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 1 1 1 52 53 53 26 26 27
+118 118 118 175 173 165 199 129 48 173 101 51 173 101 51 173 101 51
+173 101 51 142 83 36 173 101 51 245 211 23 244 209 25 238 204 29
+213 157 40 214 196 166 227 227 227 214 214 215 120 121 122 0 0 0
+0 0 0 108 110 110 118 118 118 118 118 118 0 0 0 23 23 23
+66 66 66 4 6 6 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 7 7 7 75 75 76 4 4 4
+127 127 126 205 205 205 181 181 181 199 129 48 226 179 40 244 209 25
+244 209 25 244 209 25 243 206 27 238 192 33 213 157 40 187 166 103
+234 234 234 248 248 249 251 252 252 248 248 249 214 214 215 0 0 0
+0 0 0 0 0 0 103 103 103 100 103 103 0 0 0 0 0 0
+78 81 81 24 24 24 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 26 27 27 82 82 81 0 0 0
+146 146 147 234 234 234 222 221 221 178 178 179 180 121 62 213 157 40
+213 157 40 213 157 40 201 147 55 180 121 62 219 219 219 243 243 241
+253 253 253 255 255 255 255 255 255 255 255 255 250 250 251 120 121 122
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+20 20 20 72 71 71 8 8 8 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 10 10 10 75 75 76 22 22 22 0 0 0
+205 205 205 253 253 253 247 248 249 212 211 212 178 178 179 161 161 162
+165 165 165 181 181 181 205 205 205 227 227 227 244 245 245 254 254 254
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 239 239 240
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 67 70 70 39 39 39 2 2 2 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 4 4 4 50 51 51 60 60 60 0 0 0 16 16 16
+249 250 251 255 255 255 255 255 255 240 240 240 209 210 210 193 193 194
+200 200 197 212 211 212 231 231 231 246 247 248 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 253 253 253
+153 154 155 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 3 3 3 84 84 84 20 20 20 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+2 2 2 33 33 34 81 81 82 0 0 0 0 0 0 231 231 231
+255 255 255 255 255 255 255 255 255 253 253 253 234 234 234 222 221 221
+227 227 227 237 237 238 250 250 251 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+240 240 240 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 26 27 27 72 71 71 8 8 8 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1
+21 21 22 84 84 84 7 7 7 0 0 0 150 151 151 252 252 252
+255 255 255 255 255 255 255 255 255 255 255 255 252 252 252 244 245 245
+246 247 248 253 253 253 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+251 251 252 9 9 9 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 65 64 64 47 47 47 3 3 3
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 12 12 12
+75 75 76 26 26 27 0 0 0 1 1 1 239 239 240 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 202 202 203 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 84 84 84 28 28 29
+1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 4 4 4 55 55 55
+60 60 60 0 0 0 0 0 0 95 97 97 248 248 249 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 244 245 245 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 14 14 14 82 82 81
+15 15 15 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 1 1 1 29 29 30 84 84 84
+0 0 0 0 0 0 0 0 0 156 155 156 247 247 246 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 247 247 246 240 240 240 232 232 233 232 232 233
+243 243 243 253 253 253 53 54 54 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 44 44 44
+60 60 60 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 10 10 10 81 81 82 14 14 14
+0 0 0 0 0 0 6 6 6 150 151 151 214 214 215 250 251 252
+255 255 255 255 255 255 255 255 255 246 247 248 218 217 217 214 214 215
+218 217 217 244 245 245 255 255 255 255 255 255 255 255 255 250 248 249
+232 232 233 214 214 215 196 196 197 182 183 184 181 181 181 181 181 181
+187 187 188 240 240 240 232 232 233 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+78 81 81 34 34 35 1 1 1 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 1 1 1 39 39 39 74 74 74 0 0 0
+0 0 0 0 0 0 60 60 60 161 161 162 200 200 197 229 229 230
+251 251 252 255 255 255 255 255 255 255 255 255 243 243 241 214 214 215
+248 248 249 255 255 255 255 255 255 255 255 255 255 255 255 254 254 254
+239 239 240 214 214 215 193 193 194 182 183 184 178 178 179 176 177 177
+176 177 177 182 183 184 248 248 249 14 14 14 0 0 0 61 65 66
+10 16 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+10 10 10 84 84 84 13 13 13 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 10 11 11 82 82 81 7 7 7 0 0 0
+0 0 0 0 0 0 165 165 165 229 229 230 249 250 251 254 254 254
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 253 253 253 240 240 240 227 227 227 205 205 205
+181 181 181 176 177 177 191 190 190 227 227 227 0 0 0 44 50 50
+84 89 89 61 65 66 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 58 58 58 49 50 50 3 3 3 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 1 1 1 36 36 36 66 66 66 0 0 0 29 33 34
+0 3 3 26 27 27 234 234 234 254 254 254 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+254 254 254 253 253 254 252 253 253 253 253 254 253 254 254 253 254 254
+254 254 254 255 255 255 255 255 255 255 255 255 255 255 255 251 251 252
+227 227 227 187 187 188 176 177 177 222 221 221 13 13 13 0 0 0
+12 15 14 73 79 79 36 40 40 0 0 0 0 0 0 0 0 0
+0 0 0 1 1 1 90 87 86 17 18 18 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 7 7 7 78 81 81 12 12 12 23 31 30 52 58 58
+0 0 0 209 210 210 253 253 253 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 254 254 254
+251 251 252 150 151 151 103 103 103 129 130 130 196 196 197 250 250 251
+252 252 253 254 254 254 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 240 240 240 193 193 194 196 196 197 229 229 230 0 0 0
+0 0 0 4 10 10 30 40 40 0 3 3 0 0 0 0 0 0
+0 0 0 0 0 0 47 47 47 53 54 54 3 3 3 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 23 23 23 81 81 82 0 0 0 52 58 58 36 40 40
+42 42 43 250 250 251 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 254 254 254
+227 227 227 7 7 7 7 7 7 7 7 7 7 7 7 44 44 45
+156 155 156 249 250 251 253 253 253 254 254 254 255 255 255 255 255 255
+255 255 255 255 255 255 247 247 246 222 221 221 239 239 240 0 0 0
+30 40 40 44 50 50 23 31 30 29 33 34 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 90 87 86 16 16 16 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+2 2 2 50 51 51 42 42 43 29 33 34 52 58 58 0 0 0
+232 232 233 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 254 254 254
+250 251 252 44 44 44 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 56 56 56 209 210 210 252 252 253 254 254 254 255 255 255
+255 255 255 255 255 255 255 255 255 254 253 253 249 250 251 146 146 147
+36 40 40 44 50 50 36 40 40 67 70 70 61 65 66 0 0 0
+0 0 0 0 0 0 0 0 0 55 55 55 44 44 45 1 1 1
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+10 10 10 81 81 82 1 1 1 52 58 58 44 50 50 52 53 53
+251 251 252 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 254 254 254
+253 253 253 187 187 188 8 8 8 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 19 19 19 178 178 179 252 252 253 254 254 254
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 237 237 238
+10 16 16 30 40 40 0 3 3 23 31 30 84 89 89 0 0 0
+0 0 0 0 0 0 0 0 0 3 3 3 81 81 82 9 9 9
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+29 29 30 72 71 71 10 16 16 52 58 58 0 0 0 222 221 221
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+254 254 254 251 251 252 95 97 97 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 10 10 10 161 161 162 251 252 252
+254 254 254 255 255 255 255 255 255 255 255 255 255 255 255 248 248 249
+0 0 0 0 0 0 0 0 0 0 0 0 84 89 89 0 3 3
+0 0 0 0 0 0 0 0 0 0 0 0 74 74 74 26 27 27
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 4 4 4
+65 64 64 20 20 20 20 25 25 30 40 40 0 0 0 247 247 246
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 253 253 254 222 221 221 9 9 9 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 8 8 8 149 149 150
+252 252 253 254 254 254 255 255 255 255 255 255 255 255 255 252 252 252
+0 0 0 0 0 0 0 0 0 0 0 0 73 79 79 12 15 14
+0 0 0 0 0 0 0 0 0 0 0 0 36 36 36 58 58 58
+3 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20 20 20
+74 74 74 0 0 0 4 10 10 4 10 10 36 36 36 252 252 252
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 227 227 227 253 253 253 255 255 255
+255 255 255 254 254 254 250 251 252 65 64 64 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 8 8 8
+146 146 147 251 252 252 254 254 254 255 255 255 255 255 255 253 254 254
+0 0 0 0 0 0 0 0 0 0 0 0 52 58 58 10 16 16
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 82 82 81
+9 9 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 4 6 6 65 64 64
+25 25 25 0 3 3 30 40 40 0 0 0 187 187 188 254 254 254
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 193 193 194 253 252 252 255 255 255
+255 255 255 255 255 255 252 253 253 129 130 130 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+8 8 8 149 149 150 252 252 253 254 254 254 255 255 255 254 254 254
+52 53 53 0 0 0 0 0 0 0 0 0 20 25 25 2 5 4
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 81 81 82
+20 20 20 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 26 26 27 81 81 82
+0 0 0 18 21 21 73 79 79 0 0 0 237 237 238 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 182 183 184 255 255 255 255 255 255
+255 255 255 255 255 255 253 253 253 176 177 177 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 8 8 8 153 154 155 251 252 252 254 254 254 255 255 255
+150 151 151 0 0 0 0 0 0 0 0 0 20 25 25 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 65 64 64
+33 33 34 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 6 6 6 67 70 70 20 20 20
+0 0 0 23 31 30 82 85 86 0 0 0 247 247 246 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 182 183 184 255 255 255 255 255 255
+255 255 255 255 255 255 253 254 254 214 214 215 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 8 8 8 156 155 156 252 252 253 254 254 254
+167 168 167 0 0 0 0 0 0 0 0 0 67 70 70 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 47 47 47
+44 44 44 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 21 21 22 75 75 76 0 0 0
+0 0 0 29 33 34 84 89 89 0 0 0 248 248 249 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 248 248 249 181 181 181 255 255 255 255 255 255
+255 255 255 255 255 255 254 254 254 240 240 240 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 8 8 8 161 161 162 251 252 252
+185 185 184 4 4 4 0 0 0 10 11 11 100 103 103 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 36 36 36
+55 55 55 2 2 2 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 33 33 34 50 51 51 0 0 0
+0 0 0 9 11 11 82 85 86 10 16 16 248 248 249 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 245 244 245 179 180 181 255 255 255 255 255 255
+255 255 255 255 255 255 254 254 254 251 252 252 20 20 20 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 10 10 10 161 161 162
+205 205 205 17 18 18 0 0 0 95 97 97 78 81 81 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 36 36 36
+53 54 54 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 31 31 31 58 58 58 0 0 0
+0 0 0 0 0 0 67 70 70 78 81 81 248 248 249 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 234 234 234 179 180 181 255 255 255 255 255 255
+255 255 255 255 255 255 254 254 254 251 252 252 23 23 23 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+10 11 11 84 84 84 161 161 162 209 210 210 229 229 230 237 237 238
+202 202 203 26 26 27 9 11 11 44 50 50 0 0 0 4 6 6
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 52 53 53
+39 39 39 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 23 23 23 78 81 81 213 157 40
+243 206 27 243 206 27 54 42 32 73 79 79 222 221 221 255 255 255
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 238 238 236 178 178 179 255 255 255 255 255 255
+255 255 255 255 255 255 254 254 254 251 252 253 36 36 36 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 84 84 84
+222 221 221 251 252 252 252 253 253 253 253 253 253 254 254 252 252 253
+146 146 147 140 142 143 156 155 156 110 114 114 26 27 27 82 85 86
+84 89 89 95 97 97 36 40 40 0 0 0 0 0 0 74 74 74
+23 23 23 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 14 14 14
+24 24 24 26 26 27 26 26 27 26 26 27 25 25 26 21 21 22
+7 7 7 0 0 0 1 1 1 34 34 35 238 192 33 244 210 23
+244 212 23 244 212 23 244 210 23 88 79 47 200 200 197 254 254 254
+255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 244 245 245 179 180 181 255 255 255 255 255 255
+255 255 255 255 255 255 254 254 254 252 252 253 36 36 36 7 7 7
+7 7 7 7 7 7 7 7 7 8 8 8 149 149 150 251 251 252
+252 252 253 253 253 253 253 253 253 250 248 249 239 223 156 239 223 156
+120 121 122 182 183 184 176 177 177 120 121 122 33 33 34 3 3 3
+0 0 0 67 70 70 146 146 147 20 25 25 1 1 1 82 82 81
+9 9 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 19 19 19 89 90 91
+146 146 147 150 151 151 150 151 151 150 151 151 150 151 151 129 130 130
+58 58 58 6 6 6 14 14 14 201 147 55 245 211 23 245 213 29
+245 214 35 245 215 41 245 213 29 244 210 23 142 83 36 232 232 233
+254 254 254 255 255 255 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 185 185 184 255 255 255 255 255 255
+255 255 255 255 255 255 254 254 254 251 252 252 50 51 51 7 7 7
+7 7 7 7 7 7 7 7 7 146 146 147 251 252 252 252 253 253
+251 252 253 239 239 240 171 168 154 129 130 130 137 136 134 175 173 165
+221 218 200 65 64 64 22 22 22 186 186 187 114 115 115 26 26 27
+2 2 2 0 0 0 61 65 66 31 33 27 238 192 33 108 96 91
+9 9 9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 2 2 2 52 53 53 178 178 179
+21 21 22 7 7 7 7 7 7 7 7 7 7 7 7 118 118 118
+137 136 134 36 36 36 65 64 64 243 206 27 244 212 23 245 215 41
+245 215 41 245 215 41 245 215 41 244 209 25 244 209 25 1 1 1
+219 219 219 253 253 253 255 255 255 255 255 255 255 255 255 255 255 255
+255 255 255 255 255 255 255 255 255 214 214 215 255 255 255 255 255 255
+255 255 255 255 255 255 254 254 254 252 252 253 50 51 51 7 7 7
+7 7 7 7 7 7 84 84 84 250 251 252 252 253 253 251 251 252
+167 168 167 22 22 22 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 34 34 35 187 187 188 103 103 103
+29 29 30 3 3 3 7 9 9 238 204 29 245 215 41 245 214 35
+28 28 28 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 7 7 7 90 87 86 178 178 179
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 16 16 16
+193 193 194 133 133 133 187 166 103 245 218 76 245 218 76 245 216 51
+245 216 51 245 218 76 246 224 96 245 218 76 245 218 76 245 218 76
+25 25 25 186 186 187 252 252 252 254 254 254 254 254 254 253 254 254
+254 254 254 254 254 254 254 254 254 246 247 248 254 254 254 253 254 254
+254 254 254 254 254 254 253 254 254 251 252 252 36 36 36 7 7 7
+7 7 7 20 20 20 229 229 230 253 253 253 252 253 253 178 178 179
+10 10 10 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 42 42 43 196 196 197
+118 118 118 33 33 34 238 204 29 245 215 41 245 215 41 245 215 41
+49 50 50 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 17 18 18 120 121 122 137 136 134
+7 7 7 7 7 7 34 34 35 20 20 20 7 7 7 7 7 7
+202 202 203 209 206 202 193 187 162 193 187 162 248 234 156 245 218 76
+245 218 76 248 234 156 193 187 162 193 187 162 193 187 162 214 196 166
+240 219 129 95 97 97 196 196 197 186 186 187 187 187 188 196 196 197
+252 252 253 251 252 253 212 211 212 187 187 188 196 196 197 251 252 252
+218 217 217 187 187 188 191 190 190 250 251 252 24 24 24 7 7 7
+7 7 7 110 114 114 252 252 253 253 254 254 250 251 252 89 90 91
+89 90 91 129 130 130 127 127 126 44 44 44 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 49 50 50
+202 202 203 214 196 166 245 216 51 245 214 38 245 214 35 245 214 38
+58 58 58 2 2 2 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 31 31 31 156 155 156 82 82 81
+7 7 7 10 10 10 237 237 238 66 66 66 7 7 7 25 25 25
+247 248 249 81 81 82 7 7 7 31 31 31 247 237 174 245 218 76
+246 226 108 200 200 197 7 7 7 7 7 7 7 7 7 137 136 134
+247 237 174 193 193 194 72 71 71 7 7 7 7 7 7 8 8 8
+196 196 197 250 251 252 67 70 70 7 7 7 84 84 84 244 245 245
+47 47 47 7 7 7 118 118 118 249 250 251 12 12 12 7 7 7
+9 9 9 218 217 217 253 253 253 254 254 254 252 253 253 251 251 252
+249 250 251 237 237 238 95 97 97 9 9 9 15 15 15 95 97 97
+47 47 47 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+66 66 66 240 230 197 246 226 108 245 214 38 245 211 23 244 212 23
+65 64 64 3 3 3 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 2 2 2 52 53 53 185 185 184 25 25 25
+7 7 7 60 60 60 240 240 240 14 14 14 7 7 7 84 84 84
+247 248 249 23 23 23 7 7 7 94 91 88 248 234 156 245 218 76
+248 234 156 127 127 126 7 7 7 7 7 7 7 7 7 167 168 167
+251 248 240 65 64 64 7 7 7 7 7 7 7 7 7 7 7 7
+84 84 84 243 243 243 15 15 15 7 7 7 140 142 143 146 146 147
+7 7 7 33 33 34 237 237 238 243 243 243 21 21 22 120 121 122
+218 217 217 252 252 253 254 254 254 253 253 254 252 253 253 251 252 252
+247 248 249 72 71 71 7 7 7 58 58 58 222 221 221 248 248 249
+75 75 76 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 82 82 81 246 239 193 246 226 108 245 216 51 245 214 38
+238 192 33 21 21 22 1 1 1 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 8 8 8 90 87 86 182 183 184 7 7 7
+7 7 7 120 121 122 187 187 188 7 7 7 7 7 7 146 146 147
+205 205 205 7 7 7 7 7 7 153 153 148 240 219 129 246 224 96
+246 239 193 39 39 39 60 60 60 108 110 110 7 7 7 202 202 203
+227 227 227 7 7 7 7 7 7 205 205 205 89 90 91 7 7 7
+120 121 122 193 193 194 7 7 7 7 7 7 186 186 187 25 25 25
+7 7 7 167 168 167 251 251 252 243 243 243 214 214 215 250 251 252
+251 252 253 254 254 254 253 253 253 219 219 219 140 140 139 140 140 139
+118 118 118 7 7 7 52 53 53 237 237 238 247 247 246 176 177 177
+8 8 8 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 95 97 97 246 239 193 246 226 108 245 216 51
+245 214 38 201 147 55 31 31 31 103 103 103 103 103 103 72 71 71
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 17 18 18 127 127 126 140 140 139 7 7 7
+7 7 7 17 18 18 17 18 18 7 7 7 95 97 97 244 245 245
+146 146 147 7 7 7 7 7 7 200 200 197 246 226 108 240 219 129
+194 194 184 7 7 7 140 140 139 89 90 91 7 7 7 232 232 233
+165 165 165 7 7 7 31 31 31 249 250 251 39 39 39 7 7 7
+176 177 177 133 133 133 7 7 7 22 22 22 108 110 110 7 7 7
+72 71 71 251 252 252 252 253 253 250 251 252 247 248 249 205 205 205
+251 252 253 254 254 254 252 252 253 84 84 84 7 7 7 7 7 7
+7 7 7 7 7 7 140 142 143 247 248 249 140 140 139 14 14 14
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 16 16 16
+14 14 14 7 7 7 7 7 7 114 115 115 246 239 193 246 224 96
+245 216 51 245 216 51 243 235 220 176 177 177 185 185 184 229 229 230
+47 47 47 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 31 31 31 156 155 156 90 87 86 7 7 7
+7 7 7 7 7 7 7 7 7 31 31 31 243 243 241 247 247 246
+84 84 84 7 7 7 26 27 27 246 239 193 246 226 108 248 234 156
+108 110 110 7 7 7 212 211 212 44 44 44 22 22 22 249 250 251
+108 107 107 7 7 7 89 90 91 238 238 236 114 115 115 118 118 118
+231 231 231 75 75 76 7 7 7 34 34 35 10 11 11 12 12 12
+214 214 215 253 253 253 253 253 253 200 200 197 31 31 31 103 103 103
+252 252 253 252 253 253 218 217 217 9 9 9 7 7 7 7 7 7
+7 7 7 7 7 7 25 25 25 39 39 39 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 103 103 103 234 234 234
+181 181 181 7 7 7 7 7 7 7 7 7 133 133 133 247 237 174
+246 224 96 246 226 108 185 185 184 177 177 174 153 154 155 181 181 181
+140 140 139 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 1 1 1 49 50 50 186 186 187 28 28 28 7 7 7
+12 12 12 22 22 22 7 7 7 7 7 7 108 107 107 247 247 246
+25 25 25 7 7 7 90 87 86 247 237 174 246 226 108 246 239 193
+28 28 28 44 44 44 237 237 238 9 9 9 53 54 54 249 250 251
+49 50 50 7 7 7 153 153 148 249 241 199 214 196 166 185 185 184
+229 229 230 19 19 19 7 7 7 7 7 7 7 7 7 103 103 103
+251 252 253 254 254 254 253 253 253 150 151 151 7 7 7 187 187 188
+252 252 253 251 251 252 103 103 103 7 7 7 7 7 7 7 7 7
+7 7 7 23 23 23 17 18 18 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 12 12 12 153 153 148 246 239 193 249 241 199
+161 161 162 9 9 9 84 84 84 108 110 110 25 25 25 153 153 148
+247 237 174 246 224 96 218 217 217 165 165 165 182 183 184 193 193 194
+114 115 115 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 4 4 4 74 74 74 181 181 181 7 7 7 7 7 7
+110 114 114 200 200 197 7 7 7 7 7 7 60 60 60 209 210 210
+7 7 7 7 7 7 146 146 147 248 234 156 248 234 156 177 177 174
+7 7 7 118 118 118 193 193 194 7 7 7 84 84 84 232 232 233
+8 8 8 7 7 7 209 210 210 221 218 200 193 187 162 219 219 219
+200 200 197 7 7 7 7 7 7 7 7 7 7 7 7 95 97 97
+251 252 252 254 254 254 252 253 253 118 118 118 29 29 30 247 248 249
+252 252 253 227 227 227 16 16 16 7 7 7 7 7 7 7 7 7
+100 103 103 218 217 217 219 218 214 7 7 7 7 7 7 7 7 7
+7 7 7 21 21 22 185 185 184 246 239 193 248 234 156 240 230 197
+60 60 60 194 194 184 246 239 193 249 241 199 137 136 134 10 10 10
+171 168 154 248 234 156 248 234 156 226 226 219 209 210 210 249 241 199
+28 28 28 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 13 13 13 108 110 110 146 146 147 7 7 7 7 7 7
+167 168 167 140 140 139 7 7 7 7 7 7 120 121 122 146 146 147
+7 7 7 7 7 7 194 194 184 240 219 129 247 237 174 95 97 97
+7 7 7 95 97 97 90 87 86 7 7 7 118 118 118 176 177 177
+7 7 7 28 28 28 248 248 249 44 44 45 7 7 7 167 168 167
+140 140 139 7 7 7 36 36 36 74 74 74 7 7 7 65 64 64
+251 252 253 254 254 254 251 252 252 81 81 82 108 110 110 251 252 252
+251 251 252 127 127 126 7 7 7 7 7 7 8 8 8 140 140 139
+181 181 181 140 140 139 221 218 200 7 7 7 7 7 7 7 7 7
+34 34 35 209 210 210 231 231 231 246 239 193 247 237 174 194 194 184
+227 227 227 249 241 199 240 219 129 248 234 156 153 153 148 7 7 7
+13 13 13 185 185 184 248 234 156 245 218 76 245 216 51 245 214 38
+31 31 31 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 31 31 31 153 154 155 89 90 91 7 7 7 8 8 8
+232 232 233 82 82 81 7 7 7 7 7 7 179 180 181 89 90 91
+7 7 7 24 24 24 243 235 220 248 234 156 240 230 197 20 20 20
+7 7 7 7 7 7 7 7 7 7 7 7 149 149 150 118 118 118
+7 7 7 90 87 86 229 229 230 7 7 7 7 7 7 229 229 230
+82 82 81 7 7 7 95 97 97 100 103 103 7 7 7 34 34 35
+251 252 252 253 253 254 251 251 252 47 47 47 193 193 194 251 252 252
+239 239 240 23 23 23 7 7 7 13 13 13 165 165 165 234 234 234
+149 149 150 146 114 101 200 200 197 7 7 7 7 7 7 52 53 53
+227 227 227 167 168 167 16 16 16 214 196 166 248 234 156 243 235 220
+219 219 219 156 155 156 247 237 174 246 239 193 75 75 76 7 7 7
+60 60 60 227 227 227 243 235 220 240 219 129 245 218 76 245 213 29
+16 16 16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+1 1 1 49 50 50 185 185 184 33 33 34 7 7 7 10 11 11
+56 56 56 16 16 16 7 7 7 10 10 10 237 237 238 26 27 27
+7 7 7 55 55 55 185 185 184 221 218 200 167 168 167 7 7 7
+20 20 20 39 39 39 10 11 11 7 7 7 181 181 181 58 58 58
+7 7 7 103 103 103 133 133 133 7 7 7 44 44 44 247 248 249
+24 24 24 7 7 7 156 155 156 129 130 130 7 7 7 9 9 9
+244 245 245 252 253 253 237 237 238 34 34 35 248 248 249 251 251 252
+161 161 162 7 7 7 24 24 24 187 187 188 212 211 212 67 70 70
+187 187 188 173 170 143 209 206 202 10 10 10 95 97 97 237 237 238
+129 130 130 8 8 8 89 90 91 246 239 193 247 237 174 177 177 174
+17 18 18 137 136 134 249 241 199 219 218 214 10 10 10 95 97 97
+243 243 243 150 151 151 31 31 31 221 218 200 240 219 129 53 54 54
+3 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+4 4 4 72 71 71 182 183 184 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 12 12 12 161 161 162 209 210 210 7 7 7
+7 7 7 7 7 7 7 7 7 187 187 188 82 82 81 7 7 7
+146 146 147 247 248 249 17 18 18 7 7 7 212 211 212 47 47 47
+7 7 7 7 7 7 7 7 7 8 8 8 146 146 147 205 205 205
+7 7 7 7 7 7 214 214 215 156 155 156 7 7 7 7 7 7
+218 217 217 251 252 252 186 186 187 110 114 114 249 250 251 248 248 249
+75 75 76 34 34 35 205 205 205 129 130 130 16 16 16 7 7 7
+156 155 156 214 196 166 240 230 197 243 243 241 227 227 227 74 74 74
+7 7 7 29 29 30 226 226 219 249 241 199 175 173 165 14 14 14
+9 9 9 221 218 200 246 239 193 153 153 148 146 146 147 246 247 248
+110 114 114 7 7 7 7 7 7 42 42 43 193 193 194 95 97 97
+19 19 19 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+6 6 6 84 84 84 140 142 143 7 7 7 7 7 7 7 7 7
+7 7 7 20 20 20 177 177 174 249 241 199 149 149 150 7 7 7
+7 7 7 7 7 7 10 11 11 226 226 219 13 13 13 8 8 8
+219 218 214 219 218 214 7 7 7 8 8 8 238 238 236 200 200 197
+13 13 13 7 7 7 13 13 13 161 161 162 243 235 220 146 146 147
+7 7 7 29 29 30 232 232 233 176 177 177 7 7 7 7 7 7
+182 183 184 237 237 238 129 130 130 167 168 167 176 177 177 202 202 203
+10 11 11 95 97 97 44 44 45 7 7 7 7 7 7 7 7 7
+75 75 76 226 226 219 243 235 220 156 155 156 24 24 24 7 7 7
+7 7 7 176 177 177 247 247 246 200 200 197 17 18 18 7 7 7
+49 50 50 246 239 193 248 234 156 251 248 240 239 239 240 84 84 84
+7 7 7 7 7 7 7 7 7 7 7 7 60 60 60 187 187 188
+84 84 84 14 14 14 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+4 4 4 53 54 54 137 136 134 156 155 156 161 161 162 161 161 162
+167 168 167 239 223 156 240 219 129 246 226 108 239 223 156 239 223 156
+239 223 156 239 223 156 214 196 166 239 223 156 193 187 162 193 187 162
+248 234 156 239 223 156 193 187 162 193 187 162 248 234 156 248 234 156
+214 196 166 193 187 162 214 196 166 248 234 156 240 219 129 214 196 166
+193 187 162 193 187 162 171 168 154 146 146 147 137 136 134 137 136 134
+161 161 162 209 210 210 65 64 64 202 202 203 179 180 181 140 140 139
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 60 60 60 39 39 39 7 7 7 7 7 7 7 7 7
+66 66 66 249 250 251 202 202 203 16 16 16 7 7 7 7 7 7
+23 23 23 243 235 220 246 239 193 226 226 219 52 53 53 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 75 75 76
+176 177 177 66 66 66 9 9 9 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 10 10 10 28 28 29 34 34 35 36 36 36 36 36 36
+44 44 45 146 114 101 241 207 50 241 207 50 241 207 50 241 211 63
+241 211 63 241 211 63 241 211 63 241 211 63 241 211 63 245 216 51
+245 216 51 245 216 51 241 211 63 241 211 63 245 216 51 241 211 63
+245 218 76 245 218 76 245 216 51 245 215 41 245 214 38 241 207 50
+241 211 63 201 147 55 88 79 47 29 29 30 34 34 35 42 42 43
+103 103 103 191 190 190 75 75 76 196 196 197 200 200 197 65 64 64
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+90 87 86 146 146 147 19 19 19 7 7 7 7 7 7 7 7 7
+7 7 7 90 87 86 140 140 139 31 31 31 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+103 103 103 161 161 162 53 54 54 7 7 7 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 12 12 12 50 51 51 146 114 101 180 121 62 199 129 48
+201 147 55 213 157 40 213 157 40 230 165 41 226 179 40 226 179 40
+238 192 33 241 205 27 244 209 25 244 210 23 244 212 23 245 211 23
+245 211 23 245 211 23 245 211 23 244 209 25 238 204 29 226 179 40
+213 157 40 199 129 48 54 42 32 0 0 0 4 6 6 44 44 45
+150 151 151 129 130 130 137 136 134 205 205 205 202 202 203 8 8 8
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 129 130 130 146 146 147 47 47 47 4 4 4 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 2 2 2 12 12 12 28 28 29 49 50 50
+74 74 74 108 96 91 180 121 62 180 121 62 199 129 48 201 147 55
+213 157 40 230 165 41 226 179 40 238 192 33 241 205 27 241 205 27
+243 206 27 243 206 27 241 205 27 238 204 29 226 179 40 213 157 40
+199 129 48 199 129 48 21 19 17 65 64 64 103 103 103 167 168 167
+202 202 203 24 24 24 193 193 194 229 229 230 140 140 139 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 8 8 8 156 155 156 133 133 133 36 36 36 3 3 3
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1
+4 4 4 10 11 11 21 21 22 39 39 39 60 60 60 108 96 91
+180 121 62 199 129 48 199 129 48 213 157 40 230 165 41 226 179 40
+226 179 40 226 179 40 226 179 40 226 179 40 213 157 40 199 129 48
+180 121 62 99 91 79 72 71 71 56 56 56 129 130 130 167 168 167
+21 21 22 17 18 18 231 231 231 229 229 230 52 53 53 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 13 13 13 176 177 177 120 121 122 33 33 34
+2 2 2 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 8 8 8
+21 21 22 47 47 47 99 91 79 180 121 62 199 129 48 199 129 48
+201 147 55 213 157 40 213 157 40 201 147 55 199 129 48 180 121 62
+99 91 79 26 26 27 9 9 9 60 60 60 186 186 187 31 31 31
+7 7 7 60 60 60 243 243 243 209 210 210 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7 7
+7 7 7 7 7 7 7 7 7 26 27 27 193 193 194 108 110 110
+22 22 22 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 1 1 1 8 8 8 24 24 24 58 58 58 108 96 91
+180 121 62 180 121 62 180 121 62 180 121 62 180 121 62 72 71 71
+15 15 15 0 0 0 4 6 6 75 75 76 156 155 156 24 24 24
+24 24 24 108 107 107 232 232 233 137 136 134 24 24 24 24 24 24
+24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24 24
+24 24 24 24 24 24 24 24 24 24 24 24 58 58 58 176 177 177
+60 60 60 3 3 3
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 12 12 12
+26 27 27 44 44 44 55 55 55 50 51 51 29 29 30 8 8 8
+0 0 0 0 0 0 3 3 3 47 47 47 127 127 126 150 151 151
+150 151 151 140 142 143 129 130 130 140 142 143 150 151 151 150 151 151
+150 151 151 150 151 151 150 151 151 150 151 151 150 151 151 150 151 151
+150 151 151 150 151 151 153 154 155 161 161 162 165 165 165 167 168 167
+177 177 174 167 168 167 161 161 162 156 155 156 150 151 151 150 151 151
+150 151 151 150 151 151 150 151 151 150 151 151 150 151 151 150 151 151
+150 151 151 150 151 151 150 151 151 150 151 151 150 151 151 150 151 151
+150 151 151 150 151 151 150 151 151 150 151 151 149 149 150 127 127 126
+44 44 45 2 2 2
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 2 2 2 1 1 1 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 7 7 7 21 21 22 25 25 26
+25 25 26 24 24 24 20 20 20 23 23 24 25 25 26 26 26 27
+26 26 27 26 26 27 26 26 27 26 26 27 26 26 27 26 26 27
+26 26 27 26 26 27 26 26 27 26 26 27 26 26 27 26 27 27
+28 28 29 26 27 27 26 26 27 26 26 27 26 26 27 26 26 27
+26 26 27 26 26 27 26 26 27 26 26 27 26 26 27 26 26 27
+26 26 27 26 26 27 26 26 27 26 26 27 26 26 27 26 26 27
+26 26 27 26 26 27 26 26 27 26 26 27 25 25 26 21 21 22
+7 7 7 0 0 0
diff --git a/drivers/video/logo/logo_blackfin_vga16.ppm b/drivers/video/logo/logo_blackfin_vga16.ppm
new file mode 100644
index 00000000000..1352b02a9d9
--- /dev/null
+++ b/drivers/video/logo/logo_blackfin_vga16.ppm
@@ -0,0 +1,1127 @@
+P3
+# This was generated by the GIMP & Netpbm tools
+# gimp linux_bf.svg (create 80x80 save as linux_bf.ppm)
+# ppmquant -mapfile clut_vga16.ppm linux_bf.ppm | pnmnoraw > logo_blackfin_vga16.ppm
+#
+80 80
+255
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+85 85 85 85 85 85 85 85 85 85 85 85 85 85 85 85 85 85
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 85 85 85 85 85 85 85 85 85
+85 85 85 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+85 85 85 85 85 85 85 85 85 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 85 85 85 85 85 85 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 85 85 85 85 85 85 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 85 85 85 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 85 85 85
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+85 85 85 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 85 85 85 85 85 85 85 85 85 0 0 0 0 0 0
+85 85 85 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 85 85 85
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 170 170 170 170 170 170 85 85 85 0 0 0 0 0 0
+0 0 0 85 85 85 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 85 85 85
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 85 85 85 85 85 85 0 0 0 0 0 0 0 0 0
+0 0 0 85 85 85 85 85 85 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 85 85 85 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 85 85 85 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 85 85 85 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 85 85 85 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 85 85 85 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 85 85 85 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 85 85 85 0 0 0
+0 0 0 0 0 0 0 0 0 85 85 85 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 85 85 85 85 85 85 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 85 85 85 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 85 85 85 0 0 0
+0 0 0 0 0 0 0 0 0 85 85 85 85 85 85 85 85 85
+0 0 0 0 0 0 0 0 0 0 0 0 85 85 85 170 170 170
+170 170 170 85 85 85 85 85 85 170 170 170 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 85 85 85 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+[ASCII PPM pixel data: remaining rows of RGB triplets for the added image file]
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index c95874fe907..9e903454ffc 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -15,10 +15,9 @@
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -275,7 +274,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
par->physbase = op->resource[2].start;
par->which_io = op->resource[2].flags & IORESOURCE_BITS;
- sbusfb_fill_var(&info->var, dp->node, 8);
+ sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
@@ -295,7 +294,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
if (!info->screen_base)
goto out_unmap_regs;
- p9100_blank(0, info);
+ p9100_blank(FB_BLANK_UNBLANK, info);
if (fb_alloc_cmap(&info->cmap, 256, 0))
goto out_unmap_screen;
diff --git a/drivers/video/pnx4008/pnxrgbfb.c b/drivers/video/pnx4008/pnxrgbfb.c
index 685761a0732..4db6b48a871 100644
--- a/drivers/video/pnx4008/pnxrgbfb.c
+++ b/drivers/video/pnx4008/pnxrgbfb.c
@@ -100,7 +100,6 @@ static int rgbfb_remove(struct platform_device *pdev)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
platform_set_drvdata(pdev, NULL);
- kfree(info);
}
pnx4008_free_dum_channel(channel_owned, pdev->id);
@@ -168,23 +167,21 @@ static int __devinit rgbfb_probe(struct platform_device *pdev)
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret < 0)
- goto err2;
+ goto err1;
ret = register_framebuffer(info);
if (ret < 0)
- goto err3;
+ goto err2;
platform_set_drvdata(pdev, info);
return 0;
-err3:
- fb_dealloc_cmap(&info->cmap);
err2:
- framebuffer_release(info);
+ fb_dealloc_cmap(&info->cmap);
err1:
pnx4008_free_dum_channel(channel_owned, pdev->id);
err0:
- kfree(info);
+ framebuffer_release(info);
err:
return ret;
}
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 3ab6e3d973a..48aea39c35a 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1301,8 +1301,8 @@ static void pxafb_decode_mode_info(struct pxafb_info *fbi,
}
}
-static int pxafb_decode_mach_info(struct pxafb_info *fbi,
- struct pxafb_mach_info *inf)
+static void pxafb_decode_mach_info(struct pxafb_info *fbi,
+ struct pxafb_mach_info *inf)
{
unsigned int lcd_conn = inf->lcd_conn;
@@ -1333,7 +1333,7 @@ static int pxafb_decode_mach_info(struct pxafb_info *fbi,
fbi->lccr0 = inf->lccr0;
fbi->lccr3 = inf->lccr3;
fbi->lccr4 = inf->lccr4;
- return -EINVAL;
+ goto decode_mode;
}
if (lcd_conn == LCD_MONO_STN_8BPP)
@@ -1343,8 +1343,8 @@ static int pxafb_decode_mach_info(struct pxafb_info *fbi,
fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0;
fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0;
+decode_mode:
pxafb_decode_mode_info(fbi, inf->modes, inf->num_modes);
- return 0;
}
static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 4deaac05b93..37d764ad56b 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -10,18 +10,19 @@
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/of_device.h>
-#include <asm/oplib.h>
#include <asm/fbio.h>
#include "sbuslib.h"
-void sbusfb_fill_var(struct fb_var_screeninfo *var, int prom_node, int bpp)
+void sbusfb_fill_var(struct fb_var_screeninfo *var, struct device_node *dp,
+ int bpp)
{
memset(var, 0, sizeof(*var));
- var->xres = prom_getintdefault(prom_node, "width", 1152);
- var->yres = prom_getintdefault(prom_node, "height", 900);
+ var->xres = of_getintprop_default(dp, "width", 1152);
+ var->yres = of_getintprop_default(dp, "height", 900);
var->xres_virtual = var->xres;
var->yres_virtual = var->yres;
var->bits_per_pixel = bpp;
diff --git a/drivers/video/sbuslib.h b/drivers/video/sbuslib.h
index 492828c3fe8..7ba3250236b 100644
--- a/drivers/video/sbuslib.h
+++ b/drivers/video/sbuslib.h
@@ -11,7 +11,8 @@ struct sbus_mmap_map {
#define SBUS_MMAP_FBSIZE(n) (-n)
#define SBUS_MMAP_EMPTY 0x80000000
-extern void sbusfb_fill_var(struct fb_var_screeninfo *var, int prom_node, int bpp);
+extern void sbusfb_fill_var(struct fb_var_screeninfo *var,
+ struct device_node *dp, int bpp);
struct vm_area_struct;
extern int sbusfb_mmap_helper(struct sbus_mmap_map *map,
unsigned long physbase, unsigned long fbsize,
@@ -21,6 +22,6 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
struct fb_info *info,
int type, int fb_depth, unsigned long fb_size);
int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg);
+ unsigned long arg);
#endif /* _SBUSLIB_H */
diff --git a/drivers/video/sunxvr2500.c b/drivers/video/sunxvr2500.c
index c3869a96ab5..b1dde09e701 100644
--- a/drivers/video/sunxvr2500.c
+++ b/drivers/video/sunxvr2500.c
@@ -9,10 +9,9 @@
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
struct s3d_info {
struct fb_info *info;
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index 71bf3f1f00b..c2ba51b7ea1 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -9,10 +9,9 @@
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
/* XXX This device has a 'dev-comm' property which aparently is
* XXX a pointer into the openfirmware's address space which is
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index a7177430577..2a03f78bbb0 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -17,10 +17,9 @@
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
+#include <linux/of_device.h>
#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
#include <asm/fbio.h>
#include "sbuslib.h"
@@ -84,7 +83,7 @@ struct tcx_tec {
struct tcx_thc {
u32 thc_rev;
- u32 thc_pad0[511];
+ u32 thc_pad0[511];
u32 thc_hs; /* hsync timing */
u32 thc_hsdvs;
u32 thc_hd;
@@ -126,10 +125,10 @@ struct tcx_par {
};
/* Reset control plane so that WID is 8-bit plane. */
-static void __tcx_set_control_plane (struct tcx_par *par)
+static void __tcx_set_control_plane(struct tcx_par *par)
{
u32 __iomem *p, *pend;
-
+
if (par->lowdepth)
return;
@@ -143,8 +142,8 @@ static void __tcx_set_control_plane (struct tcx_par *par)
sbus_writel(tmp, p);
}
}
-
-static void tcx_reset (struct fb_info *info)
+
+static void tcx_reset(struct fb_info *info)
{
struct tcx_par *par = (struct tcx_par *) info->par;
unsigned long flags;
@@ -365,7 +364,8 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
info->screen_base, par->fbsize);
}
-static int __devinit tcx_init_one(struct of_device *op)
+static int __devinit tcx_probe(struct of_device *op,
+ const struct of_device_id *match)
{
struct device_node *dp = op->node;
struct fb_info *info;
@@ -384,7 +384,7 @@ static int __devinit tcx_init_one(struct of_device *op)
par->lowdepth =
(of_find_property(dp, "tcx-8-bit", NULL) != NULL);
- sbusfb_fill_var(&info->var, dp->node, 8);
+ sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
@@ -488,13 +488,6 @@ out_err:
return err;
}
-static int __devinit tcx_probe(struct of_device *dev, const struct of_device_id *match)
-{
- struct of_device *op = to_of_device(&dev->dev);
-
- return tcx_init_one(op);
-}
-
static int __devexit tcx_remove(struct of_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index bd54cd0de39..beefab2992c 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -27,7 +27,6 @@
#define VERSION "0.7.8-NEWAPI"
struct tridentfb_par {
- int vclk; /* in MHz */
void __iomem *io_virt; /* iospace virtual memory address */
};
@@ -669,27 +668,26 @@ static void set_screen_start(int base)
(read3X4(CRTHiOrd) & 0xF8) | ((base & 0xE0000) >> 17));
}
-/* Use 20.12 fixed-point for NTSC value and frequency calculation */
-#define calc_freq(n, m, k) ( ((unsigned long)0xE517 * (n + 8) / ((m + 2) * (1 << k))) >> 12 )
-
/* Set dotclock frequency */
-static void set_vclk(int freq)
+static void set_vclk(unsigned long freq)
{
int m, n, k;
- int f, fi, d, di;
+ unsigned long f, fi, d, di;
unsigned char lo = 0, hi = 0;
- d = 20;
+ d = 20000;
for (k = 2; k >= 0; k--)
for (m = 0; m < 63; m++)
for (n = 0; n < 128; n++) {
- fi = calc_freq(n, m, k);
+ fi = ((14318l * (n + 8)) / (m + 2)) >> k;
if ((di = abs(fi - freq)) < d) {
d = di;
f = fi;
lo = n;
hi = (k << 6) | m;
}
+ if (fi > freq)
+ break;
}
if (chip3D) {
write3C4(ClockHigh, hi);
@@ -888,6 +886,8 @@ static int tridentfb_set_par(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
int bpp = var->bits_per_pixel;
unsigned char tmp;
+ unsigned long vclk;
+
debug("enter\n");
hdispend = var->xres / 8 - 1;
hsyncstart = (var->xres + var->right_margin) / 8;
@@ -905,7 +905,6 @@ static int tridentfb_set_par(struct fb_info *info)
vblankstart = var->yres;
vblankend = vtotal + 2;
- enable_mmio();
crtc_unlock();
write3CE(CyberControl, 8);
@@ -1015,11 +1014,11 @@ static int tridentfb_set_par(struct fb_info *info)
write3X4(Performance, 0x92);
write3X4(PCIReg, 0x07); /* MMIO & PCI read and write burst enable */
- /* convert from picoseconds to MHz */
- par->vclk = 1000000 / info->var.pixclock;
+ /* convert from picoseconds to kHz */
+ vclk = PICOS2KHZ(info->var.pixclock);
if (bpp == 32)
- par->vclk *= 2;
- set_vclk(par->vclk);
+ vclk *= 2;
+ set_vclk(vclk);
write3C4(0, 3);
write3C4(1, 1); /* set char clock 8 dots wide */