Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/apei/erst.c31
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/qos.c18
-rw-r--r--drivers/block/cciss.c12
-rw-r--r--drivers/block/cciss_scsi.c1
-rw-r--r--drivers/block/loop.c51
-rw-r--r--drivers/block/paride/pg.c1
-rw-r--r--drivers/block/rbd.c101
-rw-r--r--drivers/block/swim3.c362
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c41
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/crypto/mv_cesa.c12
-rw-r--r--drivers/devfreq/Kconfig41
-rw-r--r--drivers/devfreq/devfreq.c10
-rw-r--r--drivers/dma/Kconfig4
-rw-r--r--drivers/edac/mpc85xx_edac.c2
-rw-r--r--drivers/firmware/dmi_scan.c6
-rw-r--r--drivers/firmware/efivars.c12
-rw-r--r--drivers/firmware/iscsi_ibft.c42
-rw-r--r--drivers/firmware/iscsi_ibft_find.c26
-rw-r--r--drivers/firmware/sigma.c81
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-da9052.c21
-rw-r--r--drivers/gpio/gpio-ml-ioh.c32
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c18
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-pl061.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c27
-rw-r--r--drivers/gpu/drm/drm_irq.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c78
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c76
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c83
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c44
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c89
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c58
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c49
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h37
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c19
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h51
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c120
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c546
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c19
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c35
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c19
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c195
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h29
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h31
-rw-r--r--drivers/gpu/drm/radeon/r100.c7
-rw-r--r--drivers/gpu/drm/radeon/r300.c94
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c210
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c7
-rw-r--r--drivers/gpu/drm/radeon/rv770.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c411
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c23
-rw-r--r--drivers/gpu/vga/vgaarb.c62
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/ad7314.c1
-rw-r--r--drivers/hwmon/ads7871.c1
-rw-r--r--drivers/hwmon/exynos4_tmu.c12
-rw-r--r--drivers/hwmon/gpio-fan.c13
-rw-r--r--drivers/hwmon/jz4740-hwmon.c16
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/s3c-hwmon.c13
-rw-r--r--drivers/hwmon/sch5627.c13
-rw-r--r--drivers/hwmon/sch5636.c13
-rw-r--r--drivers/hwmon/twl4030-madc-hwmon.c14
-rw-r--r--drivers/hwmon/ultra45_env.c13
-rw-r--r--drivers/hwmon/wm831x-hwmon.c12
-rw-r--r--drivers/hwmon/wm8350-hwmon.c12
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c22
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i2c/i2c-core.c4
-rw-r--r--drivers/i2c/i2c-dev.c2
-rw-r--r--drivers/ide/cy82c693.c6
-rw-r--r--drivers/ide/icside.c2
-rw-r--r--drivers/ide/piix.c18
-rw-r--r--drivers/ide/triflex.c16
-rw-r--r--drivers/ieee802154/fakehard.c2
-rw-r--r--drivers/infiniband/core/addr.c50
-rw-r--r--drivers/infiniband/core/cma.c14
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c218
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c20
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c12
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c50
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/input/misc/cma3000_d0x.c4
-rw-r--r--drivers/input/mouse/elantech.c26
-rw-r--r--drivers/input/mouse/sentelic.c8
-rw-r--r--drivers/input/mouse/sentelic.h3
-rw-r--r--drivers/input/mouse/synaptics.c11
-rw-r--r--drivers/input/serio/ams_delta_serio.c1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h14
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/iommu/intel-iommu.c7
-rw-r--r--drivers/iommu/intr_remapping.c2
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/gigaset/i4l.c3
-rw-r--r--drivers/isdn/i4l/isdn_net.c3
-rw-r--r--drivers/leds/led-class.c5
-rw-r--r--drivers/lguest/lguest_device.c6
-rw-r--r--drivers/md/bitmap.c9
-rw-r--r--drivers/md/linear.c1
-rw-r--r--drivers/md/md.c30
-rw-r--r--drivers/md/raid5.c20
-rw-r--r--drivers/media/common/tuners/mxl5007t.c3
-rw-r--r--drivers/media/common/tuners/tda18218.c2
-rw-r--r--drivers/media/rc/ati_remote.c111
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c96
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c128
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c114
-rw-r--r--drivers/media/video/au0828/au0828-cards.c7
-rw-r--r--drivers/media/video/gspca/gspca.c6
-rw-r--r--drivers/media/video/m5mols/m5mols.h2
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c22
-rw-r--r--drivers/media/video/mt9m111.c1
-rw-r--r--drivers/media/video/mt9t112.c4
-rw-r--r--drivers/media/video/omap/omap_vout.c9
-rw-r--r--drivers/media/video/omap1_camera.c1
-rw-r--r--drivers/media/video/omap24xxcam-dma.c2
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c2
-rw-r--r--drivers/media/video/omap3isp/ispstat.c2
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c1
-rw-r--r--drivers/media/video/ov6650.c2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c14
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c24
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c43
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c15
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c2
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c1
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c34
-rw-r--r--drivers/media/video/sh_mobile_csi2.c4
-rw-r--r--drivers/media/video/soc_camera.c3
-rw-r--r--drivers/mfd/ab5500-debugfs.c2
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/da903x.c3
-rw-r--r--drivers/mfd/jz4740-adc.c1
-rw-r--r--drivers/mfd/tps6586x.c2
-rw-r--r--drivers/mfd/tps65910.c2
-rw-r--r--drivers/mfd/twl-core.c16
-rw-r--r--drivers/mfd/twl4030-irq.c18
-rw-r--r--drivers/mfd/wm8994-core.c1
-rw-r--r--drivers/misc/Kconfig15
-rw-r--r--drivers/misc/ad525x_dpot.h2
-rw-r--r--drivers/misc/carma/carma-fpga-program.c9
-rw-r--r--drivers/misc/carma/carma-fpga.c9
-rw-r--r--drivers/misc/eeprom/Kconfig2
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c88
-rw-r--r--drivers/misc/pch_phub.c81
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/core/core.c98
-rw-r--r--drivers/mmc/core/host.c11
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/host/mmci.c14
-rw-r--r--drivers/mmc/host/mxcmmc.c1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c6
-rw-r--r--drivers/mmc/host/sdhci-dove.c5
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c5
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c5
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c5
-rw-r--r--drivers/mmc/host/sdhci-pci.c26
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c18
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h6
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c5
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c5
-rw-r--r--drivers/mmc/host/sdhci-s3c.c23
-rw-r--r--drivers/mmc/host/sdhci-tegra.c5
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mmc/host/vub300.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c12
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/ndfc.c2
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/arcnet/Kconfig2
-rw-r--r--drivers/net/bonding/bond_ipv6.c225
-rw-r--r--drivers/net/bonding/bond_main.c116
-rw-r--r--drivers/net/bonding/bond_sysfs.c7
-rw-r--r--drivers/net/caif/caif_hsi.c13
-rw-r--r--drivers/net/caif/caif_serial.c10
-rw-r--r--drivers/net/caif/caif_shmcore.c27
-rw-r--r--drivers/net/caif/caif_spi.c178
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c13
-rw-r--r--drivers/net/can/bfin_can.c12
-rw-r--r--drivers/net/can/c_can/c_can_platform.c12
-rw-r--r--drivers/net/can/cc770/Kconfig21
-rw-r--r--drivers/net/can/cc770/Makefile9
-rw-r--r--drivers/net/can/cc770/cc770.c881
-rw-r--r--drivers/net/can/cc770/cc770.h203
-rw-r--r--drivers/net/can/cc770/cc770_isa.c367
-rw-r--r--drivers/net/can/cc770/cc770_platform.c272
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c15
-rw-r--r--drivers/net/can/janz-ican3.c13
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c12
-rw-r--r--drivers/net/can/mscan/mscan.c8
-rw-r--r--drivers/net/can/sja1000/Kconfig1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c118
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c12
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c13
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/softing/softing_main.c16
-rw-r--r--drivers/net/can/ti_hecc.c15
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/dsa/Kconfig36
-rw-r--r--drivers/net/dsa/Makefile9
-rw-r--r--drivers/net/dsa/mv88e6060.c293
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c438
-rw-r--r--drivers/net/dsa/mv88e6131.c435
-rw-r--r--drivers/net/dsa/mv88e6xxx.c549
-rw-r--r--drivers/net/dsa/mv88e6xxx.h98
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c7
-rw-r--r--drivers/net/ethernet/3com/3c59x.c12
-rw-r--r--drivers/net/ethernet/3com/typhoon.c16
-rw-r--r--drivers/net/ethernet/8390/8390.h2
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c18
-rw-r--r--drivers/net/ethernet/8390/es3210.c2
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c2
-rw-r--r--drivers/net/ethernet/8390/hp.c2
-rw-r--r--drivers/net/ethernet/8390/hydra.c2
-rw-r--r--drivers/net/ethernet/8390/lne390.c4
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c2
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/ne2.c2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c6
-rw-r--r--drivers/net/ethernet/8390/ne3210.c2
-rw-r--r--drivers/net/ethernet/8390/stnic.c2
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c14
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c13
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c15
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h5
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c15
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c19
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/amd/sunlance.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c13
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c10
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c13
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c23
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c13
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c196
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c366
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h116
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c61
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c76
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h217
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1099
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c528
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c50
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c55
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h9
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c707
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h44
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h98
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c493
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h54
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h97
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c70
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h27
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c623
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c153
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h3
-rw-r--r--drivers/net/ethernet/cadence/Kconfig1
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig7
-rw-r--r--drivers/net/ethernet/calxeda/Makefile1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1928
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c54
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c14
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c10
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c13
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dlink/de600.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c19
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h39
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c237
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h84
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c120
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c586
-rw-r--r--drivers/net/ethernet/ethoc.c13
-rw-r--r--drivers/net/ethernet/fealnx.c6
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/fec.c77
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c15
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c13
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c69
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c28
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h7
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c16
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h6
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c7
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c7
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c27
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/ibm/iseries_veth.c2
-rw-r--r--drivers/net/ethernet/icplus/ipg.c13
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c465
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c21
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c217
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c37
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c96
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c44
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c40
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h42
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/jme.c129
-rw-r--r--drivers/net/ethernet/jme.h19
-rw-r--r--drivers/net/ethernet/korina.c13
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c14
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c13
-rw-r--r--drivers/net/ethernet/marvell/skge.c15
-rw-r--r--drivers/net/ethernet/marvell/sky2.c145
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1332
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c430
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c415
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c902
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c228
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h670
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c486
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c616
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3104
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c132
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c13
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c513
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h15
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c16
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c75
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c13
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c13
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c13
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c13
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c511
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c5
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c2
-rw-r--r--drivers/net/ethernet/pasemi/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c48
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c19
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c91
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c13
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c18
-rw-r--r--drivers/net/ethernet/sfc/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c35
-rw-r--r--drivers/net/ethernet/sfc/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/mtd.c6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx.c9
-rw-r--r--drivers/net/ethernet/sfc/selftest.c4
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c29
-rw-r--r--drivers/net/ethernet/sgi/meth.c67
-rw-r--r--drivers/net/ethernet/sis/sis190.c15
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/epic100.c6
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c13
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c123
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c570
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c221
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c198
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c42
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c13
-rw-r--r--drivers/net/ethernet/sun/sungem.c6
-rw-r--r--drivers/net/ethernet/sun/sunhme.c20
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/tile/tilepro.c12
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c20
-rw-r--r--drivers/net/ethernet/via/via-rhine.c21
-rw-r--r--drivers/net/ethernet/via/via-velocity.c12
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c37
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c30
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/hippi/Kconfig2
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/irda/bfin_sir.c13
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c13
-rw-r--r--drivers/net/irda/sh_irda.c13
-rw-r--r--drivers/net/irda/sh_sir.c13
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/macvlan.c14
-rw-r--r--drivers/net/macvtap.c24
-rw-r--r--drivers/net/mii.c53
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/mdio-bitbang.c9
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/phy_device.c20
-rw-r--r--drivers/net/phy/spi_ks8995.c375
-rw-r--r--drivers/net/ppp/pptp.c6
-rw-r--r--drivers/net/team/Kconfig43
-rw-r--r--drivers/net/team/Makefile7
-rw-r--r--drivers/net/team/team.c1684
-rw-r--r--drivers/net/team/team_mode_activebackup.c136
-rw-r--r--drivers/net/team/team_mode_roundrobin.c107
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/asix.c76
-rw-r--r--drivers/net/usb/cdc-phonet.c10
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/lg-vl600.c25
-rw-r--r--drivers/net/usb/pegasus.c8
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/usb/smsc95xx.c5
-rw-r--r--drivers/net/veth.c9
-rw-r--r--drivers/net/virtio_net.c45
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c11
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c44
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wimax/i2400m/tx.c8
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c7
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c14
-rw-r--r--drivers/net/wireless/b43/dma.c14
-rw-r--r--drivers/net/wireless/b43/leds.c16
-rw-r--r--drivers/net/wireless/b43/lo.c8
-rw-r--r--drivers/net/wireless/b43/main.c100
-rw-r--r--drivers/net/wireless/b43/phy_common.c8
-rw-r--r--drivers/net/wireless/b43/phy_g.c34
-rw-r--r--drivers/net/wireless/b43/phy_lp.c8
-rw-r--r--drivers/net/wireless/b43/phy_n.c4
-rw-r--r--drivers/net/wireless/b43/pio.c6
-rw-r--r--drivers/net/wireless/b43/xmit.c4
-rw-r--r--drivers/net/wireless/b43legacy/dma.c16
-rw-r--r--drivers/net/wireless/b43legacy/leds.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c64
-rw-r--r--drivers/net/wireless/b43legacy/radio.c20
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c12
-rw-r--r--drivers/net/wireless/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/mwl8k.c10
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/ray_cs.c4
-rw-r--r--drivers/net/wireless/rayctl.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/net/xen-netback/netback.c10
-rw-r--r--drivers/net/xen-netfront.c12
-rw-r--r--drivers/of/irq.c29
-rw-r--r--drivers/of/platform.c2
-rw-r--r--drivers/oprofile/oprof.c29
-rw-r--r--drivers/oprofile/oprofile_files.c7
-rw-r--r--drivers/oprofile/oprofilefs.c11
-rw-r--r--drivers/oprofile/timer_int.c1
-rw-r--r--drivers/pci/Kconfig1
-rw-r--r--drivers/pci/ats.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c25
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c27
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c4
-rw-r--r--drivers/pci/iov.c7
-rw-r--r--drivers/pci/pci.c9
-rw-r--r--drivers/platform/x86/dell-laptop.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c21
-rw-r--r--drivers/power/intel_mid_battery.c12
-rw-r--r--drivers/ptp/ptp_clock.c4
-rw-r--r--drivers/rapidio/devices/tsi721.c41
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c14
-rw-r--r--drivers/regulator/twl-regulator.c46
-rw-r--r--drivers/rtc/class.c10
-rw-r--r--drivers/rtc/interface.c50
-rw-r--r--drivers/rtc/rtc-m41t80.c9
-rw-r--r--drivers/rtc/rtc-puv3.c4
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/chsc.c7
-rw-r--r--drivers/s390/cio/cio.h5
-rw-r--r--drivers/s390/cio/css.c104
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_fsm.c30
-rw-r--r--drivers/s390/cio/device_ops.c20
-rw-r--r--drivers/s390/cio/io_sch.h5
-rw-r--r--drivers/s390/crypto/ap_bus.c25
-rw-r--r--drivers/s390/kvm/kvm_virtio.c6
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c219
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c46
-rw-r--r--drivers/s390/net/qeth_l2_main.c19
-rw-r--r--drivers/s390/net/qeth_l3_main.c39
-rw-r--r--drivers/s390/net/qeth_l3_sys.c4
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c27
-rw-r--r--drivers/sbus/char/display7seg.c13
-rw-r--r--drivers/sbus/char/envctrl.c12
-rw-r--r--drivers/sbus/char/flash.c12
-rw-r--r--drivers/sbus/char/uctrl.c12
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c5
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c8
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c10
-rw-r--r--drivers/scsi/fcoe/fcoe.c116
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/hpsa.c5
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h55
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h16
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c243
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c11
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1084
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-ath79.c1
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-nuc900.c3
-rw-r--r--drivers/spi/spi-pl022.c8
-rw-r--r--drivers/staging/comedi/comedi_fops.c96
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c7
-rw-r--r--drivers/staging/et131x/Kconfig3
-rw-r--r--drivers/staging/et131x/et131x.c12
-rw-r--r--drivers/staging/iio/industrialio-core.c25
-rw-r--r--drivers/staging/media/as102/as102_drv.c4
-rw-r--r--drivers/staging/media/as102/as102_drv.h3
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rts_pstor/rtsx.c1
-rw-r--r--drivers/staging/slicoss/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/dsp-clock.c15
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c4
-rw-r--r--drivers/staging/usbip/vhci_rx.c10
-rw-r--r--drivers/target/iscsi/iscsi_target.c26
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c41
-rw-r--r--drivers/target/target_core_alua.c27
-rw-r--r--drivers/target/target_core_cdb.c20
-rw-r--r--drivers/target/target_core_configfs.c11
-rw-r--r--drivers/target/target_core_device.c30
-rw-r--r--drivers/target/target_core_file.c20
-rw-r--r--drivers/target/target_core_iblock.c16
-rw-r--r--drivers/target/target_core_pr.c240
-rw-r--r--drivers/target/target_core_pscsi.c28
-rw-r--r--drivers/target/target_core_rd.c258
-rw-r--r--drivers/target/target_core_tmr.c4
-rw-r--r--drivers/target/target_core_transport.c260
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c3
-rw-r--r--drivers/tty/hvc/hvc_dcc.c2
-rw-r--r--drivers/tty/serial/Kconfig14
-rw-r--r--drivers/tty/serial/atmel_serial.c16
-rw-r--r--drivers/tty/serial/crisv10.c10
-rw-r--r--drivers/tty/serial/mfd.c4
-rw-r--r--drivers/tty/serial/pch_uart.c19
-rw-r--r--drivers/tty/tty_ldisc.c30
-rw-r--r--drivers/usb/class/cdc-acm.c18
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/quirks.c27
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/Kconfig9
-rw-r--r--drivers/usb/gadget/amd5536udc.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c21
-rw-r--r--drivers/usb/gadget/epautoconf.c3
-rw-r--r--drivers/usb/gadget/f_mass_storage.c7
-rw-r--r--drivers/usb/gadget/f_midi.c138
-rw-r--r--drivers/usb/gadget/f_phonet.c13
-rw-r--r--drivers/usb/gadget/f_serial.c4
-rw-r--r--drivers/usb/gadget/file_storage.c4
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c3
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c80
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h10
-rw-r--r--drivers/usb/gadget/inode.c5
-rw-r--r--drivers/usb/gadget/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/net2280.c2
-rw-r--r--drivers/usb/gadget/pch_udc.c10
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c32
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c4
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c3
-rw-r--r--drivers/usb/gadget/udc-core.c10
-rw-r--r--drivers/usb/host/ehci-sched.c22
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/isp1760-if.c8
-rw-r--r--drivers/usb/host/ohci-at91.c6
-rw-r--r--drivers/usb/host/ohci-hcd.c15
-rw-r--r--drivers/usb/host/ohci-pci.c26
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/pci-quirks.c57
-rw-r--r--drivers/usb/host/whci/qset.c2
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/host/xhci.c39
-rw-r--r--drivers/usb/musb/Kconfig3
-rw-r--r--drivers/usb/musb/am35x.c1
-rw-r--r--drivers/usb/musb/da8xx.c1
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_host.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod.h8
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c51
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c64
-rw-r--r--drivers/usb/serial/ark3116.c10
-rw-r--r--drivers/usb/serial/ftdi_sio.c15
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c37
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/storage/ene_ub6250.c3
-rw-r--r--drivers/usb/storage/protocol.c7
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/da8xx-fb.c15
-rw-r--r--drivers/video/omap/dispc.c1
-rw-r--r--drivers/video/omap2/dss/dispc.c11
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/via/share.h4
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_mmio.c8
-rw-r--r--drivers/virtio/virtio_pci.c26
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c355
-rw-r--r--drivers/watchdog/coh901327_wdt.c6
-rw-r--r--drivers/watchdog/hpwdt.c5
-rw-r--r--drivers/watchdog/iTCO_wdt.c6
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/sp805_wdt.c2
-rw-r--r--drivers/watchdog/wm831x_wdt.c2
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/gntalloc.c4
-rw-r--r--drivers/xen/gntdev.c10
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/xen/xenbus/xenbus_client.c11
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c13
867 files changed, 32077 insertions, 10585 deletions
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca..631b9477b99 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *time, char **buf,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
u64 record_id;
- struct cper_pstore_record *rcd = (struct cper_pstore_record *)
- (erst_info.buf - sizeof(*rcd));
+ struct cper_pstore_record *rcd;
+ size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
if (erst_disable)
return -ENODEV;
+ rcd = kmalloc(rcd_len, GFP_KERNEL);
+ if (!rcd) {
+ rc = -ENOMEM;
+ goto out;
+ }
skip:
rc = erst_get_record_id_next(&reader_pos, &record_id);
if (rc)
@@ -1004,22 +1011,27 @@ skip:
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- rc = -1;
+ rc = -EINVAL;
goto out;
}
- len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
- erst_info.bufsize);
+ len = erst_read(record_id, &rcd->hdr, rcd_len);
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < 0) {
- rc = -1;
+ else if (len < sizeof(*rcd)) {
+ rc = -EIO;
goto out;
}
if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
goto skip;
+ *buf = kmalloc(len, GFP_KERNEL);
+ if (*buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
time->tv_nsec = 0;
out:
+ kfree(rcd);
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
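
The reworked reader shows the shape a pstore back end needs once records can no longer alias erst_info.buf: read the whole record into a private heap buffer, hand pstore a freshly allocated copy of just the payload, and free the scratch buffer on every exit path. A minimal sketch of that shape, with a made-up record layout and a backend_read() helper standing in for erst_read():

/* Sketch only: example_rcd and backend_read() are placeholders for the real
 * CPER record layout and erst_read(). */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_hdr { u64 record_id; };			/* placeholder header */
struct example_rcd { struct example_hdr hdr; char data[]; };

static ssize_t backend_read(u64 id, void *buf, size_t len);	/* placeholder */

static ssize_t read_one_record(u64 id, size_t bufsize, char **buf)
{
	size_t rcd_len = sizeof(struct example_rcd) + bufsize;
	struct example_rcd *rcd;
	ssize_t len, rc;

	rcd = kmalloc(rcd_len, GFP_KERNEL);
	if (!rcd)
		return -ENOMEM;

	len = backend_read(id, rcd, rcd_len);
	if (len < (ssize_t)sizeof(*rcd)) {
		rc = -EIO;
		goto out;
	}

	*buf = kmalloc(len - sizeof(*rcd), GFP_KERNEL);
	if (!*buf) {
		rc = -ENOMEM;
		goto out;
	}
	memcpy(*buf, rcd->data, len - sizeof(*rcd));
	rc = len - sizeof(*rcd);
out:
	kfree(rcd);		/* scratch buffer freed on every path */
	return rc;
}
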
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6bdedd7cca2..cf047c406d9 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -820,7 +820,7 @@ config PATA_PLATFORM
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
- depends on PATA_PLATFORM && OF
+ depends on PATA_PLATFORM && OF && OF_IRQ
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ec555951176..43b875810d1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -67,7 +67,7 @@ static int __init ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ahci_platform_data *pdata = dev_get_platdata(dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
- struct ata_port_info pi = ahci_port_info[id->driver_data];
+ struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
struct ata_host *host;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 63d53277d6a..4cadfa28f94 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
if (rc)
goto out;
+#ifdef CONFIG_ATA_BMDMA
if (bmdma)
/* prepare and activate BMDMA host */
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
else
+#endif
/* prepare and activate SFF host */
rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
if (rc)
@@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev,
host->private_data = host_priv;
host->flags |= hflags;
+#ifdef CONFIG_ATA_BMDMA
if (bmdma) {
pci_set_master(pdev);
rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
} else
+#endif
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
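
With the new guards, a kernel built without CONFIG_ATA_BMDMA compiles only the SFF branch; the preprocessor removes the if/else so the remaining statement is taken unconditionally. A stripped-down illustration of the same structure, using a placeholder config option and stub helpers:

/* CONFIG_EXAMPLE_DMA and the two prepare helpers are placeholders for
 * CONFIG_ATA_BMDMA and the libata prepare-host calls. */
#include <linux/types.h>

static int prepare_pio_host(void) { return 0; }		/* stub */
#ifdef CONFIG_EXAMPLE_DMA
static int prepare_dma_host(void) { return 0; }		/* stub */
#endif

static int init_host(bool use_dma)
{
	int rc;

#ifdef CONFIG_EXAMPLE_DMA
	if (use_dma)
		/* DMA path, compiled only when the option is enabled */
		rc = prepare_dma_host();
	else
#endif
		/* without the option the if/else disappears and only this runs */
		rc = prepare_pio_host();

	return rc;
}
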
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 3d0c2b0fed9..9e373ba2030 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev)
if (ia_vcc == NULL)
{
atomic_inc(&vcc->stats->rx_err);
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
// get real pkt length pwang_test
@@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev)
atomic_inc(&vcc->stats->rx_err);
IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
length, skb->len);)
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
skb_trim(skb, length);
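
Both hunks fix the same ordering problem: the buffer must be returned to the VCC's accounting with its real skb->truesize before the skb is freed, since touching the skb after dev_kfree_skb_any() would be a use-after-free, and guessing the truesize from the PDU length under-accounts. A minimal sketch of the corrected error path:

#include <linux/atmdev.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void drop_rx_skb(struct atm_vcc *vcc, struct sk_buff *skb)
{
	atomic_inc(&vcc->stats->rx_err);
	/* account the buffer back to the VCC while skb->truesize is still valid */
	atm_return(vcc, skb->truesize);
	dev_kfree_skb_any(skb);
}
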
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 82c865452c7..919daa7cd5b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/async.h>
+#include <linux/pm_runtime.h>
#include "base.h"
#include "power/power.h"
@@ -1743,6 +1744,10 @@ void device_shutdown(void)
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
+
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
dev->bus->shutdown(dev);
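
The two added calls are the usual fence against runtime PM during teardown: pm_runtime_get_noresume() takes a usage reference without waking the device, and pm_runtime_barrier() waits out any runtime-PM request already in flight. A hedged sketch of the same idiom in a generic teardown path (the teardown body is a placeholder):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void example_teardown(struct device *dev)
{
	/* block further runtime suspends without waking the device */
	pm_runtime_get_noresume(dev);
	/* wait for any runtime-PM request that is already in flight */
	pm_runtime_barrier(dev);

	/* ... driver-specific teardown, safe from racing suspend callbacks ... */

	pm_runtime_put_noidle(dev);	/* drop the reference taken above */
}
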
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 793f796c4da..5693ecee9a4 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
- nid, K(node_page_state(nid, NR_ANON_PAGES)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ nid, K(node_page_state(nid, NR_ANON_PAGES)
+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR
+ HPAGE_PMD_NR),
+#else
+ nid, K(node_page_state(nid, NR_ANON_PAGES)),
#endif
- ),
nid, K(node_page_state(nid, NR_SHMEM)),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
- nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
, nid,
K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR)
+ HPAGE_PMD_NR));
+#else
+ nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
#endif
- );
n += hugetlb_report_node_meminfo(nid, buf + n);
return n;
}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 5f0f85d5c57..428e55e012d 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev)
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
- clk_disable(ce->clk);
+ if (ce->status == PCE_STATUS_ENABLED)
+ clk_disable(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
}
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 7fa098464da..c3d2dfcf438 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -920,7 +920,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
dev->power.is_suspended = true;
- if (dev->power.wakeup_path && dev->parent)
+ if (dev->power.wakeup_path
+ && dev->parent && !dev->parent->power.ignore_children)
dev->parent->power.wakeup_path = true;
}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 30a94eadc20..86de6c50fc4 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -212,11 +212,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
if (!dev || !req) /*guard against callers passing in null */
return -EINVAL;
- if (dev_pm_qos_request_active(req)) {
- WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
- "added request\n");
+ if (WARN(dev_pm_qos_request_active(req),
+ "%s() called for already added request\n", __func__))
return -EINVAL;
- }
req->dev = dev;
@@ -271,11 +269,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
if (!req) /*guard against callers passing in null */
return -EINVAL;
- if (!dev_pm_qos_request_active(req)) {
- WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
- "unknown object\n");
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
return -EINVAL;
- }
mutex_lock(&dev_pm_qos_mtx);
@@ -312,11 +308,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
if (!req) /*guard against callers passing in null */
return -EINVAL;
- if (!dev_pm_qos_request_active(req)) {
- WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
- "unknown object\n");
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
return -EINVAL;
- }
mutex_lock(&dev_pm_qos_mtx);
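
All three hunks lean on the fact that WARN(condition, fmt, ...) returns the condition it tested, so the check and the diagnostic collapse into a single if. A small sketch of the idiom, using a hypothetical request object:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_req { bool active; };			/* hypothetical request */

static bool request_is_active(struct example_req *req)
{
	return req->active;				/* hypothetical test */
}

static int example_add_request(struct example_req *req)
{
	/* WARN() returns the condition it tested, so it can guard the return */
	if (WARN(request_is_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	req->active = true;
	return 0;
}
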
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 486f94ef24d..587cce57ada 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -2600,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@@ -4319,6 +4322,10 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
return -ENODEV;
}
+
+ pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
err = pci_enable_device(h->pdev);
if (err) {
dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
@@ -4875,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h))
+ 0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@@ -4884,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h))
+ IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
@@ -5158,6 +5165,7 @@ reinit_after_soft_reset:
h->cciss_max_sectors = 8192;
rebuild_lun_table(h, 1, 0);
+ cciss_engage_scsi(h);
h->busy_initializing = 0;
return 1;
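
Turning off ASPM before pci_enable_device() is the usual workaround for controllers that misbehave under PCIe link power management. A sketch of the probe-time sequence the hunk adds, reduced to the PCI calls:

#include <linux/pci.h>
#include <linux/pci-aspm.h>

static int example_pci_init(struct pci_dev *pdev)
{
	int err;

	/* keep the link out of L0s/L1 and clock PM before enabling the device */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				     PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);
	return 0;
}
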
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 951a4e33b92..e820b68d2f6 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1720,5 +1720,6 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
/* If no tape support, then these become defined out of existence */
#define cciss_scsi_setup(cntl_num)
+#define cciss_engage_scsi(h)
#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3d806820280..1e888c9e85b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -161,17 +161,19 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
&xor_funcs
};
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
- loff_t size, offset, loopsize;
+ loff_t size, loopsize;
/* Compute loopsize in bytes */
size = i_size_read(file->f_mapping->host);
- offset = lo->lo_offset;
loopsize = size - offset;
- if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
- loopsize = lo->lo_sizelimit;
+ /* offset is beyond i_size, weird but possible */
+ if (loopsize < 0)
+ return 0;
+ if (sizelimit > 0 && sizelimit < loopsize)
+ loopsize = sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
@@ -179,17 +181,25 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
return loopsize >> 9;
}
+static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+{
+ return get_size(lo->lo_offset, lo->lo_sizelimit, file);
+}
+
static int
-figure_loop_size(struct loop_device *lo)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
- loff_t size = get_loop_size(lo, lo->lo_backing_file);
+ loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
if (unlikely((loff_t)x != size))
return -EFBIG;
-
+ if (lo->lo_offset != offset)
+ lo->lo_offset = offset;
+ if (lo->lo_sizelimit != sizelimit)
+ lo->lo_sizelimit = sizelimit;
set_capacity(lo->lo_disk, x);
- return 0;
+ return 0;
}
static inline int
@@ -372,7 +382,8 @@ do_lo_receive(struct loop_device *lo,
if (retval < 0)
return retval;
-
+ if (retval != bvec->bv_len)
+ return -EIO;
return 0;
}
@@ -411,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@@ -786,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
@@ -1058,9 +1069,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
- lo->lo_offset = info->lo_offset;
- lo->lo_sizelimit = info->lo_sizelimit;
- if (figure_loop_size(lo))
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
return -EFBIG;
}
loop_config_discard(lo);
@@ -1246,7 +1255,7 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
err = -ENXIO;
if (unlikely(lo->lo_state != Lo_bound))
goto out;
- err = figure_loop_size(lo);
+ err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
if (unlikely(err))
goto out;
sec = get_capacity(lo->lo_disk);
@@ -1284,13 +1293,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
goto out_unlocked;
break;
case LOOP_SET_STATUS:
- err = loop_set_status_old(lo, (struct loop_info __user *) arg);
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ err = loop_set_status_old(lo,
+ (struct loop_info __user *)arg);
break;
case LOOP_GET_STATUS:
err = loop_get_status_old(lo, (struct loop_info __user *) arg);
break;
case LOOP_SET_STATUS64:
- err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ err = loop_set_status64(lo,
+ (struct loop_info64 __user *) arg);
break;
case LOOP_GET_STATUS64:
err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
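
The LOOP_SET_STATUS changes add the common gate for state-changing ioctls on a read-only open: the operation is allowed only when the file was opened for writing or the caller has CAP_SYS_ADMIN. A reduced sketch of that branch, with placeholder command and handler names:

#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

#define EXAMPLE_SET_STATUS	0x4C01		/* placeholder ioctl number */

static int do_set_status(void __user *arg)	/* placeholder handler */
{
	return 0;
}

static int example_ioctl(fmode_t mode, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case EXAMPLE_SET_STATUS:
		/* changing state needs write access or CAP_SYS_ADMIN */
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
			err = do_set_status((void __user *)arg);
		break;
	default:
		err = -ENOTTY;
	}
	return err;
}
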
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 6b9a2000d56..a79fb4f7ff6 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -630,6 +630,7 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t
if (dev->status & 0x10)
return -ETIME;
+ memset(&hdr, 0, sizeof(hdr));
hdr.magic = PG_MAGIC;
hdr.dlen = dev->dlen;
copy = 0;
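
The added memset() is the standard fix for leaking uninitialized kernel stack through a struct that is later copied to user space; padding bytes and untouched fields otherwise keep whatever the stack held. A sketch of the pattern with a made-up header struct:

#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_hdr {		/* placeholder layout; note the implicit padding */
	int magic;
	char status;
	int dlen;
};

static int fill_and_copy(void __user *ubuf)
{
	struct example_hdr hdr;

	/* zero everything so padding and unset fields cannot leak stack data */
	memset(&hdr, 0, sizeof(hdr));
	hdr.magic = 0x5047;	/* placeholder values */
	hdr.dlen = 0;

	if (copy_to_user(ubuf, &hdr, sizeof(hdr)))
		return -EFAULT;
	return 0;
}
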
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b..148ab944378 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size);
static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
u32 snap_count = le32_to_cpu(ondisk->snap_count);
int ret = -ENOMEM;
+ if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+ return -ENXIO;
+ }
+
init_rwsem(&header->snap_rwsem);
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
}
/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
- u64 snapid,
- const char *obj)
-{
- struct ceph_osd_req_op *ops;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
- if (ret < 0)
- return ret;
-
- ops[0].snap.snapid = snapid;
-
- ret = rbd_req_sync_op(dev, NULL,
- CEPH_NOSNAP,
- 0,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
-
- rbd_destroy_ops(ops);
-
- return ret;
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
goto out_dh;
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ pr_warning("unrecognized header format"
+ " for image %s", rbd_dev->obj);
+ }
goto out_dh;
+ }
if (snap_count != header->total_snaps) {
snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
&dev_attr_create_snap.attr,
- &dev_attr_rollback_snap.attr,
NULL
};
@@ -2424,64 +2401,6 @@ err_unlock:
return ret;
}
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
- int ret;
- u64 snapid;
- u64 cur_ofs;
- char *seg_name = NULL;
- char *snap_name = kmalloc(count + 1, GFP_KERNEL);
- ret = -ENOMEM;
- if (!snap_name)
- return ret;
-
- /* parse snaps add command */
- snprintf(snap_name, count, "%s", buf);
- seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
- if (!seg_name)
- goto done;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
- if (ret < 0)
- goto done_unlock;
-
- dout("snapid=%lld\n", snapid);
-
- cur_ofs = 0;
- while (cur_ofs < rbd_dev->header.image_size) {
- cur_ofs += rbd_get_segment(&rbd_dev->header,
- rbd_dev->obj,
- cur_ofs, (u64)-1,
- seg_name, NULL);
- dout("seg_name=%s\n", seg_name);
-
- ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
- if (ret < 0)
- pr_warning("could not roll back obj %s err=%d\n",
- seg_name, ret);
- }
-
- ret = __rbd_update_snaps(rbd_dev);
- if (ret < 0)
- goto done_unlock;
-
- ret = count;
-
-done_unlock:
- mutex_unlock(&ctl_mutex);
-done:
- kfree(seg_name);
- kfree(snap_name);
-
- return ret;
-}
-
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17a..89ddab127e3 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
* handle GCR disks
*/
+#undef DEBUG
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -36,13 +38,11 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
#define MAX_FLOPPIES 2
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
enum swim_state {
idle,
locating,
@@ -177,7 +177,6 @@ struct swim3 {
struct floppy_state {
enum swim_state state;
- spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+ int index;
+ struct request *cur_req;
};
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
0, 0, 0, 0, 0, 0
};
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
- if (__blk_end_request(fd_req, err, nr_bytes))
- return true;
+ struct request *req = fs->cur_req;
+ int rc;
- fd_req = NULL;
- return false;
-}
+ swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+ err, nr_bytes, req);
-static bool swim3_end_request_cur(int err)
-{
- return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+ if (err)
+ nr_bytes = blk_rq_cur_bytes(req);
+ rc = __blk_end_request(req, err, nr_bytes);
+ if (rc)
+ return true;
+ fs->cur_req = NULL;
+ return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void do_fd_request(struct request_queue * q)
-{
- int i;
-
- for(i=0; i<floppy_count; i++) {
- struct floppy_state *fs = &floppy_states[i];
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD)
- continue;
- start_request(fs);
- }
-}
-
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
+ swim3_dbg("start request, initial state=%d\n", fs->state);
+
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
- if (!fd_req) {
- fd_req = blk_fetch_request(swim3_queue);
- if (!fd_req)
+ swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+ if (!fs->cur_req) {
+ fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+ swim3_dbg(" fetched request %p\n", fs->cur_req);
+ if (!fs->cur_req)
break;
}
- req = fd_req;
-#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
- printk(" errors=%d current_nr_sectors=%u\n",
- req->errors, blk_rq_cur_sectors(req));
+ req = fs->cur_req;
+
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, -ENODEV, 0);
+ continue;
+ }
+
+#if 0 /* This is really too verbose */
+ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+ req->rq_disk->disk_name, req->cmd,
+ (long)blk_rq_pos(req), blk_rq_sectors(req),
+ req->buffer);
+ swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+ (long)blk_rq_pos(req), (long)fs->total_secs);
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " try to write, disk write protected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
- fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
}
}
+static void do_fd_request(struct request_queue * q)
+{
+ start_request(q->queuedata);
+}
+
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
- unsigned long flags;
-
- spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
- spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
+ struct request *req = fs->cur_req;
- if (blk_rq_cur_sectors(fd_req) <= 0) {
- printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+ if (blk_rq_cur_sectors(req) <= 0) {
+ swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > blk_rq_cur_sectors(fd_req))
- n = blk_rq_cur_sectors(fd_req);
+ if (n > blk_rq_cur_sectors(req))
+ n = blk_rq_cur_sectors(req);
}
+
+ swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+ fs->req_sector, fs->secpertrack, fs->head, n);
+
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
- if (rq_data_dir(fd_req) == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
static void act(struct floppy_state *fs)
{
for (;;) {
+ swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+ fs->state, fs->req_cyl, fs->cur_cyl);
+
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
+ swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
break;
}
if (fs->req_cyl == fs->cur_cyl) {
- printk("whoops, seeking 0\n");
+ swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+ fs->req_cyl, fs->cur_cyl);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
return;
default:
- printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+ swim3_err("Unknown state %d\n", fs->state);
return;
}
}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* scan timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* seek timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- printk(KERN_ERR "swim3: seek timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* settle timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
- return;
+ goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
- return;
+ goto unlock;
}
- printk(KERN_ERR "swim3: seek settle timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek settle timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ unlock:
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
+ unsigned long flags;
int n;
+ swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"),
- (long)blk_rq_pos(fd_req));
- swim3_end_request_cur(-EIO);
+ swim3_err("Timeout %sing sector %ld\n",
+ (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fs->cur_req));
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
+ unsigned long flags;
+ struct request *req = fs->cur_req;
+
+ swim3_dbg("* interrupt, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
- printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
+ swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
- printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+ swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
- printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
- fs->expect_cyl, fs->cur_cyl);
+ swim3_err("Expected cyl %d, got %d\n",
+ fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- blk_update_request(fd_req, 0, n << 9);
+ blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
- printk("swim3: error %sing block %ld (err=%x)\n",
- rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)blk_rq_pos(fd_req), err);
- swim3_end_request_cur(-EIO);
+ swim3_err("Error %sing block %ld (err=%x)\n",
+ rq_data_dir(req) == WRITE? "writ": "read",
+ (long)blk_rq_pos(req), err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* musta been an error */
- printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
- printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
- swim3_end_request_cur(-EIO);
+ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+ swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
- if (swim3_end_request(0, fs->scount << 9)) {
+ fs->retries = 0;
+ if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
start_request(fs);
break;
default:
- printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+ swim3_err("Don't know what to do in state %d\n", fs->state);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
}
*/
+/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
- if (fs->state != idle) {
+ swim3_dbg("%s", "-> grab drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
+ if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
+ spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
- spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
+ spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
+
return 0;
}
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
+ swim3_dbg("%s", "-> release drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct floppy_state *fs = macio_get_drvdata(mdev);
+ struct swim3 __iomem *sw = fs->swim3;
+
+ if (!fs)
+ return;
+ if (mb_state != MB_FD)
+ return;
+
+ /* Clear state */
+ out_8(&sw->intr_enable, 0);
+ in_8(&sw->intr);
+ in_8(&sw->error);
+}
+
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
+ /* Do this first for message macros */
+ memset(fs, 0, sizeof(*fs));
+ fs->mdev = mdev;
+ fs->index = index;
+
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
- printk(KERN_WARNING "ifd%d: no address for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
- if (macio_irq_count(mdev) < 2) {
- printk(KERN_WARNING "fd%d: no intrs for device %s\n",
- index, swim->full_name);
+ if (macio_irq_count(mdev) < 1) {
+ swim3_err("%s", "No interrupt in device-tree\n");
+ return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
- printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
- printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
- memset(fs, 0, sizeof(*fs));
- spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
- printk("fd%d: couldn't map registers for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
- printk("fd%d: couldn't map DMA for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+ swim3_mb_event(mdev, MB_FD);
+
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
- printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
- index, fs->swim3_intr, swim->full_name);
+ swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
-/*
- if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
- printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
- fs->dma_intr);
- return -EBUSY;
- }
-*/
init_timer(&fs->timeout);
- printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+ swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
- int i, rc;
struct gendisk *disk;
+ int index, rc;
+
+ index = floppy_count++;
+ if (index >= MAX_FLOPPIES)
+ return -ENXIO;
/* Add the drive */
- rc = swim3_add_device(mdev, floppy_count);
+ rc = swim3_add_device(mdev, index);
if (rc)
return rc;
+ /* Now register that disk. Same comment about failure handling */
+ disk = disks[index] = alloc_disk(1);
+ if (disk == NULL)
+ return -ENOMEM;
+ disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (disk->queue == NULL) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ disk->queue->queuedata = &floppy_states[index];
- /* Now create the queue if not there yet */
- if (swim3_queue == NULL) {
+ if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
- swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (swim3_queue == NULL) {
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- return 0;
- }
}
- /* Now register that disk. Same comment about failure handling */
- i = floppy_count++;
- disk = disks[i] = alloc_disk(1);
- if (disk == NULL)
- return 0;
-
disk->major = FLOPPY_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = index;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[i];
- disk->queue = swim3_queue;
+ disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", i);
+ sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
.of_match_table = swim3_match,
},
.probe = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = swim3_mb_event,
+#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
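The swim3 rework above replaces the single global queue and global fd_req with one request queue per disk, using queuedata to point back at the drive state. A hedged sketch of that per-drive pattern, under the block API of this kernel generation and with hypothetical example_* names, could look like:

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/spinlock.h>

struct example_drive {
	int index;
	/* per-drive state would live here */
};

static DEFINE_SPINLOCK(example_lock);
static const struct block_device_operations example_fops = { };

/* The request function recovers its own drive via queuedata, so no global
 * "current request" or shared queue is needed. */
static void example_request(struct request_queue *q)
{
	struct example_drive *drv = q->queuedata;
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		pr_debug("exfd%d: got request %p\n", drv->index, req);
		/* drive-specific handling would go here */
		__blk_end_request_all(req, 0);
	}
}

static int example_register(struct example_drive *drv, int index, int major)
{
	struct gendisk *disk = alloc_disk(1);

	if (!disk)
		return -ENOMEM;
	disk->queue = blk_init_queue(example_request, &example_lock);
	if (!disk->queue) {
		put_disk(disk);
		return -ENOMEM;
	}
	disk->queue->queuedata = drv;		/* back-pointer used above */

	disk->major = major;
	disk->first_minor = index;
	disk->fops = &example_fops;
	disk->private_data = drv;
	sprintf(disk->disk_name, "exfd%d", index);
	set_capacity(disk, 2880);
	add_disk(disk);
	return 0;
}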
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index c2917ffad2c..34767a6d7f4 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -139,6 +139,8 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
/* These are here until the real ones get into the watchdog.h interface. */
#ifndef WDIOC_GETTIMEOUT
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
+restart:
atomic_set(&heartbeat_tofree, 2);
/*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
- if (heartbeat_recv_msg.msg.data[0] != 0) {
+ if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ printk(KERN_ERR PFX ": Unable to restore the IPMI"
+ " watchdog's settings, giving up.\n");
+ rv = -EIO;
+ goto out_unlock;
+ }
+
+ /*
+	 * The timer was not initialized, which means the BMC was
+	 * probably reset and lost the watchdog information.  Attempt
+	 * to restore the timer's settings.  Note that we still hold
+	 * the heartbeat lock, to keep a heartbeat from happening in
+	 * this process, so we must ask for no heartbeat here to
+	 * avoid deadlocking on this mutex.
+ */
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ printk(KERN_ERR PFX ": Unable to send the command to"
+ " set the watchdog's settings, giving up.\n");
+ goto out_unlock;
+ }
+
+ /* We might need a new heartbeat, so do it now */
+ goto restart;
+ } else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
+out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
- if (msg->msg.data[0] != 0) {
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ printk(KERN_INFO PFX "response: The IPMI controller appears"
+ " to have been reset, will attempt to reinitialize"
+ " the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
- }
ipmi_free_recv_msg(msg);
}
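The ipmi_watchdog hunks above make the heartbeat path recover from a BMC reset: if the response says the timer was never initialized, the driver re-sends the timer settings and retries the heartbeat a bounded number of times. The following is only an illustrative sketch of that control flow, not the driver code; every example_* name and EXAMPLE_TIMER_NOT_INIT are hypothetical stand-ins.

#include <linux/errno.h>

#define EXAMPLE_TIMER_NOT_INIT	0x80

static int example_send_heartbeat(void)         { return 0; }	/* stub */
static int example_restore_timer_settings(void) { return 0; }	/* stub */

static int example_heartbeat(void)
{
	int retries = 0;
	int rv;

restart:
	rv = example_send_heartbeat();
	if (rv == EXAMPLE_TIMER_NOT_INIT) {
		if (++retries > 3)
			return -EIO;	/* could not restore settings, give up */
		rv = example_restore_timer_settings();
		if (rv)
			return rv;
		goto restart;		/* settings restored, heartbeat again */
	}
	return rv;
}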
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 63e19ba56bb..6035ab8d5ef 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -941,7 +941,7 @@ void get_random_bytes(void *buf, int nbytes)
if (!arch_get_random_long(&v))
break;
- memcpy(buf, &v, chunk);
+ memcpy(p, &v, chunk);
p += chunk;
nbytes -= chunk;
}
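The random.c one-liner fixes a chunked-copy bug: each memcpy() must target the moving cursor p, not the start of the buffer, otherwise every chunk overwrites the first one. A standalone userspace illustration of the corrected loop (the random word here is a made-up constant, not the kernel's entropy source):

#include <stddef.h>
#include <string.h>

static void fill_in_chunks(void *buf, size_t nbytes)
{
	char *p = buf;

	while (nbytes) {
		unsigned long v = 0x0123456789abcdefUL;	/* stand-in for a random word */
		size_t chunk = nbytes < sizeof(v) ? nbytes : sizeof(v);

		memcpy(p, &v, chunk);	/* copy to the cursor, not to buf */
		p += chunk;
		nbytes -= chunk;
	}
}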
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f2144..dcd8babae9e 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block)
else
op.config |= CFG_MID_FRAG;
- writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
- writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
- writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
- writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
- writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ if (first_block) {
+ writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
+ writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
+ writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
+ writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
+ writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
+ }
}
memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 643b055ed3c..8f049103708 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,36 +1,29 @@
-config ARCH_HAS_DEVFREQ
- bool
- depends on ARCH_HAS_OPP
- help
- Denotes that the architecture supports DEVFREQ. If the architecture
- supports multiple OPP entries per device and the frequency of the
- devices with OPPs may be altered dynamically, the architecture
- supports DEVFREQ.
-
menuconfig PM_DEVFREQ
bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
- depends on PM_OPP && ARCH_HAS_DEVFREQ
help
- With OPP support, a device may have a list of frequencies and
- voltages available. DEVFREQ, a generic DVFS framework can be
- registered for a device with OPP support in order to let the
- governor provided to DEVFREQ choose an operating frequency
- based on the OPP's list and the policy given with DEVFREQ.
+ A device may have a list of frequencies and voltages available.
+ devfreq, a generic DVFS framework can be registered for a device
+ in order to let the governor provided to devfreq choose an
+ operating frequency based on the device driver's policy.
- Each device may have its own governor and policy. DEVFREQ can
+ Each device may have its own governor and policy. Devfreq can
reevaluate the device state periodically and/or based on the
- OPP list changes (each frequency/voltage pair in OPP may be
- disabled or enabled).
+ notification to "nb", a notifier block, of devfreq.
- Like some CPUs with CPUFREQ, a device may have multiple clocks.
+ Like some CPUs with CPUfreq, a device may have multiple clocks.
However, because the clock frequencies of a single device are
- determined by the single device's state, an instance of DEVFREQ
+ determined by the single device's state, an instance of devfreq
is attached to a single device and returns a "representative"
- clock frequency from the OPP of the device, which is also attached
- to a device by 1-to-1. The device registering DEVFREQ takes the
- responsiblity to "interpret" the frequency listed in OPP and
+ clock frequency of the device, which is also attached
+ to a device by 1-to-1. The device registering devfreq takes the
+	responsibility to "interpret" the representative frequency and
to set its every clock accordingly with the "target" callback
- given to DEVFREQ.
+ given to devfreq.
+
+	  When OPP is used with the devfreq device, it is recommended to
+	  register devfreq's nb to the OPP's notifier head, and the OPP
+	  helper functions defined in devfreq.h may be used with such
+	  devices.
if PM_DEVFREQ
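The reworked help text describes a driver handing devfreq a "target" callback that interprets the representative frequency and programs the device's clocks. A hedged sketch of such a registration, assuming the devfreq_dev_profile/devfreq_add_device interface of this kernel generation (see include/linux/devfreq.h for the authoritative definitions); the example_* helpers are hypothetical and the load numbers are made up.

#include <linux/devfreq.h>

static int example_set_rate(struct device *dev, unsigned long rate)
{
	return 0;		/* stub: would round and program every clock */
}

static unsigned long example_get_rate(struct device *dev)
{
	return 200000000;	/* stub */
}

static int example_target(struct device *dev, unsigned long *freq)
{
	/* Interpret the representative frequency and set the clocks. */
	return example_set_rate(dev, *freq);
}

static int example_get_status(struct device *dev,
			      struct devfreq_dev_status *stat)
{
	stat->current_frequency = example_get_rate(dev);
	stat->total_time = 100;
	stat->busy_time = 50;	/* made-up load figures for illustration */
	return 0;
}

static struct devfreq_dev_profile example_profile = {
	.initial_freq	= 200000000,
	.polling_ms	= 100,
	.target		= example_target,
	.get_dev_status	= example_get_status,
};

/* In probe():
 *	devfreq_add_device(dev, &example_profile,
 *			   &devfreq_simple_ondemand, NULL);
 */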
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 5d15b812377..59d24e9cb8c 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -15,7 +15,9 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
@@ -416,10 +418,14 @@ out:
*/
int devfreq_remove_device(struct devfreq *devfreq)
{
+ bool central_polling;
+
if (!devfreq)
return -EINVAL;
- if (!devfreq->governor->no_central_polling) {
+ central_polling = !devfreq->governor->no_central_polling;
+
+ if (central_polling) {
mutex_lock(&devfreq_list_lock);
while (wait_remove_device == devfreq) {
mutex_unlock(&devfreq_list_lock);
@@ -431,7 +437,7 @@ int devfreq_remove_device(struct devfreq *devfreq)
mutex_lock(&devfreq->lock);
_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
- if (!devfreq->governor->no_central_polling)
+ if (central_polling)
mutex_unlock(&devfreq_list_lock);
return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ab8f469f5cf..5a99bb3f255 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on ARCH_MX3
+ depends on SOC_IMX31 || SOC_IMX35
select DMA_ENGINE
default y
help
@@ -216,7 +216,7 @@ config PCH_DMA
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 8af8e864a9c..73464a62adf 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
{ .compatible = "fsl,p1020-memory-controller", },
{ .compatible = "fsl,p1021-memory-controller", },
{ .compatible = "fsl,p2020-memory-controller", },
- { .compatible = "fsl,p4080-memory-controller", },
+ { .compatible = "fsl,qoriq-memory-controller", },
{},
};
MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index bcb1126e3d0..153980be4ee 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -585,14 +585,12 @@ int dmi_name_in_serial(const char *str)
}
/**
- * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information.
+ * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
* @str: Case sensitive Name
*/
int dmi_name_in_vendors(const char *str)
{
- static int fields[] = { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_SYS_VENDOR,
- DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_BOARD_VENDOR,
- DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_NONE };
+ static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
int i;
for (i = 0; fields[i] != DMI_NONE; i++) {
int f = fields[i];
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87f..b0a81173a26 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *timespec, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
timespec->tv_nsec = 0;
get_var_data_locked(efivars, &efivars->walk_entry->var);
size = efivars->walk_entry->var.DataSize;
- memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ *buf = kmalloc(size, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, efivars->walk_entry->var.Data,
+ size);
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
return -1;
}
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb10790..2cce44a1d7d 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
ibft_cleanup();
}
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
{
int rc = 0;
+	/*
+	 * On UEFI systems setup_arch()/find_ibft_region() is called
+	 * before the ACPI tables are parsed, and it only does the
+	 * legacy scan, so look for the iBFT via ACPI here as well.
+	 */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
if (ibft_addr) {
- printk(KERN_INFO "iBFT detected at 0x%llx.\n",
- (u64)isa_virt_to_bus(ibft_addr));
+ pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd..4da4eb9ae92 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
static const struct {
char *sign;
} ibft_signs[] = {
-#ifdef CONFIG_ACPI
- /*
- * One spec says "IBFT", the other says "iBFT". We have to check
- * for both.
- */
- { ACPI_SIG_IBFT },
-#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
@@ -62,14 +55,6 @@ static const struct {
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
- ibft_addr = (struct acpi_table_ibft *)header;
- return 0;
-}
-#endif /* CONFIG_ACPI */
-
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
@@ -108,20 +94,12 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
-#ifdef CONFIG_ACPI
- int i;
-#endif
ibft_addr = NULL;
-#ifdef CONFIG_ACPI
- for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
- acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!ibft_addr && !efi_enabled)
+ if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index f10fc521951..1eedb6f7fda 100644
--- a/drivers/firmware/sigma.c
+++ b/drivers/firmware/sigma.c
@@ -14,13 +14,34 @@
#include <linux/module.h>
#include <linux/sigma.h>
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+ size_t payload = 0;
+
+ switch (sa->instr) {
+ case SIGMA_ACTION_WRITEXBYTES:
+ case SIGMA_ACTION_WRITESINGLE:
+ case SIGMA_ACTION_WRITESAFELOAD:
+ payload = sigma_action_len(sa);
+ break;
+ default:
+ break;
+ }
+
+ payload = ALIGN(payload, 2);
+
+ return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
{
- struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
size_t len = sigma_action_len(sa);
- int ret = 0;
+ int ret;
pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
case SIGMA_ACTION_WRITEXBYTES:
case SIGMA_ACTION_WRITESINGLE:
case SIGMA_ACTION_WRITESAFELOAD:
- if (ssfw->fw->size < ssfw->pos + len)
- return -EINVAL;
ret = i2c_master_send(client, (void *)&sa->addr, len);
if (ret < 0)
return -EINVAL;
break;
-
case SIGMA_ACTION_DELAY:
- ret = 0;
udelay(len);
len = 0;
break;
-
case SIGMA_ACTION_END:
- return 1;
-
+ return 0;
default:
return -EINVAL;
}
- /* when arrive here ret=0 or sent data */
- ssfw->pos += sigma_action_size(sa, len);
- return ssfw->pos == ssfw->fw->size;
+ return 1;
}
static int
process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
{
- pr_debug("%s: processing %p\n", __func__, ssfw);
+ struct sigma_action *sa;
+ size_t size;
+ int ret;
+
+ while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+ sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+ size = sigma_action_size(sa);
+ ssfw->pos += size;
+ if (ssfw->pos > ssfw->fw->size || size == 0)
+ break;
+
+ ret = process_sigma_action(client, sa);
- while (1) {
- int ret = process_sigma_action(client, ssfw);
pr_debug("%s: action returned %i\n", __func__, ret);
- if (ret == 1)
- return 0;
- else if (ret)
+
+ if (ret <= 0)
return ret;
}
+
+ if (ssfw->pos != ssfw->fw->size)
+ return -EINVAL;
+
+ return 0;
}
int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
/* then verify the header */
ret = -EINVAL;
- if (fw->size < sizeof(*ssfw_head))
+
+ /*
+	 * Reject too small or unreasonably large files. The upper limit has been
+ * chosen a bit arbitrarily, but it should be enough for all practical
+ * purposes and having the limit makes it easier to avoid integer
+ * overflows later in the loading process.
+ */
+ if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
goto done;
ssfw_head = (void *)fw->data;
if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
goto done;
- crc = crc32(0, fw->data, fw->size);
+ crc = crc32(0, fw->data + sizeof(*ssfw_head),
+ fw->size - sizeof(*ssfw_head));
pr_debug("%s: crc=%x\n", __func__, crc);
- if (crc != ssfw_head->crc)
+ if (crc != le32_to_cpu(ssfw_head->crc))
goto done;
ssfw.pos = sizeof(*ssfw_head);
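The sigma.c hunks bound the firmware size, checksum only the payload (the header stores the CRC, so it cannot be covered by it), and compare against the little-endian field. The following is an illustrative sketch of that validation only; example_hdr is a hypothetical layout, the real one lives in linux/sigma.h.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/crc32.h>

struct example_hdr {
	unsigned char magic[7];
	u8 version;
	__le32 crc;
} __packed;

static int example_validate(const struct firmware *fw)
{
	const struct example_hdr *hdr = (const void *)fw->data;
	u32 crc;

	/* Reject too-small and implausibly large images up front. */
	if (fw->size < sizeof(*hdr) || fw->size >= 0x4000000)
		return -EINVAL;

	/* CRC covers the payload only, not the header that stores it. */
	crc = crc32(0, fw->data + sizeof(*hdr), fw->size - sizeof(*hdr));
	if (crc != le32_to_cpu(hdr->crc))
		return -EINVAL;	/* corrupted or truncated image */

	return 0;
}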
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8d..4e018d6a763 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13..f8ce29ef9f8 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
@@ -43,6 +42,9 @@
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
+#define DA9052_IRQ_GPI0 16
+#define DA9052_GPIO_ODD_SHIFT 7
+#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
- unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
- if (value) {
- register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
- }
} else {
- if (value) {
- register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
- }
}
}
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1;
- .ngpio = 16;
- .base = -1;
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e7386925..461958fc226 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}
+static void ioh_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien &= ~(1 << (d->irq - chip->irq_base));
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien |= 1 << (d->irq - chip->irq_base);
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
+ ct->chip.irq_disable = ioh_irq_disable;
+ ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e1..5cd04b65c55 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 28..31 are input only on MPC5121 */
+ if (gpio >= 28)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = mpc8xxx_gpio_dir_out;
- if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
- gc->get = mpc8572_gpio_get;
- else
- gc->get = mpc8xxx_gpio_get;
+ gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+ mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+ gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+ mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 147df8ae79d..d3f3e8f5456 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
* Translate OpenFirmware node properties into platform_data
* WARNING: This is DEPRECATED and will be removed eventually!
*/
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
struct device_node *node;
@@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
*invert = *val;
}
#else
-void
+static void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
*gpio_base = -1;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1..4102f63230f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);
- pdata = dev->dev.platform_data;
- if (pdata == NULL)
- return -ENODEV;
-
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 405c63b9d53..8323fc38984 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
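The drm_crtc.c hunk range-checks the user-supplied clip count before it is used to size an allocation, so a huge or negative value cannot drive an oversized or overflowing kzalloc(). A minimal sketch of that check; EXAMPLE_MAX_CLIPS and example_clip are stand-ins for DRM_MODE_FB_DIRTY_MAX_CLIPS and drm_clip_rect.

#include <linux/err.h>
#include <linux/slab.h>

#define EXAMPLE_MAX_CLIPS 256

struct example_clip {
	unsigned short x1, y1, x2, y2;
};

static void *alloc_clips(int num_clips)
{
	/* Validate the untrusted count before sizing the allocation. */
	if (num_clips < 0 || num_clips > EXAMPLE_MAX_CLIPS)
		return ERR_PTR(-EINVAL);

	return kzalloc(num_clips * sizeof(struct example_clip), GFP_KERNEL);
}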
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3969f7553fe..d2619d72cec 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -456,6 +456,30 @@ done:
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- set->mode = NULL;
- set->num_connectors = 0;
+ return drm_crtc_helper_disable(set->crtc);
}
dev = set->crtc->dev;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 68b756253f9..44a5d0ad8b7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
/* Prevent vblank irq processing while disabling vblank irqs,
* so no updates of timestamps or count can happen after we've
* disabled. Needed to prevent races in case of delayed irq's.
- * Disable preemption, so vblank_time_lock is held as short as
- * possible, even under a kernel with PREEMPT_RT patches.
*/
- preempt_disable();
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
dev->driver->disable_vblank(dev, crtc);
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
clear_vblank_timestamps(dev, crtc);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
- preempt_enable();
}
static void vblank_disable_fn(unsigned long arg)
@@ -889,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
- /* Disable preemption while holding vblank_time_lock. Do
- * it explicitely to guard against PREEMPT_RT kernel.
- */
- preempt_disable();
spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
if (!dev->vblank_enabled[crtc]) {
/* Enable vblank irqs under vblank_time_lock protection.
@@ -912,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
}
}
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
- preempt_enable();
} else {
if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc..2bb07bca511 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
#include "drm.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
static int lowlevel_buffer_allocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
- (dma_addr_t *)&entry->paddr, GFP_KERNEL);
- if (!entry->paddr) {
+ buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}
- DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
- (unsigned int)entry->vaddr, entry->paddr, entry->size);
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr,
+ buffer->size);
return 0;
}
static void lowlevel_buffer_deallocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (entry->paddr && entry->vaddr && entry->size)
- dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
- entry->paddr);
+ if (buffer->dma_addr && buffer->size)
+ dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+ (dma_addr_t)buffer->dma_addr);
else
- DRM_DEBUG_KMS("entry data is null.\n");
+ DRM_DEBUG_KMS("buffer data are invalid.\n");
}
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s.\n", __FILE__);
+ DRM_DEBUG_KMS("desired size = 0x%x\n", size);
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
return ERR_PTR(-ENOMEM);
}
- entry->size = size;
+ buffer->size = size;
/*
* allocate memory region with size and set the memory information
- * to vaddr and paddr of a entry object.
+ * to vaddr and dma_addr of a buffer object.
*/
- if (lowlevel_buffer_allocate(dev, entry) < 0) {
- kfree(entry);
- entry = NULL;
+ if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+ kfree(buffer);
+ buffer = NULL;
return ERR_PTR(-ENOMEM);
}
- return entry;
+ return buffer;
}
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (!entry) {
- DRM_DEBUG_KMS("entry is null.\n");
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null.\n");
return;
}
- lowlevel_buffer_deallocate(dev, entry);
+ lowlevel_buffer_deallocate(dev, buffer);
- kfree(entry);
- entry = NULL;
+ kfree(buffer);
+ buffer = NULL;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01..6e91f9caa5d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
#ifndef _EXYNOS_DRM_BUF_H_
#define _EXYNOS_DRM_BUF_H_
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
- dma_addr_t paddr;
- void __iomem *vaddr;
- unsigned int size;
-};
-
/* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size);
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+/* get memory information of a drm framebuffer. */
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
/* remove allocated physical memory. */
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry);
+ struct exynos_drm_gem_buf *buffer);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e76872..d620b078425 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
struct exynos_drm_connector {
struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_manager *manager;
};
/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
DRM_DEBUG_KMS("%s\n", __FILE__);
mode->clock = timing->pixclock / 1000;
+ mode->vrefresh = timing->refresh;
mode->hdisplay = timing->xres;
mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
mode->vsync_start = mode->vdisplay + timing->upper_margin;
mode->vsync_end = mode->vsync_start + timing->vsync_len;
mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+ if (timing->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (timing->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
}
/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
memset(timing, 0, sizeof(*timing));
timing->pixclock = mode->clock * 1000;
- timing->refresh = mode->vrefresh;
+ timing->refresh = drm_mode_vrefresh(mode);
timing->xres = mode->hdisplay;
timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
unsigned int count;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!display) {
- DRM_DEBUG_KMS("display is null.\n");
+ if (!display_ops) {
+ DRM_DEBUG_KMS("display_ops is null.\n");
return 0;
}
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* P.S. in case of lcd panel, count is always 1 if success
* because lcd panel has only one mode.
*/
- if (display->get_edid) {
+ if (display_ops->get_edid) {
int ret;
void *edid;
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- ret = display->get_edid(manager->dev, connector,
+ ret = display_ops->get_edid(manager->dev, connector,
edid, MAX_EDID);
if (ret < 0) {
DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct fb_videomode *timing;
- if (display->get_timing)
- timing = display->get_timing(manager->dev);
+ if (display_ops->get_timing)
+ timing = display_ops->get_timing(manager->dev);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
struct fb_videomode timing;
int ret = MODE_BAD;
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
convert_to_video_timing(&timing, mode);
- if (display && display->check_timing)
- if (!display->check_timing(manager->dev, (void *)&timing))
+ if (display_ops && display_ops->check_timing)
+ if (!display_ops->check_timing(manager->dev, (void *)&timing))
ret = MODE_OK;
return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- return connector->encoder;
+ obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+ exynos_connector->encoder_id);
+ return NULL;
+ }
+
+ encoder = obj_to_encoder(obj);
+
+ return encoder;
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display && display->is_connected) {
- if (display->is_connected(manager->dev))
+ if (display_ops && display_ops->is_connected) {
+ if (display_ops->is_connected(manager->dev))
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector = &exynos_connector->drm_connector;
- switch (manager->display->type) {
+ switch (manager->display_ops->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
if (err)
goto err_connector;
+ exynos_connector->encoder_id = encoder->base.id;
+ exynos_connector->manager = manager;
connector->encoder = encoder;
+
err = drm_mode_connector_attach_encoder(connector, encoder);
if (err) {
DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb..ee43cc22085 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
exynos_drm_fn_encoder(crtc, overlay,
exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned int actual_w;
unsigned int actual_h;
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
- overlay->paddr = entry->paddr;
- overlay->vaddr = entry->vaddr;
+ overlay->dma_addr = buffer->dma_addr;
+ overlay->vaddr = buffer->kvaddr;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
(unsigned long)overlay->vaddr,
- (unsigned long)overlay->paddr);
+ (unsigned long)overlay->dma_addr);
actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- /* TODO */
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ exynos_drm_fn_encoder(crtc, NULL,
+ exynos_drm_encoder_crtc_disable);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* drm framework doesn't check NULL. */
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
static bool
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2..25f72a62cb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos);
#endif
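With exynos_drm_crtc_pos now public, the overlay clipping done by exynos_drm_overlay_update() can be sketched as follows; clip_overlay() is a hypothetical helper, the min() expressions mirror the crtc hunk above.
/* Clamp the overlay to the part of the mode that is actually visible. */
static void clip_overlay(struct drm_display_mode *mode,
			 struct exynos_drm_crtc_pos *pos,
			 unsigned int *actual_w, unsigned int *actual_h)
{
	*actual_w = min(mode->hdisplay - pos->crtc_x, pos->crtc_w);
	*actual_h = min(mode->vdisplay - pos->crtc_y, pos->crtc_h);
}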
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c1..53e2216de61 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_crtc_helper.h"
#include <drm/exynos_drm.h>
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
exynos_drm_mode_config_init(dev);
/*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae7..5e02e6ecc2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <linux/module.h>
#include "drm.h"
#define MAX_CRTC 2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
* @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- * and this is physically continuous.
+ * @dma_addr: bus address (accessed by dma) of the memory region allocated
+ * for an overlay.
* @vaddr: virtual memory addresss to this overlay.
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
* @check_timing: check if timing is valid or not.
* @power_on: display device on or off.
*/
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
* @commit: set current hw specific display mode to hw.
+ * @disable: disable hardware specific display mode.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
*/
struct exynos_drm_manager_ops {
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*commit)(struct device *subdrv_dev);
+ void (*disable)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
};
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
int pipe;
struct exynos_drm_manager_ops *ops;
struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_display *display;
+ struct exynos_drm_display_ops *display_ops;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67..153061415ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (manager_ops && manager_ops->commit)
+ manager_ops->commit(manager->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ if (manager_ops && manager_ops->disable)
+ manager_ops->disable(manager->dev);
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
- if (display && display->power_on)
- display->power_on(manager->dev, mode);
+ DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+ connector->base.id, mode);
+ if (display_ops && display_ops->power_on)
+ display_ops->power_on(manager->dev, mode);
}
}
}
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
-
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev);
}
static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_manager *manager;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
+ /*
+ * if the crtc is detached from the encoder, check the pipe;
+ * otherwise check the crtc attached to the encoder.
+ */
+ if (!encoder->crtc) {
+ manager = to_exynos_encoder(encoder)->manager;
+ if (manager->pipe < 0 ||
+ private->crtc[manager->pipe] != crtc)
+ continue;
+ } else {
+ if (encoder->crtc != crtc)
+ continue;
+ }
fn(encoder, data);
}
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int crtc = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * when crtc is detached from encoder, this pipe is used
+ * to select manager operation
+ */
+ manager->pipe = crtc;
- overlay_ops->commit(manager->dev);
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev);
}
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
struct exynos_drm_overlay *overlay = data;
- overlay_ops->mode_set(manager->dev, overlay);
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (overlay_ops && overlay_ops->disable)
+ overlay_ops->disable(manager->dev);
+
+ /*
+ * the crtc is already detached from the encoder and the
+ * last step of the detach is done, so clear the pipe in the
+ * manager to prevent repeated calls.
+ */
+ if (!encoder->crtc)
+ manager->pipe = -1;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a..a22acfbf0e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd524..5bf4a1ac7f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
*
* @fb: drm framebuffer obejct.
* @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- * - containing only the information to physically continuous memory
- * region allocated at default framebuffer creation.
+ * @buffer: pointer to exynos_drm_gem_buf object.
+ * - contains the memory information for the memory region allocated
+ * at default framebuffer creation.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
* default framebuffer has no gem object so
* a buffer of the default framebuffer should be released at here.
*/
- if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
- exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+ if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+ exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
kfree(exynos_fb);
exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
*/
if (!mode_cmd->handle) {
if (!file_priv) {
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
/*
* in case that file_priv is NULL, it allocates
* only buffer and this buffer would be used
* for default framebuffer.
*/
- entry = exynos_drm_buf_create(dev, size);
- if (IS_ERR(entry)) {
- ret = PTR_ERR(entry);
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ ret = PTR_ERR(buffer);
goto err_buffer;
}
- exynos_fb->entry = entry;
+ exynos_fb->buffer = buffer;
- DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
- (unsigned long)entry->paddr, size);
+ DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+ (unsigned long)buffer->dma_addr, size);
goto out;
} else {
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
- size,
- &mode_cmd->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &mode_cmd->handle,
+ size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
* so that default framebuffer has no its own gem object,
* only its own buffer object.
*/
- exynos_fb->entry = exynos_gem_obj->entry;
+ exynos_fb->buffer = exynos_gem_obj->buffer;
- DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
- (unsigned long)exynos_fb->entry->paddr, size,
+ DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+ (unsigned long)exynos_fb->buffer->dma_addr, size,
(unsigned int)&exynos_gem_obj->base);
out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
return exynos_drm_fb_init(file_priv, dev, mode_cmd);
}
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry = exynos_fb->entry;
- if (!entry)
+ buffer = exynos_fb->buffer;
+ if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)entry->vaddr,
- (unsigned long)entry->paddr);
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr);
- return entry;
+ return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_drm_fb_create,
+ .output_poll_changed = exynos_drm_output_poll_changed,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a771..836f4100818 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
- struct drm_framebuffer *fb,
- unsigned int fb_width,
- unsigned int fb_height)
+ struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
- struct exynos_drm_buf_entry *entry;
- unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+ struct exynos_drm_gem_buf *buffer;
+ unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
exynos_fb->fb = fb;
drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitch;
- dev->mode_config.fb_base = entry->paddr;
- fbi->screen_base = entry->vaddr + offset;
- fbi->fix.smem_start = entry->paddr + offset;
+ dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ fbi->screen_base = buffer->kvaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ ret = exynos_drm_fbdev_update(helper, helper->fb);
if (ret < 0)
fb_dealloc_cmap(&fbi->cmap);
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
}
helper->fb = exynos_fbdev->fb;
- return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ return exynos_drm_fbdev_update(helper, helper->fb);
}
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
fb_helper = private->fb_helper;
if (fb_helper) {
+ struct list_head temp_list;
+
+ INIT_LIST_HEAD(&temp_list);
+
+ /*
+ * fb_helper is reinitialized but the kernel fb is reused,
+ * so kernel_fb_list needs to be backed up and restored
+ */
+ if (!list_empty(&fb_helper->kernel_fb_list))
+ list_replace_init(&fb_helper->kernel_fb_list,
+ &temp_list);
+
drm_fb_helper_fini(fb_helper);
ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
return ret;
}
+ if (!list_empty(&temp_list))
+ list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret < 0) {
DRM_ERROR("failed to add fb helper to connectors\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9..db3b3d9e731 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
return 0;
}
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.is_connected = fimd_display_is_connected,
.get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
writel(val, ctx->regs + VIDCON0);
}
+static void fimd_disable(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct drm_device *drm_dev = subdrv->drm_dev;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* fimd dma off */
+ val = readl(ctx->regs + VIDCON0);
+ val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
+ writel(val, ctx->regs + VIDCON0);
+
+ /*
+ * if vblank is still enabled when dma is turned off,
+ * disable the vsync interrupt.
+ */
+ if (drm_dev->vblank_enabled[manager->pipe] &&
+ atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
+ drm_vblank_put(drm_dev, manager->pipe);
+
+ /*
+ * if vblank_disable_allowed is 0 then disable the
+ * vsync interrupt right now; otherwise the vsync interrupt
+ * would be disabled by the drm timer once the current process
+ * gives up ownership of the vblank event.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, manager->pipe);
+ }
+}
+
static int fimd_enable_vblank(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
static struct exynos_drm_manager_ops fimd_manager_ops = {
.commit = fimd_commit,
+ .disable = fimd_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
- win_data->paddr = overlay->paddr + offset;
+ win_data->dma_addr = overlay->dma_addr + offset;
win_data->vaddr = overlay->vaddr + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->paddr,
+ (unsigned long)win_data->dma_addr,
(unsigned long)win_data->vaddr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
writel(val, ctx->regs + SHADOWCON);
/* buffer start address */
- val = win_data->paddr;
+ val = (unsigned long)win_data->dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = win_data->paddr + size;
+ val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->paddr, val, size);
+ (unsigned long)win_data->dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
static void fimd_win_disable(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data;
int win = ctx->default_win;
u32 val;
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
if (win < 0 || win > WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
-
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* VSYNC interrupt */
writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ /*
+ * when vblank_disable_allowed is 1, manager->pipe can already be
+ * -1 here: the disable callback does not disable the vsync
+ * interrupt immediately, so a vsync interrupt can still occur at
+ * this point. the vsync interrupt will be disabled by the timer
+ * handler later.
+ */
+ if (manager->pipe == -1)
+ return IRQ_HANDLED;
+
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->irq_enabled = 1;
- /*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(drm_vblank_put function was called)
- */
- drm_dev->vblank_disable_allowed = 1;
-
return 0;
}
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
subdrv->manager.pipe = -1;
subdrv->manager.ops = &fimd_manager_ops;
subdrv->manager.overlay_ops = &fimd_overlay_ops;
- subdrv->manager.display = &fimd_display;
+ subdrv->manager.display_ops = &fimd_display_ops;
subdrv->manager.dev = dev;
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906e..aba0fe47f7e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
}
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle)
+static struct exynos_drm_gem_obj
+ *exynos_drm_gem_init(struct drm_device *drm_dev,
+ struct drm_file *file_priv, unsigned int *handle,
+ unsigned int size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
struct drm_gem_object *obj;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- size = roundup(size, PAGE_SIZE);
-
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
DRM_ERROR("failed to allocate exynos gem object.\n");
return ERR_PTR(-ENOMEM);
}
- /* allocate the new buffer object and memory region. */
- entry = exynos_drm_buf_create(dev, size);
- if (!entry) {
- kfree(exynos_gem_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- exynos_gem_obj->entry = entry;
-
obj = &exynos_gem_obj->base;
- ret = drm_gem_object_init(dev, obj, size);
+ ret = drm_gem_object_init(drm_dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initailize gem object.\n");
- goto err_obj_init;
+ DRM_ERROR("failed to initialize gem object.\n");
+ ret = -EINVAL;
+ goto err_object_init;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
err_create_mmap_offset:
drm_gem_object_release(obj);
-err_obj_init:
- exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
-
+err_object_init:
kfree(exynos_gem_obj);
return ERR_PTR(ret);
}
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size)
+{
+
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+ struct exynos_drm_gem_buf *buffer;
+
+ size = roundup(size, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ return ERR_CAST(buffer);
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
+ if (IS_ERR(exynos_gem_obj)) {
+ exynos_drm_buf_destroy(dev, buffer);
+ return exynos_gem_obj;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+
+ return exynos_gem_obj;
+}
+
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
- struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
- DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &args->handle, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned long pfn, vm_size;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vm_size = vma->vm_end - vma->vm_start;
/*
- * a entry contains information to physically continuous memory
+ * a buffer contains information about the physically contiguous memory
* allocated by user request or at framebuffer creation.
*/
- entry = exynos_gem_obj->entry;
+ buffer = exynos_gem_obj->buffer;
/* check if user-requested size is valid. */
- if (vm_size > entry->size)
+ if (vm_size > buffer->size)
return -EINVAL;
/*
* get page frame number to physical memory to be mapped
* to user space.
*/
- pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
exynos_gem_obj = to_exynos_gem_obj(gem_obj);
- exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+ exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
kfree(exynos_gem_obj);
}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * args->bpp >> 3;
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
+ args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+ pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT) + page_offset;
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
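The pfn computation above assumes the buffer is physically contiguous and not behind an IOMMU; a hedged sketch of a userspace mapping built the same way, where example_mmap() is a hypothetical helper rather than the driver's actual mmap implementation:
/* Map the contiguous, write-combined buffer into userspace. */
static int example_mmap(struct exynos_drm_gem_buf *buffer,
			struct vm_area_struct *vma)
{
	unsigned long pfn = (unsigned long)buffer->dma_addr >> PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > buffer->size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
}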
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277..ef8797334e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
struct exynos_drm_gem_obj, base)
/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (accessed by dma) of the allocated memory region.
+ * - this could be a physical address without an IOMMU, or a
+ * device address with an IOMMU.
+ * @size: size of allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ unsigned long size;
+};
+
+/*
* exynos drm buffer structure.
*
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- * - containing the information to physically
+ * @buffer: a pointer to exynos_drm_gem_buf object.
+ * - contains the information about the memory region allocated
+ * by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
*
@@ -45,13 +61,13 @@
*/
struct exynos_drm_gem_obj {
struct drm_gem_object base;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
/* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size);
/*
* request gem object creation and buffer allocation as the size
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f40f1ce1d8..004b048c519 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
@@ -636,11 +637,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
+ int ret;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
if (ring->size == 0)
return 0;
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
seq_printf(m, "Ring %s:\n", ring->name);
seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +660,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -842,7 +850,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 crstanddelay = I915_READ16(CRSTANDVID);
+ u16 crstanddelay;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ crstanddelay = I915_READ16(CRSTANDVID);
+
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
@@ -940,7 +957,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 delayfreq;
- int i;
+ int ret, i;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +969,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -962,13 +985,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 inttoext;
- int i;
+ int ret, i;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -977,9 +1006,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
- u32 rstdbyctl = I915_READ(RSTDBYCTL);
- u16 crstandvid = I915_READ16(CRSTANDVID);
+ u32 rgvmodectl, rstdbyctl;
+ u16 crstandvid;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ rgvmodectl = I915_READ(MEMMODECTL);
+ rstdbyctl = I915_READ(RSTDBYCTL);
+ crstandvid = I915_READ16(CRSTANDVID);
+
+ mutex_unlock(&dev->struct_mutex);
seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
"yes" : "no");
@@ -1167,9 +1206,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93..a9ae374861e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
diff1 = now - dev_priv->last_time1;
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
+ dev_priv->chipset_power = ret;
+
return ret;
}
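The diff1 <= 10 guard above is a caching pattern: when the counter is polled too often, the saved value is returned instead of recomputing and dividing by a tiny or zero interval. A generic sketch, assuming jiffies with HZ=1000 so that 10 jiffies is roughly 10 ms; the names are hypothetical:
/* Return the cached value when polled within the minimum interval. */
static unsigned long rate_limited_value(unsigned long now, unsigned long fresh,
					unsigned long *last_time,
					unsigned long *cached)
{
	if (now - *last_time <= 10)
		return *cached;

	*cached = fresh;
	*last_time = now;
	return fresh;
}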
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9c2cfe45da..a1103fc6597 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,17 +58,17 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: false)");
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: true)");
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
-unsigned int i915_enable_fbc __read_mostly = -1;
+int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
-unsigned int i915_panel_use_ssc __read_mostly = -1;
+int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
- .class = PCI_CLASS_DISPLAY_VGA << 8, \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ udelay(10);
+}
+
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
- __gen6_gt_force_wake_get(dev_priv);
+ dev_priv->display.force_wake_get(dev_priv);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ POSTING_READ(FORCEWAKE_MT);
+}
+
/*
* see gen6_gt_force_wake_get()
*/
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_put(dev_priv);
+ dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
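gen6_gt_force_wake_get()/put() above keep a refcount so that only the first get and the last put touch the hardware, while the plain and MT forcewake variants are selected through dev_priv->display. A stripped-down sketch of the refcounting pattern, with hypothetical names:
/* Only the 0->1 transition asserts forcewake, only 1->0 releases it. */
static void refcounted_get(atomic_t *count, void (*hw_get)(void))
{
	if (atomic_add_return(1, count) == 1)
		hw_get();
}

static void refcounted_put(atomic_t *count, void (*hw_put)(void))
{
	if (atomic_dec_and_test(count))
		hw_put();
}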
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4fd74..554bef7a3b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
+struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@@ -126,6 +127,9 @@ struct drm_i915_master_private {
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
struct drm_i915_fence_reg {
struct list_head lru_list;
@@ -168,7 +172,7 @@ struct drm_i915_error_state {
u32 instdone1;
u32 seqno;
u64 bbaddr;
- u64 fence[16];
+ u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_object {
int page_count;
@@ -182,7 +186,7 @@ struct drm_i915_error_state {
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
- s32 fence_reg:5;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
s32 pinned:2;
u32 tiling:2;
u32 dirty:1;
@@ -218,6 +222,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -375,7 +381,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
int crt_ddc_pin;
- struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+ struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -506,7 +512,7 @@ typedef struct drm_i915_private {
u8 saveAR[21];
u8 saveDACMASK;
u8 saveCR[37];
- uint64_t saveFENCE[16];
+ uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u32 saveCURACNTR;
u32 saveCURAPOS;
u32 saveCURABASE;
@@ -707,6 +713,7 @@ typedef struct drm_i915_private {
u64 last_count1;
unsigned long last_time1;
+ unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@@ -777,10 +784,8 @@ struct drm_i915_gem_object {
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
- *
- * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
- signed int fence_reg:5;
+ signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
/**
* Advice: are the backing pages purgeable?
@@ -997,12 +1002,12 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
-extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
-extern unsigned int i915_enable_fbc __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
+extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -1307,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1351,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d18b07adcff..8359dc77704 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct drm_i915_gem_object *obj = reg->obj;
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* so emit a request to do so.
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request)
+ if (request) {
ret = i915_add_request(obj->ring, NULL, request);
- else
+ if (ret)
+ kfree(request);
+ } else
ret = -ENOMEM;
}
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* On Gen6, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
- for (i = 0; i < 16; i++)
+ for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
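The i915_gem_busy_ioctl hunk above plugs a small leak: the freshly allocated request is now freed again when i915_add_request() fails instead of being dropped on the floor. A minimal userspace sketch of that allocate/submit/free-on-error shape; the names (submit_request, flush_busy_object) are placeholders, not the driver's API:

#include <stdlib.h>
#include <errno.h>

struct request { int dummy; };

/* Placeholder for i915_add_request(); pretend submission can fail. */
static int submit_request(struct request *rq)
{
        (void)rq;
        return -EIO;
}

/* Allocate, try to submit, and free again on failure so nothing leaks. */
static int flush_busy_object(void)
{
        struct request *rq = calloc(1, sizeof(*rq));
        int ret;

        if (!rq)
                return -ENOMEM;

        ret = submit_request(rq);
        if (ret)
                free(rq);       /* submission failed: the request was never queued */

        return ret;
}

int main(void)
{
        return flush_busy_object() ? 1 : 0;
}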
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f..b9da8900ae4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Disable semaphores on SNB */
+ if (INTEL_INFO(dev)->gen == 6)
+ return 0;
+
+ return 1;
+}
+
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
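The new intel_enable_semaphores() helper treats i915_semaphores as a tri-state: a non-negative value wins outright, otherwise the driver decides per generation and keeps semaphores off on gen 6 because of the SNB hangs noted above. A standalone sketch of that policy; the gen checks and the >= 0 override come from the hunk, while the -1 "auto" default is an assumption (it is set in module-parameter code outside this diff):

#include <stdio.h>
#include <stdbool.h>

/* Module parameter: -1 = auto (assumed default), 0 = force off, 1 = force on. */
static int i915_semaphores = -1;

/* Mirrors the intel_enable_semaphores() decision in the hunk above. */
static bool enable_semaphores(int gen)
{
        if (gen < 6)
                return false;            /* hardware has no semaphores */
        if (i915_semaphores >= 0)
                return i915_semaphores;  /* explicit user override */
        if (gen == 6)
                return false;            /* implicated in SNB hangs, default off */
        return true;                     /* gen 7+ defaults on */
}

int main(void)
{
        for (int gen = 5; gen <= 7; gen++)
                printf("gen %d -> semaphores %s\n", gen,
                       enable_semaphores(gen) ? "on" : "off");
        return 0;
}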
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9ee2729fe5c..b40004b5597 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a09416e611..a26d5b0a369 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,12 +1553,21 @@
*/
#define PP_READY (1 << 30)
#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_ON (1 << 28)
-#define PP_SEQUENCE_OFF (2 << 28)
-#define PP_SEQUENCE_MASK 0x30000000
+#define PP_SEQUENCE_POWER_UP (1 << 28)
+#define PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define PP_SEQUENCE_MASK (3 << 28)
+#define PP_SEQUENCE_SHIFT 28
#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
-#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
#define PP_SEQUENCE_STATE_MASK 0x0000000f
+#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
+#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
+#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
+#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
@@ -3294,10 +3303,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3438,12 +3447,38 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GEN6_UCGCTL2 0x9404
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
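The reworked PP_STATUS definitions above split the register into a two-bit sequence field at bit 28 and a four-bit sequence-state nibble at bit 0, which is what the new panel wait helpers in intel_dp.c mask against. A small sketch of decoding a raw status word with those masks; PP_ON is assumed to be bit 31 (defined elsewhere in i915_reg.h) and the sample value is made up:

#include <stdio.h>
#include <stdint.h>

#define PP_ON                     (1u << 31)     /* assumed, defined outside this hunk */
#define PP_SEQUENCE_MASK          (3u << 28)
#define PP_SEQUENCE_SHIFT         28
#define PP_SEQUENCE_STATE_MASK    0x0000000fu
#define PP_SEQUENCE_STATE_ON_IDLE (0x8u << 0)

int main(void)
{
        uint32_t status = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;   /* fabricated sample */

        printf("panel on:       %d\n", !!(status & PP_ON));
        printf("sequence field: %u\n",
               (unsigned)((status & PP_SEQUENCE_MASK) >> PP_SEQUENCE_SHIFT));
        printf("sequence state: 0x%x\n",
               (unsigned)(status & PP_SEQUENCE_STATE_MASK));
        return 0;
}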
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f8f602d7665..7886e4fb60e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Fences */
switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 981b1f1c04d..daa5743ccbd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
-
#include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -4669,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
+ * @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@@ -4680,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp)
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4711,7 +4715,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
lvds_bpc = 6;
if (lvds_bpc < display_bpc) {
- DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
display_bpc = lvds_bpc;
}
continue;
@@ -4722,7 +4726,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
if (edp_bpc < display_bpc) {
- DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
continue;
@@ -4737,7 +4741,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
/* Don't use an invalid EDID bpc value */
if (connector->display_info.bpc &&
connector->display_info.bpc < display_bpc) {
- DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
display_bpc = connector->display_info.bpc;
}
}
@@ -4748,15 +4752,20 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
*/
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
if (display_bpc > 8 && display_bpc < 12) {
- DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+ DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
display_bpc = 12;
} else {
- DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+ DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
display_bpc = 8;
}
}
}
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@@ -4789,8 +4798,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
display_bpc = min(display_bpc, bpc);
- DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
- bpc, display_bpc);
+ DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+ bpc, display_bpc);
*pipe_bpp = display_bpc * 3;
@@ -5018,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5479,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@@ -5671,7 +5690,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
- pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+ pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -7188,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
+ drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -7211,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
- ret = drm_vblank_get(dev, intel_crtc->pipe);
- if (ret)
- goto cleanup_objs;
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
@@ -7237,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7246,6 +7265,8 @@ cleanup_objs:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
kfree(work);
return ret;
@@ -7886,6 +7907,31 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Disable rc6 on Sandybridge
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+ return 0;
+ }
+ DRM_DEBUG_DRIVER("RC6 enabled\n");
+ return 1;
+}
+
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7922,7 +7968,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (i915_enable_rc6)
+ if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
@@ -8148,6 +8194,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
@@ -8357,7 +8417,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!i915_enable_rc6)
+ if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
@@ -8476,6 +8536,28 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
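intel_choose_pipe_bpp_dither() now takes the mode so DP can force 6bpc for modes that only fit the link at reduced depth, after the usual clamping against LVDS/eDP/EDID limits. A loose sketch of that clamp-then-override order, using a plain array of per-sink depths instead of the connector list; this is the decision shape only, not the real function:

#include <stdio.h>
#include <stdbool.h>

#define MODE_DP_FORCE_6BPC 0x10   /* stand-in for INTEL_MODE_DP_FORCE_6BPC */

/* Clamp the framebuffer depth against each sink, then apply the DP 6bpc
 * override; returns true when dithering is needed (pipe depth was reduced). */
static bool choose_pipe_bpc(unsigned fb_bpc, const unsigned *sink_bpc, int n,
                            unsigned mode_flags, unsigned *pipe_bpc)
{
        unsigned display_bpc = fb_bpc;

        for (int i = 0; i < n; i++)
                if (sink_bpc[i] && sink_bpc[i] < display_bpc)
                        display_bpc = sink_bpc[i];   /* clamp to the weakest sink */

        if (mode_flags & MODE_DP_FORCE_6BPC)
                display_bpc = 6;                     /* DP fallback for large modes */

        *pipe_bpc = display_bpc;
        return display_bpc < fb_bpc;                 /* dither when depth was dropped */
}

int main(void)
{
        unsigned sinks[] = { 8, 6 };   /* e.g. an EDID-limited panel */
        unsigned pipe_bpc;
        bool dither = choose_pipe_bpc(8, sinks, 2, 0, &pipe_bpc);
        printf("pipe bpc %u, dither %d\n", pipe_bpc, dither);
        return 0;
}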
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09b318b0227..92b041b66e4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,6 @@ struct intel_dp {
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
- uint8_t link_status[DP_LINK_STATUS_SIZE];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
- unsigned long panel_off_jiffies;
};
/**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
- int max_lane_count = 4;
-
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
- max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
- switch (max_lane_count) {
- case 1: case 2: case 4:
- break;
- default:
- max_lane_count = 4;
- }
+ int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
}
return max_lane_count;
}
@@ -214,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
- if (intel_crtc)
+ if (check_bpp)
+ bpp = check_bpp;
+ else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
@@ -239,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -248,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (intel_dp_link_required(intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes))
- return MODE_CLOCK_HIGH;
+ mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(intel_dp,
+ mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+ mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+ }
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -368,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev))
- aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@@ -678,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -695,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -768,12 +774,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
continue;
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
+ {
lane_count = intel_dp->lane_count;
break;
- } else if (is_edp(intel_dp)) {
- lane_count = dev_priv->edp.lanes;
- break;
}
}
@@ -810,6 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +828,32 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
ironlake_edp_pll_off(encoder);
}
- intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- intel_dp->DP |= intel_dp->color_range;
+ /*
+ * There are four kinds of DP registers:
+ *
+ * IBX PCH
+ * SNB CPU
+ * IVB CPU
+ * CPT PCH
+ *
+ * IBX PCH and CPU are the same for almost everything,
+ * except that the CPU DP PLL is configured in this
+ * register
+ *
+ * CPT PCH is quite different, having many bits moved
+ * to the TRANS_DP_CTL register instead. That
+ * configuration happens (oddly) in ironlake_pch_enable
+ */
+
+ /* Preserve the BIOS-computed detected bit. This is
+ * supposed to be read-only.
+ */
+ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
+ /* Handle DP bits in common between all three register formats */
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- else
- intel_dp->DP |= DP_LINK_TRAIN_OFF;
+ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
switch (intel_dp->lane_count) {
case 1:
@@ -852,59 +872,124 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
-
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- intel_dp->DP |= DP_ENHANCED_FRAMING;
}
- /* CPT DP's pipe select is decided in TRANS_DP_CTL */
- if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
- intel_dp->DP |= DP_PIPEB_SELECT;
+ /* Split out the IBX/CPU vs CPT settings */
+
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
- if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ intel_dp->DP |= intel_dp->color_range;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ if (intel_crtc->pipe == 1)
+ intel_dp->DP |= DP_PIPEB_SELECT;
+
+ if (is_cpu_edp(intel_dp)) {
+ /* don't miss out required setting for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ }
+ } else {
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
}
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+ u32 mask,
+ u32 value)
{
- unsigned long off_time;
- unsigned long delay;
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- DRM_DEBUG_KMS("Wait for panel power off time\n");
+ DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
- if (ironlake_edp_have_panel_power(intel_dp) ||
- ironlake_edp_have_panel_vdd(intel_dp))
- {
- DRM_DEBUG_KMS("Panel still on, no delay needed\n");
- return;
+ if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+ DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+ I915_READ(PCH_PP_STATUS),
+ I915_READ(PCH_PP_CONTROL));
}
+}
- off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
- if (time_after(jiffies, off_time)) {
- DRM_DEBUG_KMS("Time already passed");
- return;
- }
- delay = jiffies_to_msecs(off_time - jiffies);
- if (delay > intel_dp->panel_power_down_delay)
- delay = intel_dp->panel_power_down_delay;
- DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
- msleep(delay);
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power on\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power off time\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+ DRM_DEBUG_KMS("Wait for panel power cycle\n");
+ ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+{
+ u32 control = I915_READ(PCH_PP_CONTROL);
+
+ control &= ~PANEL_UNLOCK_MASK;
+ control |= PANEL_UNLOCK_REGS;
+ return control;
}
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
@@ -921,15 +1006,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
"eDP VDD already requested on\n");
intel_dp->want_panel_vdd = true;
+
if (ironlake_edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("eDP VDD already on\n");
return;
}
- ironlake_wait_panel_off(intel_dp);
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ if (!ironlake_edp_have_panel_power(intel_dp))
+ ironlake_wait_panel_power_cycle(intel_dp);
+
+ pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1038,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
u32 pp;
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1046,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
/* Make sure sequencer is idle before allowing subsequent activity */
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
- intel_dp->panel_off_jiffies = jiffies;
+
+ msleep(intel_dp->panel_power_down_delay);
}
}
@@ -972,9 +1057,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp->base.base.dev;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
}
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1069,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
-
+
intel_dp->want_panel_vdd = false;
if (sync) {
@@ -1000,23 +1085,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
}
}
-/* Returns true if the panel was already on when called */
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
+ u32 pp;
if (!is_edp(intel_dp))
return;
- if (ironlake_edp_have_panel_power(intel_dp))
+
+ DRM_DEBUG_KMS("Turn eDP power on\n");
+
+ if (ironlake_edp_have_panel_power(intel_dp)) {
+ DRM_DEBUG_KMS("eDP power already on\n");
return;
+ }
- ironlake_wait_panel_off(intel_dp);
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ ironlake_wait_panel_power_cycle(intel_dp);
+ pp = ironlake_get_pp_control(dev_priv);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1112,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
pp |= POWER_TARGET_ON;
+ if (!IS_GEN5(dev))
+ pp |= PANEL_POWER_RESET;
+
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
- 5000))
- DRM_ERROR("panel on wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ ironlake_wait_panel_on(intel_dp);
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1127,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
}
}
-static void ironlake_edp_panel_off(struct drm_encoder *encoder)
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
- PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+ u32 pp;
if (!is_edp(intel_dp))
return;
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
- if (IS_GEN5(dev)) {
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- }
+ DRM_DEBUG_KMS("Turn eDP power off\n");
- intel_dp->panel_off_jiffies = jiffies;
+ WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
- if (IS_GEN5(dev)) {
- pp &= ~POWER_TARGET_ON;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- pp &= ~POWER_TARGET_ON;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- msleep(intel_dp->panel_power_cycle_delay);
-
- if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
- DRM_ERROR("panel off wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ pp = ironlake_get_pp_control(dev_priv);
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
- }
+ ironlake_wait_panel_off(intel_dp);
}
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1165,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
* allowing it to appear.
*/
msleep(intel_dp->backlight_on_delay);
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1181,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("\n");
- pp = I915_READ(PCH_PP_CONTROL);
- pp &= ~PANEL_UNLOCK_MASK;
- pp |= PANEL_UNLOCK_REGS;
+ pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1187,17 +1249,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
/* Wake up the sink first */
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
/* Make sure the panel is off before trying to
* change the mode
*/
- ironlake_edp_backlight_off(intel_dp);
- intel_dp_link_down(intel_dp);
- ironlake_edp_panel_off(encoder);
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1274,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
intel_dp_start_link_train(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
-
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
@@ -1230,16 +1292,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
ironlake_edp_panel_vdd_on(intel_dp);
- if (is_edp(intel_dp))
- ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
- ironlake_edp_panel_off(encoder);
- if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
- ironlake_edp_pll_off(encoder);
ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_off(encoder);
} else {
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_on(encoder);
+
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
@@ -1247,7 +1313,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
- ironlake_edp_backlight_on(intel_dp);
} else
ironlake_edp_panel_vdd_off(intel_dp, false);
ironlake_edp_backlight_on(intel_dp);
@@ -1285,11 +1350,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp)
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_aux_native_read_retry(intel_dp,
DP_LANE0_1_STATUS,
- intel_dp->link_status,
+ link_status,
DP_LINK_STATUS_SIZE);
}
@@ -1301,27 +1366,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
}
static uint8_t
-intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
int lane)
{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
- uint8_t l = intel_dp_link_status(link_status, i);
+ uint8_t l = adjust_request[lane>>1];
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
int lane)
{
- int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
- uint8_t l = intel_dp_link_status(link_status, i);
+ uint8_t l = adjust_request[lane>>1];
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
@@ -1343,34 +1406,63 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
}
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp)
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
+ uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
+ uint8_t voltage_max;
+ uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
- uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+ uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
if (this_v > v)
v = this_v;
@@ -1378,18 +1470,20 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
p = this_p;
}
- if (v >= I830_DP_VOLTAGE_MAX)
- v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+ voltage_max = intel_dp_voltage_max(intel_dp);
+ if (v >= voltage_max)
+ v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -1454,13 +1548,43 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
- int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
- uint8_t l = intel_dp_link_status(link_status, i);
+ uint8_t l = link_status[lane>>1];
return (l >> s) & 0xf;
}
@@ -1485,18 +1609,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp)
+intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
- lane_align = intel_dp_link_status(intel_dp->link_status,
+ lane_align = intel_dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- lane_status = intel_get_lane_status(intel_dp->link_status, lane);
+ lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
@@ -1521,8 +1645,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
- intel_dp->train_set, 4);
- if (ret != 4)
+ intel_dp->train_set,
+ intel_dp->lane_count);
+ if (ret != intel_dp->lane_count)
return false;
return true;
@@ -1538,7 +1663,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int i;
uint8_t voltage;
bool clock_recovery = false;
- int tries;
+ int voltage_tries, loop_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
@@ -1559,26 +1684,35 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
- tries = 0;
+ voltage_tries = 0;
+ loop_tries = 0;
clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_edp(intel_dp)) {
+
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+ DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1590,10 +1724,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_dp))
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("failed to get link status\n");
break;
+ }
- if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
}
@@ -1602,20 +1739,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count)
- break;
+ if (i == intel_dp->lane_count) {
+ ++loop_tries;
+ if (loop_tries == 5) {
+ DRM_DEBUG_KMS("too many full retries, give up\n");
+ break;
+ }
+ memset(intel_dp->train_set, 0, 4);
+ voltage_tries = 0;
+ continue;
+ }
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
+ ++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("too many voltage retries, give up\n");
break;
+ }
} else
- tries = 0;
+ voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
+ intel_get_adjust_train(intel_dp, link_status);
}
intel_dp->DP = DP;
@@ -1638,6 +1785,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,15 +1793,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN6(dev) && is_edp(intel_dp)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1665,17 +1816,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_dp))
+ if (!intel_dp_get_link_status(intel_dp, link_status))
break;
/* Make sure clock is still ok */
- if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
- if (intel_channel_eq_ok(intel_dp)) {
+ if (intel_channel_eq_ok(intel_dp, link_status)) {
channel_eq = true;
break;
}
@@ -1690,11 +1841,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
}
/* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
+ intel_get_adjust_train(intel_dp, link_status);
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1724,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1735,8 +1886,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
- if (is_edp(intel_dp))
- DP |= DP_LINK_TRAIN_OFF;
+ if (is_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
+ DP |= DP_LINK_TRAIN_OFF_CPT;
+ else
+ DP |= DP_LINK_TRAIN_OFF;
+ }
if (!HAS_PCH_CPT(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1822,6 +1977,7 @@ static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
u8 sink_irq_vector;
+ u8 link_status[DP_LINK_STATUS_SIZE];
if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1830,7 +1986,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* Try to read receiver status if the link appears to be up */
- if (!intel_dp_get_link_status(intel_dp)) {
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_down(intel_dp);
return;
}
@@ -1855,7 +2011,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- if (!intel_channel_eq_ok(intel_dp)) {
+ if (!intel_channel_eq_ok(intel_dp, link_status)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2335,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
continue;
intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+ if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
@@ -2321,7 +2478,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
-
+
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
@@ -2354,11 +2511,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
- intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
-
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
+
if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
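intel_dp_mode_valid() above now retries the link-budget check at 18 bpp and tags the mode with INTEL_MODE_DP_FORCE_6BPC instead of rejecting it, using the rounded pixel_clock * bpp / 10 cost from intel_dp_link_required(). A standalone sketch of that fallback; the maximum link rate is passed in as an illustrative number because intel_dp_max_data_rate() is outside this hunk:

#include <stdio.h>

#define MODE_DP_FORCE_6BPC 0x10   /* stand-in for INTEL_MODE_DP_FORCE_6BPC */

/* Link budget needed for a mode, pixel_clock * bpp / 10 rounded up as above. */
static int link_required(int pixel_clock, int bpp)
{
        return (pixel_clock * bpp + 9) / 10;
}

/* Returns 0 if the mode fits, setting *flags when it only fits at 6bpc;
 * returns -1 when even 18 bpp does not fit (MODE_CLOCK_HIGH in the driver). */
static int mode_fits(int pixel_clock, int max_rate, unsigned *flags)
{
        if (link_required(pixel_clock, 24) <= max_rate)
                return 0;                       /* full 8bpc fits */
        if (link_required(pixel_clock, 18) <= max_rate) {
                *flags |= MODE_DP_FORCE_6BPC;   /* fits only with 6bpc dithering */
                return 0;
        }
        return -1;
}

int main(void)
{
        unsigned flags = 0;
        /* 200 MHz pixel clock against an illustrative 432000-unit link budget. */
        int ret = mode_fits(200000, 432000, &flags);
        printf("ret %d flags 0x%x\n", ret, flags);
        return 0;
}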
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604b73d..a1b4343814e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520d..e44191132ac 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 499d4c0dbee..04d79fd1dc9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (IS_PINEVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
- } else {
+ else
max >>= 16;
- if (INTEL_INFO(dev)->gen < 4)
- max &= ~1;
- }
if (is_backlight_combination_mode(dev))
max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (IS_PINEVIEW(dev))
+ if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (IS_PINEVIEW(dev)) {
- tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
- } else
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
@@ -326,7 +320,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
static int intel_panel_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
- return intel_panel_get_backlight(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->backlight_level;
}
static const struct backlight_ops intel_panel_bl_ops = {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aef..f7b9268df26 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
return status;
}
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
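intel_sdvo_connector_matches_edid() boils SDVO detection down to comparing two booleans: is the connector a digital output (TMDS/LVDS) and does the EDID declare a digital input. A trimmed sketch of that comparison; the flag values here are stand-ins, only the role of DRM_EDID_INPUT_DIGITAL is taken from the hunk:

#include <stdio.h>
#include <stdbool.h>

#define EDID_INPUT_DIGITAL (1u << 7)   /* stand-in for DRM_EDID_INPUT_DIGITAL */
#define OUT_TMDS (1u << 0)             /* stand-in output-capability flags */
#define OUT_LVDS (1u << 1)
#define OUT_RGB  (1u << 2)

/* Accept the EDID only when a digital connector sees a digital monitor
 * (and an analog connector sees an analog one), as in the hunk above. */
static bool connector_matches_edid(unsigned output_flag, unsigned char edid_input)
{
        bool monitor_is_digital   = !!(edid_input & EDID_INPUT_DIGITAL);
        bool connector_is_digital = !!(output_flag & (OUT_TMDS | OUT_LVDS));

        return connector_is_digital == monitor_is_digital;
}

int main(void)
{
        printf("TMDS vs digital EDID: %d\n",
               connector_matches_edid(OUT_TMDS, EDID_INPUT_DIGITAL));
        printf("VGA  vs digital EDID: %d\n",
               connector_matches_edid(OUT_RGB, EDID_INPUT_DIGITAL));
        return 0;
}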
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ddbabefb427..b12fd2c8081 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct nouveau_bo *bo;
+ int ret;
+
+ args->pitch = roundup(args->width * (args->bpp / 8), 256);
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(bo->gem);
+ return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *poffset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gem) {
+ struct nouveau_bo *bo = gem->driver_private;
+ *poffset = bo->bo.addr_space_offset;
+ drm_gem_object_unreference_unlocked(gem);
+ return 0;
+ }
+
+ return -ENOENT;
+}
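nouveau_display_dumb_create() above derives the dumb buffer pitch and allocation size with 256-byte and page rounding before allocating the BO. The same arithmetic as a tiny standalone program; PAGE_SIZE is hard-coded to 4096 here for illustration, the kernel uses the architecture's page size:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u   /* assumed; the kernel uses the arch page size */

static uint32_t roundup_to(uint32_t x, uint32_t align)
{
        return (x + align - 1) / align * align;
}

int main(void)
{
        uint32_t width = 1366, height = 768, bpp = 32;

        /* pitch rounded to 256 bytes, total size rounded to a whole page,
         * exactly as in nouveau_display_dumb_create() above */
        uint32_t pitch = roundup_to(width * (bpp / 8), 256);
        uint32_t size  = roundup_to(pitch * height, PAGE_SIZE);

        printf("pitch %u bytes, size %u bytes\n",
               (unsigned)pitch, (unsigned)size);
        return 0;
}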
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9f7bb129526..9791d13c9e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
+ .dumb_create = nouveau_display_dumb_create,
+ .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_destroy = nouveau_display_dumb_destroy,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098..4c0be3a4ed8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t handle);
/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540ae..960c0ae0c0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size - base);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe4..c8a463b76c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
+ nvbe->unmap_pages = false;
}
+
+ nvbe->pages = NULL;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d6..06de250fe61 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index a74e501afd2..ecfafd70cf0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
+ nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
/*
* TP ROP UNKVAL(magic_not_rop_nr)
* 450: 4/0/0/0 2 3
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d7..cb006a718e7 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
continue;
if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->or) {
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
return;
break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87631fede1f..2b97262e3ab 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0: /* 1KB rows */
+ default:
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+ break;
+ case 1: /* 2KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+ break;
+ case 2: /* 4KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+ break;
+ }
+
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
+ } else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d603a3335d..92c9628c572 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -3271,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
}
+
+ /* Don't start up if the MC ucode is missing on BTC parts.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
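The page-flip hunk above, like the matching r100/rs600/rv770 hunks later in this diff, replaces an unbounded busy-wait on the surface-update-pending bit with a poll bounded by usec_timeout. A minimal sketch of that pattern, with surface_update_pending() and delay_us() as hypothetical stand-ins for the RREG32() test and udelay():

/* Illustrative sketch only; surface_update_pending() and delay_us() are
 * hypothetical stand-ins for the driver's register read and udelay(). */
extern int surface_update_pending(void);
extern void delay_us(unsigned int us);

static int wait_for_update_pending(unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (surface_update_pending())
			return 0;	/* bit went high within the timeout */
		delay_us(1);		/* back off 1us between polls */
	}
	return -1;			/* give up instead of hanging the CPU */
}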
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea757..cd4590aae15 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
u32 npipes;
+ u32 row_size;
/* value we track */
u32 nsamples;
u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
struct radeon_bo *db_s_write_bo;
};
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+ switch (row_size) {
+ case 1:
+ default:
+ return ADDR_SURF_TILE_SPLIT_1KB;
+ case 2:
+ return ADDR_SURF_TILE_SPLIT_2KB;
+ case 4:
+ return ADDR_SURF_TILE_SPLIT_4KB;
+ }
+}
+
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
break;
case DB_Z_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
track->db_z_info = radeon_get_ib_value(p, idx);
- ib[idx] &= ~Z_ARRAY_MODE(0xf);
- track->db_z_info &= ~Z_ARRAY_MODE(0xf);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] &= ~Z_ARRAY_MODE(0xf);
+ track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
}
break;
case DB_STENCIL_INFO:
@@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_INFO:
case CB_COLOR6_INFO:
case CB_COLOR7_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR8_INFO:
case CB_COLOR9_INFO:
case CB_COLOR10_INFO:
case CB_COLOR11_INFO:
- r = evergreen_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR0_PITCH:
@@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
break;
case CB_COLOR0_DIM:
case CB_COLOR1_DIM:
@@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx+1+(i*8)+6] |=
+ TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx+1+(i*8)+7] |=
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
+ }
texture = reloc->robj;
/* tex mip base */
r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
+ u32 tmp;
int r;
if (p->track == NULL) {
@@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- track->npipes = p->rdev->config.evergreen.tiling_npipes;
- track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
- track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
p->track = track;
}
do {
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c345..7d7f2155e34 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d..e00039e59a7 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea4990..bfc08f6320f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
- while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652..c93bc64707e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_TXO_MACRO_TILE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_TXO_MICRO_TILE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
-
- tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ if (p->keep_tiling_flags) {
+ ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+ ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+ } else {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
+ if (!p->keep_tiling_flags) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_COLOR_TILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_COLOR_MICROTILE_ENABLE;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_COLOR_TILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
-
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= R300_DEPTHMACROTILE_ENABLE;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_DEPTHMICROTILE_TILED;
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
- tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+ if (!p->keep_tiling_flags) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
- tmp = idx_value & ~(0x7 << 16);
- tmp |= tile_flags;
- ib[idx] = tmp;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+ tmp = idx_value & ~(0x7 << 16);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
+ }
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c155..cb1acffd243 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!p->keep_tiling_flags &&
+ r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!p->keep_tiling_flags &&
+ r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
- if (tiling_flags & RADEON_TILING_MACRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
- word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
word1 = radeon_get_ib_value(p, idx + 1);
w0 = G_038000_TEX_WIDTH(word0) + 1;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ if (!p->keep_tiling_flags) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+ }
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fc5a1d642cb..8227e76b5c7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -611,7 +611,8 @@ struct radeon_cs_parser {
struct radeon_ib *ib;
void *track;
unsigned family;
- int parser_error;
+ int parser_error;
+ bool keep_tiling_flags;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7..3516a6081dc 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
acpi_handle handle;
int ret;
- /* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
- return 0;
-
/* Get the device handle */
handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d2d179267af..d24baf30efc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+ ATOM_GPIO_I2C_ASSIGMENT *gpio,
+ u8 index)
+{
+ /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+ if ((rdev->family == CHIP_R420) ||
+ (rdev->family == CHIP_R423) ||
+ (rdev->family == CHIP_RV410)) {
+ if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+ gpio->ucClkMaskShift = 0x19;
+ gpio->ucDataMaskShift = 0x18;
+ }
+ }
+
+ /* some evergreen boards have bad data for this entry */
+ if (ASIC_IS_DCE4(rdev)) {
+ if ((index == 7) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+ (gpio->sucI2cId.ucAccess == 0)) {
+ gpio->sucI2cId.ucAccess = 0x97;
+ gpio->ucDataMaskShift = 8;
+ gpio->ucDataEnShift = 8;
+ gpio->ucDataY_Shift = 8;
+ gpio->ucDataA_Shift = 8;
+ }
+ }
+
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((index == 4) &&
+ (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+ struct radeon_i2c_bus_rec i2c;
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg)
+ i2c.valid = true;
+ else
+ i2c.valid = false;
+
+ return i2c;
+}
+
static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
-
- /* some DCE3 boards have bad data for this entry */
- if (ASIC_IS_DCE3(rdev)) {
- if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
- (gpio->sucI2cId.ucAccess == 0x94))
- gpio->sucI2cId.ucAccess = 0x14;
- }
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
if (gpio->sucI2cId.ucAccess == id) {
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
-
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- if (i2c.mask_clk_reg)
- i2c.valid = true;
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
}
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
int i, num_indices;
char stmp[32];
- memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
-
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
- i2c.valid = false;
-
- /* some evergreen boards have bad data for this entry */
- if (ASIC_IS_DCE4(rdev)) {
- if ((i == 7) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
- (gpio->sucI2cId.ucAccess == 0)) {
- gpio->sucI2cId.ucAccess = 0x97;
- gpio->ucDataMaskShift = 8;
- gpio->ucDataEnShift = 8;
- gpio->ucDataY_Shift = 8;
- gpio->ucDataA_Shift = 8;
- }
- }
-
- /* some DCE3 boards have bad data for this entry */
- if (ASIC_IS_DCE3(rdev)) {
- if ((i == 4) &&
- (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
- (gpio->sucI2cId.ucAccess == 0x94))
- gpio->sucI2cId.ucAccess = 0x14;
- }
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
+ radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
+ i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- if (i2c.mask_clk_reg) {
- i2c.valid = true;
+ if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
@@ -1996,14 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+ if (!rdev->pm.power_state[state_index].clock_info)
+ return state_index;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
case 1:
- rdev->pm.power_state[state_index].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
- if (!rdev->pm.power_state[state_index].clock_info)
- return state_index;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2039,11 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 2:
- rdev->pm.power_state[state_index].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
- if (!rdev->pm.power_state[state_index].clock_info)
- return state_index;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2080,11 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
state_index++;
break;
case 3:
- rdev->pm.power_state[state_index].clock_info =
- kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
- if (!rdev->pm.power_state[state_index].clock_info)
- return state_index;
- rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ccaa243c144..29afd71e084 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i;
+ unsigned size, i, flags = 0;
if (!cs->num_chunks) {
return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
+ !p->chunks[i].length_dw) {
+ return -EINVAL;
+ }
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].user_ptr, size)) {
return -EFAULT;
}
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ flags = p->chunks[i].kdata[0];
+ }
} else {
p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
+
+ p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a0b35e90948..71499fc3daf 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
+ * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 11
+#define KMS_DRIVER_MINOR 12
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
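The minor-version bump above advertises the new RADEON_CS_KEEP_TILING_FLAGS behaviour; userspace would normally gate its use on the KMS driver version. A hedged sketch using libdrm's drmGetVersion() (the helper name and surrounding policy are illustrative, not part of this patch):

/* Sketch: only request RADEON_CS_KEEP_TILING_FLAGS when the kernel
 * reports KMS driver 2.12 or newer. Assumes libdrm's drmGetVersion(). */
#include <stdbool.h>
#include <xf86drm.h>

static bool radeon_can_keep_tiling_flags(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	bool ok;

	if (!v)
		return false;
	ok = v->version_major > 2 ||
	     (v->version_major == 2 && v->version_minor >= 12);
	drmFreeVersion(v);
	return ok;
}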
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a92..4b27efa4405 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return true;
+ return radeon_encoder->encoder_id;
default:
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
}
-
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f6..b1053d64042 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab8..23ae1c60ab3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 617b64678fc..0bb0f5f713e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -574,10 +574,16 @@ retry:
return ret;
spin_lock(&glob->lru_lock);
+
+ if (unlikely(list_empty(&bo->ddestroy))) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+
ret = ttm_bo_reserve_locked(bo, interruptible,
no_wait_reserve, false, 0);
- if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
+ if (unlikely(ret != 0)) {
spin_unlock(&glob->lru_lock);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cca91a93bd..dc279706ca7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 03bbc2a6f9a..a0c2f12b1e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
- hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ hwversion = ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
+
if (hwversion == 0)
return false;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1..66917c6c381 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
- param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+ const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ param->value =
+ ioread32(fifo_mem +
+ ((fifo->capabilities &
+ SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+ SVGA_FIFO_3D_HWVERSION_REVISED :
+ SVGA_FIFO_3D_HWVERSION));
break;
}
default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_no_fb;
}
-
vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
- if (!vfb->dmabuf) {
- DRM_ERROR("Framebuffer not dmabuf backed.\n");
- ret = -EINVAL;
- goto out_no_fb;
- }
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 880e285d757..f94b33ae221 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,6 +31,44 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects number of @rects against @clip storing the
+ * results in @out_rects and the number of passed rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+ int num_rects,
+ struct vmw_clip_rect clip,
+ SVGASignedRect *out_rects,
+ int *out_num)
+{
+ int i, k;
+
+ for (i = 0, k = 0; i < num_rects; i++) {
+ int x1 = max_t(int, clip.x1, rects[i].x1);
+ int y1 = max_t(int, clip.y1, rects[i].y1);
+ int x2 = min_t(int, clip.x2, rects[i].x2);
+ int y2 = min_t(int, clip.y2, rects[i].y2);
+
+ if (x1 >= x2)
+ continue;
+ if (y1 >= y2)
+ continue;
+
+ out_rects[k].left = x1;
+ out_rects[k].top = y1;
+ out_rects[k].right = x2;
+ out_rects[k].bottom = y2;
+ k++;
+ }
+
+ *out_num = k;
+}
+
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+ int ret;
+
+ kmap_offset = 0;
+ kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+ hotspotX, hotspotY);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ return ret;
+}
+
+
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -EINVAL;
if (handle) {
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- handle, &surface);
- if (!ret) {
- if (!surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- vmw_surface_unreference(&surface);
- return -EINVAL;
- }
- } else {
- ret = vmw_user_dmabuf_lookup(tfile,
- handle, &dmabuf);
- if (ret) {
- DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
- return -EINVAL;
- }
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ handle, &surface, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
}
}
+ /* need to do this before taking down old image */
+ if (surface && !surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ vmw_surface_unreference(&surface);
+ return -EINVAL;
+ }
+
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_num;
- void *virtual;
- bool dummy;
-
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
- kmap_offset = 0;
- kmap_num = (64*64*4) >> PAGE_SHIFT;
-
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
- if (unlikely(ret != 0)) {
- DRM_ERROR("reserve failed\n");
- return -EINVAL;
- }
-
- ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0))
- goto err_unreserve;
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- vmw_cursor_update_image(dev_priv, virtual, 64, 64,
- du->hotspot_x, du->hotspot_y);
-
- ttm_bo_kunmap(&map);
-err_unreserve:
- ttm_bo_unreserve(&dmabuf->base);
-
+ ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+ du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
- struct drm_clip_rect *clips_ptr;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *clips_ptr;
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
} *cmd;
SVGASignedRect *blits;
-
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
}
+ /* setup blits pointer */
+ blits = (SVGASignedRect *)&cmd[1];
+
+ /* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
cmd->body.srcRect.bottom = bottom;
clips_ptr = clips;
- blits = (SVGASignedRect *)&cmd[1];
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- blits[i].left = clips_ptr->x1 - left;
- blits[i].right = clips_ptr->x2 - left;
- blits[i].top = clips_ptr->y1 - top;
- blits[i].bottom = clips_ptr->y2 - top;
+ tmp[i].x1 = clips_ptr->x1 - left;
+ tmp[i].x2 = clips_ptr->x2 - left;
+ tmp[i].y1 = clips_ptr->y1 - top;
+ tmp[i].y2 = clips_ptr->y2 - top;
}
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
- int clip_x1 = left - unit->crtc.x;
- int clip_y1 = top - unit->crtc.y;
- int clip_x2 = right - unit->crtc.x;
- int clip_y2 = bottom - unit->crtc.y;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left - unit->crtc.x;
+ clip.y1 = top - unit->crtc.y;
+ clip.x2 = right - unit->crtc.x;
+ clip.y2 = bottom - unit->crtc.y;
/* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* if no cliprects hit skip this */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
break;
}
+
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/
+ /* Surface must be marked as a scanout. */
+ if (unlikely(!surface->scanout))
+ return -EINVAL;
+
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+ int move_x, move_y;
/* skip any crtcs that misses the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
clip_x2 <= 0 || clip_y2 <= 0)
continue;
+ /* clip size to crtc size */
+ clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+ clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+ /* translate both src and dest to bring clip into screen */
+ move_x = min_t(int, clip_x1, 0);
+ move_y = min_t(int, clip_y1, 0);
+
+ /* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
- blits[hit_num].body.destRect.left = clip_x1;
- blits[hit_num].body.destRect.top = clip_y1;
+ blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+ blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+ blits[hit_num].body.destRect.left = clip_x1 - move_x;
+ blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
@@ -1003,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- u64 required_size;
int ret;
/**
@@ -1012,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* requested framebuffer.
*/
- required_size = mode_cmd->pitch * mode_cmd->height;
- if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+ if (!vmw_kms_validate_mode_vram(dev_priv,
+ mode_cmd->pitch,
+ mode_cmd->height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
- /**
- * End conditioned code.
- */
-
- ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
- mode_cmd->handle, &surface);
+ /* returns either a dmabuf or surface */
+ ret = vmw_user_lookup_handle(dev_priv, tfile,
+ mode_cmd->handle,
+ &surface, &bo);
if (ret)
- goto try_dmabuf;
-
- if (!surface->scanout)
- goto err_not_scanout;
-
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
- &vfb, mode_cmd);
-
- /* vmw_user_surface_lookup takes one ref so does new_fb */
- vmw_surface_unreference(&surface);
-
- if (ret) {
- DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- ttm_base_object_unref(&user_obj);
- return ERR_PTR(ret);
- } else
- vfb->user_obj = user_obj;
- return &vfb->base;
-
-try_dmabuf:
- DRM_INFO("%s: trying buffer\n", __func__);
-
- ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
- if (ret) {
- DRM_ERROR("failed to find buffer: %i\n", ret);
- return ERR_PTR(-ENOENT);
- }
-
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd);
+ goto err_out;
+
+ /* Create the new framebuffer depending on what we got back */
+ if (bo)
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ mode_cmd);
+ else if (surface)
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+ surface, &vfb, mode_cmd);
+ else
+ BUG();
- /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
- vmw_dmabuf_unreference(&bo);
+err_out:
+ /* vmw_user_lookup_handle takes one ref so does new_fb */
+ if (bo)
+ vmw_dmabuf_unreference(&bo);
+ if (surface)
+ vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
vfb->user_obj = user_obj;
return &vfb->base;
-
-err_not_scanout:
- DRM_ERROR("surface not marked as scanout\n");
- /* vmw_user_surface_lookup takes one ref */
- vmw_surface_unreference(&surface);
- ttm_base_object_unref(&user_obj);
-
- return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
+ int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
+ tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+ if (unlikely(tmp == NULL)) {
+ DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free_tmp;
+ }
+
+ left = clips->x;
+ right = clips->x + clips->w;
+ top = clips->y;
+ bottom = clips->y + clips->h;
+
+ for (i = 1; i < num_clips; i++) {
+ left = min_t(int, left, (int)clips[i].x);
+ right = max_t(int, right, (int)clips[i].x + clips[i].w);
+ top = min_t(int, top, (int)clips[i].y);
+ bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = 0;
- cmd->body.srcRect.right = surface->sizes[0].width;
- cmd->body.srcRect.top = 0;
- cmd->body.srcRect.bottom = surface->sizes[0].height;
blits = (SVGASignedRect *)&cmd[1];
+
+ cmd->body.srcRect.left = left;
+ cmd->body.srcRect.right = right;
+ cmd->body.srcRect.top = top;
+ cmd->body.srcRect.bottom = bottom;
+
for (i = 0; i < num_clips; i++) {
- blits[i].left = clips[i].x;
- blits[i].right = clips[i].x + clips[i].w;
- blits[i].top = clips[i].y;
- blits[i].bottom = clips[i].y + clips[i].h;
+ tmp[i].x1 = clips[i].x - left;
+ tmp[i].x2 = clips[i].x + clips[i].w - left;
+ tmp[i].y1 = clips[i].y - top;
+ tmp[i].y2 = clips[i].y + clips[i].h - top;
}
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
- int clip_x1 = destX - unit->crtc.x;
- int clip_y1 = destY - unit->crtc.y;
- int clip_x2 = clip_x1 + surface->sizes[0].width;
- int clip_y2 = clip_y1 + surface->sizes[0].height;
+ struct vmw_clip_rect clip;
+ int num;
+
+ clip.x1 = left + destX - unit->crtc.x;
+ clip.y1 = top + destY - unit->crtc.y;
+ clip.x2 = right + destX - unit->crtc.x;
+ clip.y2 = bottom + destY - unit->crtc.y;
 /* skip any crtcs that miss the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
+ if (clip.x1 >= unit->crtc.mode.hdisplay ||
+ clip.y1 >= unit->crtc.mode.vdisplay ||
+ clip.x2 <= 0 || clip.y2 <= 0)
continue;
+ /*
+ * In order for the clip rects to be correctly scaled
+ * the src and dest rects need to be the same size.
+ */
+ cmd->body.destRect.left = clip.x1;
+ cmd->body.destRect.right = clip.x2;
+ cmd->body.destRect.top = clip.y1;
+ cmd->body.destRect.bottom = clip.y2;
+
+ /* create a clip rect of the crtc in dest coords */
+ clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+ clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+ clip.x1 = 0 - clip.x1;
+ clip.y1 = 0 - clip.y1;
+
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
-
cmd->body.destScreenId = unit->unit;
- /*
- * The blit command is a lot more resilient then the
- * readback command when it comes to clip rects. So its
- * okay to go out of bounds.
- */
+ /* clip and write blits to cmd stream */
+ vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
- cmd->body.destRect.left = clip_x1;
- cmd->body.destRect.right = clip_x2;
- cmd->body.destRect.top = clip_y1;
- cmd->body.destRect.bottom = clip_y2;
+ /* skip this crtc if no cliprects were hit */
+ if (num == 0)
+ continue;
+ /* recalculate package length */
+ fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
}
kfree(cmd);
+out_free_tmp:
+ kfree(tmp);
return ret;
}
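vmw_kms_present() above first computes the union of all dirty rects (left, right, top, bottom) to use as the blit's source rect, then stores each clip relative to that bounding box so the per-crtc pass only has to translate and clip. A stand-alone sketch of that normalization, with simplified stand-in types rather than struct drm_vmw_rect and struct drm_clip_rect:

#include <stddef.h>

struct example_vrect { int x, y, w, h; };	/* stand-in for drm_vmw_rect  */
struct example_crect { int x1, y1, x2, y2; };	/* stand-in for drm_clip_rect */

/*
 * Compute the bounding box of all clips and rewrite each clip relative to
 * the box origin, as the loops above do with "left"/"top" and "tmp[i]".
 * Assumes n >= 1, as the BUG_ON(!clips || !num_clips) above guarantees.
 */
static void example_normalize_clips(const struct example_vrect *clips, size_t n,
				    struct example_crect *out,
				    struct example_crect *bbox)
{
	size_t i;
	int left = clips[0].x, right = clips[0].x + clips[0].w;
	int top = clips[0].y, bottom = clips[0].y + clips[0].h;

	for (i = 1; i < n; i++) {
		if (clips[i].x < left)
			left = clips[i].x;
		if (clips[i].x + clips[i].w > right)
			right = clips[i].x + clips[i].w;
		if (clips[i].y < top)
			top = clips[i].y;
		if (clips[i].y + clips[i].h > bottom)
			bottom = clips[i].y + clips[i].h;
	}

	bbox->x1 = left;
	bbox->y1 = top;
	bbox->x2 = right;
	bbox->y2 = bottom;

	/* each clip becomes relative to the bounding box origin */
	for (i = 0; i < n; i++) {
		out[i].x1 = clips[i].x - left;
		out[i].y1 = clips[i].y - top;
		out[i].x2 = clips[i].x + clips[i].w - left;
		out[i].y2 = clips[i].y + clips[i].h - top;
	}
}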
@@ -1809,7 +1913,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
- rects = kzalloc(rects_size, GFP_KERNEL);
+ rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
@@ -1824,10 +1929,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
for (i = 0; i < arg->num_outputs; ++i) {
- if (rects->x < 0 ||
- rects->y < 0 ||
- rects->x + rects->w > mode_config->max_width ||
- rects->y + rects->h > mode_config->max_height) {
+ if (rects[i].x < 0 ||
+ rects[i].y < 0 ||
+ rects[i].x + rects[i].w > mode_config->max_width ||
+ rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index af8e6e5bd96..e1cb8556355 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
+
/**
* Base class display unit.
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90c5e392849..8f8dbd43c33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
+ struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
- int i = 0;
+ int i = 0, ret;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
+
+ /* Find the first du with a cursor. */
+ list_for_each_entry(entry, &lds->active, active) {
+ du = &entry->base;
+
+ if (!du->cursor_dmabuf)
+ continue;
+
+ ret = vmw_cursor_update_dmabuf(dev_priv,
+ du->cursor_dmabuf,
+ 64, 64,
+ du->hotspot_x,
+ du->hotspot_y);
+ if (ret == 0)
+ break;
+
+ DRM_ERROR("Could not update cursor image\n");
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4cceb3..1c7f09e2681 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
write_unlock(lock);
}
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by out_surf and out_buf must be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
+{
+ int ret;
+
+ BUG_ON(*out_surf || *out_buf);
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+ if (!ret)
+ return 0;
+
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ return ret;
+}
+
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
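The helper above hands back exactly one reference, either a surface or a dmabuf, and expects both out pointers to be NULL on entry. A hedged caller sketch (hypothetical function, not part of the patch) showing the unreference pattern that vmw_kms_fb_create() follows:

/* Hypothetical caller; both out pointers must start out NULL. */
static int example_use_handle(struct vmw_private *dev_priv,
			      struct ttm_object_file *tfile,
			      uint32_t handle)
{
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surface, &bo);
	if (ret)
		return ret;

	/* ... exactly one of surface/bo is now non-NULL; use it here ... */

	/* drop the single reference the lookup took */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	return 0;
}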
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c72f1c0b5e6..111d956d8e7 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
while (new_bus) {
new_bridge = new_bus->self;
- if (new_bridge) {
- /* go through list of devices already registered */
- list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
- bus = same_bridge_vgadev->pdev->bus;
- bridge = bus->self;
-
- /* see if the share a bridge with this device */
- if (new_bridge == bridge) {
- /* if their direct parent bridge is the same
- as any bridge of this device then it can't be used
- for that device */
- same_bridge_vgadev->bridge_has_one_vga = false;
- }
+ /* go through list of devices already registered */
+ list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
+ bus = same_bridge_vgadev->pdev->bus;
+ bridge = bus->self;
+
+ /* see if they share a bridge with this device */
+ if (new_bridge == bridge) {
+ /* if their direct parent bridge is the same
+ as any bridge of this device then it can't be used
+ for that device */
+ same_bridge_vgadev->bridge_has_one_vga = false;
+ }
- /* now iterate the previous devices bridge hierarchy */
- /* if the new devices parent bridge is in the other devices
- hierarchy then we can't use it to control this device */
- while (bus) {
- bridge = bus->self;
- if (bridge) {
- if (bridge == vgadev->pdev->bus->self)
- vgadev->bridge_has_one_vga = false;
- }
- bus = bus->parent;
+ /* now iterate the previous device's bridge hierarchy */
+ /* if the new device's parent bridge is in the other device's
+ hierarchy then we can't use it to control this device */
+ while (bus) {
+ bridge = bus->self;
+ if (bridge) {
+ if (bridge == vgadev->pdev->bus->self)
+ vgadev->bridge_has_one_vga = false;
}
+ bus = bus->parent;
}
}
new_bus = new_bus->parent;
@@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
uc = &priv->cards[i];
}
- if (!uc)
- return -EINVAL;
+ if (!uc) {
+ ret_val = -EINVAL;
+ goto done;
+ }
- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
- return -EINVAL;
+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
+ ret_val = -EINVAL;
+ goto done;
+ }
- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
- return -EINVAL;
+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
+ ret_val = -EINVAL;
+ goto done;
+ }
vga_put(pdev, io_state);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 848a56c0279..af353842f75 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 06ce996b8b6..4a441a6f996 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -266,7 +266,7 @@
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9ec854ae118..91be41f6080 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -315,7 +315,7 @@ config SENSORS_DS1621
config SENSORS_EXYNOS4_TMU
tristate "Temperature sensor on Samsung EXYNOS4"
- depends on EXYNOS4_DEV_TMU
+ depends on ARCH_EXYNOS4
help
 If you say yes here you get support for TMU (Thermal Management
 Unit) on the SAMSUNG EXYNOS4 series of SoCs.
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e8537..5d760f3d21c 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
static struct spi_driver ad7314_driver = {
.driver = {
.name = "ad7314",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7314_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e18..04450f8bf5d 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f..f2359a0093b 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
.resume = exynos4_tmu_resume,
};
-static int __init exynos4_tmu_driver_init(void)
-{
- return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
- platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743a..9ba38f318ff 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
},
};
-static int __init gpio_fan_init(void)
-{
- return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
- platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d4340..5253d23361d 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct completion *completion = &hwmon->read_completion;
- unsigned long t;
+ long t;
unsigned long val;
int ret;
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
.remove = __devexit_p(jz4740_hwmon_remove),
.driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
},
};
-static int __init jz4740_hwmon_init(void)
-{
- return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
- platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dce..9b382ec2c3b 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
.id_table = ntc_thermistor_id,
};
-static int __init ntc_thermistor_init(void)
-{
- return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
- platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
MODULE_DESCRIPTION("NTC Thermistor Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752..f6c26d19f52 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
.remove = __devexit_p(s3c_hwmon_remove),
};
-static int __init s3c_hwmon_init(void)
-{
- return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
- platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c2..79b6dabe316 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
.remove = sch5627_remove,
};
-static int __init sch5627_init(void)
-{
- return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
- platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79f..9d5236fb09b 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
.remove = sch5636_remove,
};
-static int __init sch5636_init(void)
-{
- return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
- platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b16..0018c7dd009 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
},
};
-static int __init twl4030_madc_hwmon_init(void)
-{
- return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dc..b9a87e89bab 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
.remove = __devexit_p(env_remove),
};
-static int __init env_init(void)
-{
- return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
- platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a47..9b598ed2602 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
},
};
-static int __init wm831x_hwmon_init(void)
-{
- return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
- platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca8..3ff67edbdc4 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
},
};
-static int __init wm8350_hwmon_init(void)
-{
- return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
- platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 85584a547c2..525c7345fa0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
if (flags & I2C_M_TEN) {
/* a ten bit address */
- addr = 0xf0 | ((msg->addr >> 7) & 0x03);
+ addr = 0xf0 | ((msg->addr >> 7) & 0x06);
bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
/* try extended address code...*/
ret = try_address(i2c_adap, addr, retries);
@@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
return -ENXIO;
}
/* the remaining 8 bit address */
- ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
+ ret = i2c_outb(i2c_adap, msg->addr & 0xff);
if ((ret != 1) && !nak_ok) {
/* the chip did not ack / xmission error occurred */
dev_err(&i2c_adap->dev, "died at 2nd address code\n");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef49aea..18936ac9d51 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, adap_info);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
+ pch_i2c_init(&adap_info->pch_data[i]);
ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
- goto err_i2c_add_adapter;
+ goto err_add_adapter;
}
-
- pch_i2c_init(&adap_info->pch_data[i]);
- }
- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, adap_info);
- if (ret) {
- pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_i2c_add_adapter:
+err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+ free_irq(pdev->irq, adap_info);
+err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc..03b61577888 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
clk_get_rate(i2c->clk);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d0023446..fa23faa20f0 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
- dev->fifo_size = 0;
+
+ dev->fifo_size = (dev->fifo_size / 2);
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
- } else {
- dev->fifo_size = (dev->fifo_size / 2);
+ else
dev->b_hw = 1; /* Enable hardware fixes */
- }
+
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef86a0..4c171808168 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* first, try busy waiting briefly */
do {
+ cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
- return -EINVAL;
+ return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 131079a3e29..1e5606185b4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.type = &i2c_client_type;
client->dev.of_node = info->of_node;
+ /* For 10-bit clients, add an arbitrary offset to avoid collisions */
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
- client->addr);
+ client->addr | ((client->flags & I2C_CLIENT_TEN)
+ ? 0xa000 : 0));
status = device_register(&client->dev);
if (status)
goto out_err;
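With the change above, a 10-bit client's device name carries a 0xa000 offset so it can never collide with a 7-bit client at the same numeric address on the same adapter: a 10-bit client at 0x050 on adapter 1 becomes "1-a050", while a 7-bit client at 0x50 stays "1-0050". A quick stand-alone illustration (hypothetical helper, not kernel code):

#include <stdio.h>

/* Print the sysfs-style name an i2c client gets under this naming scheme. */
static void example_client_name(int adapter_id, unsigned int addr, int ten_bit)
{
	printf("%d-%04x\n", adapter_id, addr | (ten_bit ? 0xa000 : 0));
}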
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index c90ce50b619..57a45ce84b2 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
return 0;
}
-int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
+static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 67cbcfa3512..847553fd8b9 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
- * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
*
* CYPRESS CY82C693 chipset IDE controller
*
@@ -90,7 +90,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
u8 time_16, time_8;
/* select primary or secondary channel */
- if (hwif->index > 0) { /* drive is on the secondary channel */
+ if (drive->dn > 1) { /* drive is on the secondary channel */
dev = pci_get_slot(dev->bus, dev->devfn+1);
if (!dev) {
printk(KERN_ERR "%s: tune_drive: "
@@ -141,7 +141,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
}
- if (hwif->index > 0)
+ if (drive->dn > 1)
pci_dev_put(dev);
}
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 4a697a238e2..8716066a2f2 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -521,8 +521,8 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
d.init_dma = icside_dma_init;
d.port_ops = &icside_v6_port_ops;
+ } else
d.dma_ops = NULL;
- }
ret = ide_host_register(host, &d, hws);
if (ret)
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index b59d04c7205..1892e81fb00 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -331,7 +331,7 @@ static const struct ide_port_ops ich_port_ops = {
.udma_mask = udma, \
}
-#define DECLARE_ICH_DEV(udma) \
+#define DECLARE_ICH_DEV(mwdma, udma) \
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_ich, \
@@ -340,7 +340,7 @@ static const struct ide_port_ops ich_port_ops = {
.port_ops = &ich_port_ops, \
.pio_mask = ATA_PIO4, \
.swdma_mask = ATA_SWDMA2_ONLY, \
- .mwdma_mask = ATA_MWDMA12_ONLY, \
+ .mwdma_mask = mwdma, \
.udma_mask = udma, \
}
@@ -362,13 +362,15 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = {
/* 2: PIIX4 */
DECLARE_PIIX_DEV(ATA_UDMA2),
/* 3: ICH0 */
- DECLARE_ICH_DEV(ATA_UDMA2),
+ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2),
/* 4: ICH */
- DECLARE_ICH_DEV(ATA_UDMA4),
+ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4),
/* 5: PIIX4 */
DECLARE_PIIX_DEV(ATA_UDMA4),
- /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
- DECLARE_ICH_DEV(ATA_UDMA5),
+ /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
+ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5),
+ /* 7: ICH7/7-R, no MWDMA1 */
+ DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5),
};
/**
@@ -438,9 +440,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
#endif
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
{ 0, },
};
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index e53a1b78378..281c9142634 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -113,12 +113,26 @@ static const struct pci_device_id triflex_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
+#ifdef CONFIG_PM
+static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ /*
+ * We must not disable or power down the device.
+ * The APM BIOS refuses to suspend if IDE is not accessible.
+ */
+ pci_save_state(dev);
+ return 0;
+}
+#else
+#define triflex_ide_pci_suspend NULL
+#endif
+
static struct pci_driver triflex_pci_driver = {
.name = "TRIFLEX_IDE",
.id_table = triflex_pci_tbl,
.probe = triflex_init_one,
.remove = ide_pci_remove,
- .suspend = ide_pci_suspend,
+ .suspend = triflex_ide_pci_suspend,
.resume = ide_pci_resume,
};
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index eb0e2ccc79a..73d45315940 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = 127;
dev->tx_queue_len = 10;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd7..1612cfd50f3 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+ struct neighbour *n;
+ int ret;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ if (!n || !(n->nud_state & NUD_VALID)) {
+ if (n)
+ neigh_event_send(n, NULL);
+ ret = -ENODATA;
+ } else {
+ ret = rdma_copy_addr(addr, dst->dev, n->ha);
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int addr4_resolve(struct sockaddr_in *src_in,
struct sockaddr_in *dst_in,
struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
- struct neighbour *neigh;
struct flowi4 fl4;
int ret;
@@ -214,18 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
goto put;
}
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
- ret = -ENODATA;
- if (neigh)
- goto release;
- goto put;
- }
-
- ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
- neigh_release(neigh);
+ ret = dst_fetch_ha(&rt->dst, addr);
put:
ip_rt_put(rt);
out:
@@ -238,13 +245,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
struct rdma_dev_addr *addr)
{
struct flowi6 fl6;
- struct neighbour *neigh;
struct dst_entry *dst;
int ret;
memset(&fl6, 0, sizeof fl6);
- ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
- ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -258,7 +264,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
src_in->sin6_family = AF_INET6;
- ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
+ src_in->sin6_addr = fl6.saddr;
}
if (dst->dev->flags & IFF_LOOPBACK) {
@@ -274,15 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
- neigh = dst_get_neighbour(dst);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- if (neigh)
- neigh_event_send(neigh, NULL);
- ret = -ENODATA;
- goto put;
- }
-
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+ ret = dst_fetch_ha(dst, addr);
put:
dst_release(dst);
return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 75ff821c0af..236a88c1ca8 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
if (cma_zero_addr(src)) {
dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
if ((src->sa_family = dst->sa_family) == AF_INET) {
- ((struct sockaddr_in *) src)->sin_addr.s_addr =
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+ ((struct sockaddr_in *)src)->sin_addr =
+ ((struct sockaddr_in *)dst)->sin_addr;
} else {
- ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
+ ((struct sockaddr_in6 *)src)->sin6_addr =
+ ((struct sockaddr_in6 *)dst)->sin6_addr;
}
}
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.private_data_len = sizeof(struct cma_hdr) +
conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!req.private_data)
return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv->id.ps);
req.private_data_len = offset + conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!private_data)
return -ENOMEM;
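Both checks added above guard against wraparound when the fixed header or offset is added to a caller-supplied private_data_len before the kzalloc; if the sum wraps, a short buffer would be allocated and then overrun by the copy. A minimal illustration of the idiom, assuming the length field is a narrow unsigned type as in the IB CM request structs:

#include <stdint.h>

/* Returns 0 and stores the total on success, -1 if the addition wrapped. */
static int example_checked_len(uint8_t header_len, uint8_t user_len,
			       uint8_t *total)
{
	uint8_t sum = header_len + user_len;	/* may wrap around in 8 bits */

	if (sum < user_len)			/* wrapped: reject the request */
		return -1;

	*total = sum;
	return 0;
}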
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e60..740dcc065cf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *child_ep, *parent_ep = ctx;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
struct l2t_entry *l2t;
struct rtable *rt;
@@ -1375,8 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- neigh = dst_get_neighbour(dst);
- l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+ l2t = t3_l2t_get(tdev, dst, NULL);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1887,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct neighbour *neigh;
struct iwch_ep *ep;
struct rtable *rt;
int err = 0;
@@ -1945,11 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail3;
}
ep->dst = &rt->dst;
-
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c55..0668bb3472d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
- if (mpa_rev_to_use == 1)
+ if (mpa_rev_to_use == 1) {
ep->tried_with_mpa_v1 = 1;
+ ep->retry_with_mpa_v1 = 0;
+ }
if (mpa_rev_to_use == 2) {
mpa->private_data_size +=
@@ -1554,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req,
return;
}
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+ struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+ struct neighbour *n;
+ int err, step;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ err = -ENODEV;
+ if (!n)
+ goto out;
+ err = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ struct net_device *pdev;
+
+ pdev = ip_dev_find(&init_net, peer_ip);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, n->dev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(n->dev);
+ ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(n->dev) * step];
+
+ if (clear_mpa_v1) {
+ ep->retry_with_mpa_v1 = 0;
+ ep->tried_with_mpa_v1 = 0;
+ }
+ }
+ err = 0;
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep, *parent_ep;
@@ -1561,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
- struct l2t_entry *l2t;
struct rtable *rt;
__be32 local_ip, peer_ip;
__be16 local_port, peer_port;
- struct net_device *pdev;
- u32 tx_chan, smac_idx;
- u16 rss_qid;
- u32 mtu;
- int step;
- int txq_idx, ctrlq_idx;
+ int err;
parent_ep = lookup_stid(t, stid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1594,47 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
- neigh = dst_get_neighbour(dst);
- if (neigh->dev->flags & IFF_LOOPBACK) {
- pdev = ip_dev_find(&init_net, peer_ip);
- BUG_ON(!pdev);
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
- mtu = pdev->mtu;
- tx_chan = cxgb4_port_chan(pdev);
- smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(pdev) * step;
- ctrlq_idx = cxgb4_port_idx(pdev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
- mtu = dst_mtu(dst);
- tx_chan = cxgb4_port_chan(neigh->dev);
- smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- if (!l2t) {
- printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+ if (!child_ep) {
+ printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
dst_release(dst);
goto reject;
}
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+ err = import_ep(child_ep, peer_ip, dst, dev, false);
+ if (err) {
+ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
- cxgb4_l2t_release(l2t);
dst_release(dst);
+ kfree(child_ep);
goto reject;
}
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -1647,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
- child_ep->l2t = l2t;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- child_ep->tx_chan = tx_chan;
- child_ep->smac_idx = smac_idx;
- child_ep->rss_qid = rss_qid;
- child_ep->mtu = mtu;
- child_ep->txq_idx = txq_idx;
- child_ep->ctrlq_idx = ctrlq_idx;
PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
- tx_chan, smac_idx, rss_qid);
+ child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1788,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status)
static int c4iw_reconnect(struct c4iw_ep *ep)
{
- int err = 0;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
init_timer(&ep->timer);
@@ -1820,45 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
}
ep->dst = &rt->dst;
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- ep->com.cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- if (!ep->l2t) {
+ err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, false);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
@@ -2234,13 +2222,10 @@ err:
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
- int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_ep *ep;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
@@ -2301,47 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- ep->retry_with_mpa_v1 = 0;
- ep->tried_with_mpa_v1 = 0;
- }
- if (!ep->l2t) {
+ err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, true);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e..0f1607c8325 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
- (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
(*count)++;
if (++ptr == cq->size)
ptr = 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f36da994a85..95c94d8f025 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
memcpy(response_mad, outmailbox->buf, 256);
@@ -330,7 +331,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
- MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_WRAPPED);
if (err)
err = IB_MAD_RESULT_FAILURE;
else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77f3dbc0aaa..7b445df6a66 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
struct mlx4_dev *dev = to_mdev(device)->dev;
- return dev->caps.port_mask & (1 << (port_num - 1)) ?
+ return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
@@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
memset(mailbox->buf, 0, 256);
memcpy(mailbox->buf, props->node_desc, 64);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
- MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
@@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
}
err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
return err;
@@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work)
memcpy(gids, gw->gids, sizeof gw->gids);
err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
printk(KERN_WARNING "set port command failed\n");
else {
@@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
printk_once(KERN_INFO "%s", mlx4_ib_version);
+ if (mlx4_is_mfunc(dev)) {
+ printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+ return NULL;
+ }
+
mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
@@ -1244,7 +1250,8 @@ err_reg:
err_counter:
for (; i; --i)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+ if (ibdev->counters[i - 1] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->uar_map);
@@ -1275,7 +1282,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a3..b1e6cae5f47 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1348,7 +1348,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else
netdev = nesvnic->netdev;
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+ rcu_read_lock();
+ neigh = dst_get_neighbour_noref(&rt->dst);
if (neigh) {
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,7 +1360,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
neigh->ha, ETH_ALEN)) {
/* Mac address same as in nes_arp_table */
- neigh_release(neigh);
ip_rt_put(rt);
return rc;
}
@@ -1373,13 +1373,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
dst_ip, NES_ARP_ADD);
rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
NES_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
}
- neigh_release(neigh);
}
-
- if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+ rcu_read_unlock();
ip_rt_put(rt);
return rc;
}
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index c00d2f3f896..4b3fa711a24 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_pauseparam = nes_netdev_set_pauseparam,
};
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
@@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
-static u32 nes_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1624,7 +1624,7 @@ static u32 nes_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int nes_set_features(struct net_device *netdev, u32 features)
+static int nes_set_features(struct net_device *netdev, netdev_features_t features)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 574600ef5b4..a7403248d83 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
- dd->freectxts++;
+ dd->freectxts--;
ret = 0;
goto bail;
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
- dd->freectxts--;
+ dd->freectxts++;
}
mutex_unlock(&qib_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95d..1d5895941e1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
SYM_LSB(IBCCtrlA_0, MaxPktLen);
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- ppd->cpspec->ibcctrl_a = val;
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
set_vls(ppd);
+ /* initially come up DISABLED, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
/* be paranoid against later code motion, etc. */
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
off */
if (ppd->dd->flags & QIB_HAS_QSFP) {
qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ queue_work(ib_wq, &qd->work);
}
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f..fa71b1e666c 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
- /* Spec says module can take up to two seconds! */
- mask = QSFP_GPIO_MOD_PRS_N;
- if (qd->ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- /* Do not try to wait here. Better to let event handle it */
- if (!qib_qsfp_mod_present(qd->ppd))
- goto bail;
- /* We see a module, but it may be unwise to look yet. Just schedule */
- qd->t_insert = get_jiffies_64();
- queue_work(ib_wq, &qd->work);
-bail:
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997..4115be54ba3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
struct ipoib_ah *ah;
+ struct ib_ah *vah;
ah = kmalloc(sizeof *ah, GFP_KERNEL);
if (!ah)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ah->dev = dev;
ah->last_send = 0;
kref_init(&ah->ref);
- ah->ah = ib_create_ah(pd, attr);
- if (IS_ERR(ah->ah)) {
+ vah = ib_create_ah(pd, attr);
+ if (IS_ERR(vah)) {
kfree(ah);
- ah = NULL;
- } else
+ ah = (struct ipoib_ah *)vah;
+ } else {
+ ah->ah = vah;
ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+ }
return ah;
}
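
The hunk above converts ipoib_create_ah() from returning NULL on failure to returning an ERR_PTR-encoded errno, so callers can forward the original error instead of guessing. A minimal sketch of that calling convention (hypothetical names, not the driver's code):

#include <linux/err.h>
#include <linux/slab.h>

struct thing {
	int id;
};

/* Allocator encodes the errno into the returned pointer. */
static struct thing *thing_create(int id)
{
	struct thing *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return ERR_PTR(-ENOMEM);
	t->id = id;
	return t;
}

/* Caller decodes it with IS_ERR()/PTR_ERR() and forwards the error. */
static int thing_use(int id)
{
	struct thing *t = thing_create(id);

	if (IS_ERR(t))
		return PTR_ERR(t);
	kfree(t);
	return 0;
}
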
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b600023..3514ca05dee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev)
return 0;
}
-static u32 ipoib_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
- if (ah) {
+ if (!IS_ERR_OR_NULL(ah)) {
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -555,15 +555,14 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
- struct neighbour *n;
unsigned long flags;
- n = dst_get_neighbour(skb_dst(skb));
neigh = ipoib_neigh_alloc(n, skb->dev);
if (!neigh) {
++dev->stats.tx_dropped;
@@ -636,16 +635,14 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
}
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+/* called with rcu_read_lock */
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n;
/* Look up path record for unicasts */
- n = dst_get_neighbour(dst);
if (n->ha[4] != 0xff) {
- neigh_add_path(skb, dev);
+ neigh_add_path(skb, n, dev);
return;
}
@@ -720,13 +717,19 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct neighbour *n = NULL;
unsigned long flags;
- if (likely(skb_dst(skb)))
- n = dst_get_neighbour(skb_dst(skb));
-
+ rcu_read_lock();
+ if (likely(skb_dst(skb))) {
+ n = dst_get_neighbour_noref(skb_dst(skb));
+ if (!n) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+ }
if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
- ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ ipoib_path_lookup(skb, n, dev);
+ goto unlock;
}
neigh = *to_ipoib_neigh(n);
@@ -748,18 +751,18 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_del(&neigh->list);
ipoib_neigh_free(dev, neigh);
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ ipoib_path_lookup(skb, n, dev);
+ goto unlock;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- return NETDEV_TX_OK;
+ goto unlock;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
- return NETDEV_TX_OK;
+ goto unlock;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
phdr->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
- return NETDEV_TX_OK;
+ goto unlock;
}
unicast_arp_send(skb, dev, phdr);
}
}
-
+unlock:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
dst = skb_dst(skb);
n = NULL;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref_raw(dst);
if ((!dst || !n) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
@@ -1218,6 +1222,8 @@ static struct net_device *ipoib_add_port(const char *format,
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+ priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
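
The ipoib_main.c hunks above switch to the noref neighbour accessors and bracket every use of the neighbour with rcu_read_lock()/rcu_read_unlock(). Reduced to a sketch with a hypothetical function name, the locking pattern is:

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/neighbour.h>

/* Illustrative only: the noref pointer is valid solely inside the
 * RCU read-side critical section, so the lookup and every use of
 * "n" must stay between rcu_read_lock() and rcu_read_unlock(). */
static void xmit_sketch(struct sk_buff *skb)
{
	struct neighbour *n = NULL;

	rcu_read_lock();
	if (skb_dst(skb))
		n = dst_get_neighbour_noref(skb_dst(skb));
	if (n) {
		/* ... transmit using n->ha while the lock is held ... */
	}
	rcu_read_unlock();
}
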
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a9768635..f7ff9dd66cd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
av.grh.dgid = mcast->mcmember.mgid;
ah = ipoib_create_ah(dev, priv->pd, &av);
- if (!ah) {
- ipoib_warn(priv, "ib_address_create failed\n");
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
} else {
spin_lock_irq(&priv->lock);
mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
skb->dev = dev;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref_raw(dst);
if (!dst || !n) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,8 +725,10 @@ out:
if (mcast && mcast->ah) {
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n = NULL;
+
+ rcu_read_lock();
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n && !*to_ipoib_neigh(n)) {
struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
skb->dev);
@@ -734,7 +739,7 @@ out:
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
-
+ rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 80793f1608e..06517e60e50 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
struct cma3000_accl_data *data = dev_id;
- int datax, datay, dataz;
- u8 ctrl, mode, range, intr_status;
+ int datax, datay, dataz, intr_status;
+ u8 ctrl, mode, range;
intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
if (intr_status < 0)
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 09b93b11a27..e2a9867c19d 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse)
*/
static int elantech_set_properties(struct elantech_data *etd)
{
+ /* This represents the version of the IC body. */
int ver = (etd->fw_version & 0x0f0000) >> 16;
+ /* Early versions of Elan touchpads don't obey the rule. */
if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600)
etd->hw_version = 1;
- else if (etd->fw_version < 0x150600)
- etd->hw_version = 2;
- else if (ver == 5)
- etd->hw_version = 3;
- else if (ver == 6)
- etd->hw_version = 4;
- else
- return -1;
+ else {
+ switch (ver) {
+ case 2:
+ case 4:
+ etd->hw_version = 2;
+ break;
+ case 5:
+ etd->hw_version = 3;
+ break;
+ case 6:
+ etd->hw_version = 4;
+ break;
+ default:
+ return -1;
+ }
+ }
/*
* Turn on packet checking by default.
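
For reference, the IC body version that drives the switch above is just bits 16-19 of the 24-bit firmware version. A throwaway user-space sketch of the decode (the sample value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int fw_version = 0x550f00;	/* hypothetical sample */
	int ver = (fw_version & 0x0f0000) >> 16;

	/* ver == 5 here, which the driver maps to hw_version 3 */
	printf("IC body version: %d\n", ver);
	return 0;
}
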
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index c5b12d2e955..86d6f39178b 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
/* inversion is required */
@@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
- return -1;
+ goto out;
if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
};
int val;
- if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+ if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
return -EIO;
*btn = buttons[(val & 0x30) >> 4];
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index ed1395ac7b8..2e4af24f8c1 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -2,7 +2,7 @@
* Finger Sensing Pad PS/2 mouse driver.
*
* Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
/* Finger-sensing Pad control registers */
#define FSP_REG_SYSCTL1 0x10
#define FSP_BIT_EN_REG_CLK BIT(5)
+#define FSP_REG_TMOD_STATUS 0x20
#define FSP_REG_OPC_QDOWN 0x31
#define FSP_BIT_EN_OPC_TAG BIT(7)
#define FSP_REG_OPTZ_XLO 0x34
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c080b828e5d..a6dcd18e9ad 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
do {
psmouse_reset(psmouse);
+ if (retry) {
+ /*
+ * On some boxes, right after resuming, the touchpad
+ * needs some time to finish initializing (I assume
+ * it needs time to calibrate) and start responding
+ * to Synaptics-specific queries, so let's wait a
+ * bit.
+ */
+ ssleep(1);
+ }
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 4b2a42f9f0b..d4d08bd9205 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/serio.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/mach-types.h>
#include <plat/board-ams-delta.h>
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index bb9f5d31f0d..b4cfc6c8be8 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
},
+ {
+ /* Newer HP Pavilion dv4 models */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ },
+ },
{ }
};
@@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
},
},
+ {
+ /* Newer HP Pavilion dv4 models */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index da0d8761e77..2ee47d01a3b 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+ { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
+ 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE6) },
+ { USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c4..bdc447fd476 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@@ -3524,7 +3527,7 @@ found:
return 0;
}
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
+ intel_iommu_enabled = 1;
+
return 0;
}
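
The intel_iommu_enabled flag exported above lets other kernel code test, after init, whether the Intel IOMMU actually came up. A hypothetical consumer (not part of this patch) would only need the extern declaration:

#include <linux/types.h>

extern int intel_iommu_enabled;

static bool my_driver_iommu_active(void)
{
	return intel_iommu_enabled != 0;
}
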
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f31..6777ca04947 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
{
if (!intr_remapping_enabled)
return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2fb2963df55..5b5fa5cdaa3 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e46777..9021182c4b7 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
case IIOCDOCFINT:
if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
return (-EINVAL); /* invalid driver */
+ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+ sizeof(dioctl.cf_ctrl.msn))
+ return -EINVAL;
+ if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+ sizeof(dioctl.cf_ctrl.fwd_nr))
+ return -EINVAL;
if ((i = cf_command(dioctl.cf_ctrl.drvid,
(cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
dioctl.cf_ctrl.cfproc,
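
The two strnlen() checks inserted above (and the matching one in isdn_net.c further down) guard against ioctl buffers that arrive without a NUL terminator. The idiom, pulled out as a stand-alone sketch:

#include <linux/errno.h>
#include <linux/string.h>

/* Reject a fixed-size buffer copied from user space if no NUL
 * terminator was found within it; only then is it safe to hand
 * the string to strcpy()/strlen()-style helpers. */
static int check_nul_terminated(const char *buf, size_t bufsize)
{
	if (strnlen(buf, bufsize) == bufsize)
		return -EINVAL;
	return 0;
}
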
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 04231cb2f03..1793ba1b6a8 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -624,8 +624,6 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
{
isdn_if *iif;
- pr_info("ISDN4Linux interface\n");
-
iif = kmalloc(sizeof *iif, GFP_KERNEL);
if (!iif) {
pr_err("out of memory\n");
@@ -684,6 +682,7 @@ void gigaset_isdn_unregdev(struct cardstate *cs)
*/
void gigaset_isdn_regdrv(void)
{
+ pr_info("ISDN4Linux interface\n");
/* nothing to do */
}
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e02..2339d7396b9 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
char *c,
*e;
+ if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+ sizeof(cfg->drvid))
+ return -EINVAL;
drvidx = -1;
chidx = -1;
strcpy(drvid, cfg->drvid);
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 661b692573e..6d5628bb060 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -270,11 +270,8 @@ void led_blink_set(struct led_classdev *led_cdev,
del_timer_sync(&led_cdev->blink_timer);
if (led_cdev->blink_set &&
- !led_cdev->blink_set(led_cdev, delay_on, delay_off)) {
- led_cdev->blink_delay_on = *delay_on;
- led_cdev->blink_delay_off = *delay_off;
+ !led_cdev->blink_set(led_cdev, delay_on, delay_off))
return;
- }
/* blink with 1 Hz as default if nothing specified */
if (!*delay_on && !*delay_off)
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 0dc30ffde5a..595d7319701 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -381,6 +381,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *lg_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/* The ops structure which hooks everything together. */
static struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
@@ -392,6 +397,7 @@ static struct virtio_config_ops lguest_config_ops = {
.reset = lg_reset,
.find_vqs = lg_find_vqs,
.del_vqs = lg_del_vqs,
+ .bus_name = lg_bus_name,
};
/*
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721b..6d03774b176 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
+ spin_lock_irq(&bitmap->lock);
for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
+ spin_unlock_irq(&bitmap->lock);
}
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1391,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
- if (bitmap->mddev->degraded)
- /* Never clear bits or update events_cleared when degraded */
- success = 0;
while (sectors) {
sector_t blocks;
@@ -1407,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
return;
}
- if (success &&
+ if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
@@ -1605,7 +1604,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
+ spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
+ spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index c3273efd08c..627456542fb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
rdev->raid_disk = rdev->saved_raid_disk;
+ rdev->saved_raid_disk = -1;
newconf = linear_conf(mddev,mddev->raid_disks+1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e..f47f1f8ac44 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del(&mddev->all_mddevs);
+ list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
- rdev->badblocks.unacked_exist) {
+ (rdev->badblocks.unacked_exist
+ && !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
else {
+ if (mddev->hold_active == UNTIL_IOCTL)
+ mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
- if (mddev->hold_active == UNTIL_IOCTL)
- mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -7342,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- } else
- break;
+ }
}
}
}
@@ -7840,6 +7857,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
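
The md_attr_show()/md_attr_store() hunks above follow one pattern: before touching an mddev reached via sysfs, verify under all_mddevs_lock that it is still linked into the global list, pin it with mddev_get(), and drop the reference once the attribute handler returns. Reduced to a sketch (names taken from the hunks, the wrapper itself is hypothetical):

static ssize_t attr_access_sketch(struct mddev *mddev, char *page,
				  ssize_t (*handler)(struct mddev *, char *))
{
	ssize_t rv;

	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		/* already unlinked: the device is being torn down */
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = handler(mddev, page);
	mddev_put(mddev);	/* may free the mddev */
	return rv;
}
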
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e2609217..858fdbb7eb0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (dev->written)
s->written++;
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
if (rdev) {
is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
@@ -3063,12 +3065,18 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (!test_bit(Faulty, &rdev->flags)) {
+ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
/* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
- if (test_bit(R5_WriteError, &dev->flags)) {
+ set_bit(R5_Insync, &dev->flags);
+ else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Expanded, &dev->flags))
+ /* If we've reshaped into here, we assume it is Insync.
+ * We will shortly update recovery_offset to make
+ * it official.
+ */
+ set_bit(R5_Insync, &dev->flags);
+
+ if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
@@ -3076,7 +3084,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
atomic_inc(&rdev->nr_pending);
diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c
index 7eb1bf75cd0..5d02221e99d 100644
--- a/drivers/media/common/tuners/mxl5007t.c
+++ b/drivers/media/common/tuners/mxl5007t.c
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
{
+ u8 buf[2] = { 0xfb, reg };
struct i2c_msg msg[] = {
{ .addr = state->i2c_props.addr, .flags = 0,
- .buf = &reg, .len = 1 },
+ .buf = buf, .len = 2 },
{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
.buf = val, .len = 1 },
};
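
The mxl5007t_read_reg() change above shows that the tuner wants a two-byte setup write, 0xfb followed by the register address, before the one-byte read. A generic sketch of such a combined I2C transfer (hypothetical helper, error handling trimmed):

#include <linux/errno.h>
#include <linux/i2c.h>

static int read_reg_sketch(struct i2c_adapter *adap, u16 addr,
			   u8 reg, u8 *val)
{
	u8 buf[2] = { 0xfb, reg };	/* 0xfb prefix, then register */
	struct i2c_msg msg[2] = {
		{ .addr = addr, .flags = 0,        .buf = buf, .len = 2 },
		{ .addr = addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
	};

	return i2c_transfer(adap, msg, 2) == 2 ? 0 : -EREMOTEIO;
}
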
diff --git a/drivers/media/common/tuners/tda18218.c b/drivers/media/common/tuners/tda18218.c
index aacfe2387e2..4fc29730a12 100644
--- a/drivers/media/common/tuners/tda18218.c
+++ b/drivers/media/common/tuners/tda18218.c
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
switch (params->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
LP_Fc = 0;
- LO_Frac = params->frequency + 4000000;
+ LO_Frac = params->frequency + 3000000;
break;
case BANDWIDTH_7_MHZ:
LP_Fc = 1;
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 303f22ea04c..01bb8daf4b0 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -189,7 +189,7 @@ struct ati_remote {
dma_addr_t inbuf_dma;
dma_addr_t outbuf_dma;
- unsigned char old_data[2]; /* Detect duplicate events */
+ unsigned char old_data; /* Detect duplicate events */
unsigned long old_jiffies;
unsigned long acc_jiffies; /* handle acceleration */
unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
/* Translation table from hardware messages to input events. */
static const struct {
short kind;
- unsigned char data1, data2;
+ unsigned char data;
int type;
unsigned int code;
int value;
} ati_remote_tbl[] = {
/* Directional control pad axes */
- {KIND_ACCEL, 0x35, 0x70, EV_REL, REL_X, -1}, /* left */
- {KIND_ACCEL, 0x36, 0x71, EV_REL, REL_X, 1}, /* right */
- {KIND_ACCEL, 0x37, 0x72, EV_REL, REL_Y, -1}, /* up */
- {KIND_ACCEL, 0x38, 0x73, EV_REL, REL_Y, 1}, /* down */
+ {KIND_ACCEL, 0x70, EV_REL, REL_X, -1}, /* left */
+ {KIND_ACCEL, 0x71, EV_REL, REL_X, 1}, /* right */
+ {KIND_ACCEL, 0x72, EV_REL, REL_Y, -1}, /* up */
+ {KIND_ACCEL, 0x73, EV_REL, REL_Y, 1}, /* down */
/* Directional control pad diagonals */
- {KIND_LU, 0x39, 0x74, EV_REL, 0, 0}, /* left up */
- {KIND_RU, 0x3a, 0x75, EV_REL, 0, 0}, /* right up */
- {KIND_LD, 0x3c, 0x77, EV_REL, 0, 0}, /* left down */
- {KIND_RD, 0x3b, 0x76, EV_REL, 0, 0}, /* right down */
+ {KIND_LU, 0x74, EV_REL, 0, 0}, /* left up */
+ {KIND_RU, 0x75, EV_REL, 0, 0}, /* right up */
+ {KIND_LD, 0x77, EV_REL, 0, 0}, /* left down */
+ {KIND_RD, 0x76, EV_REL, 0, 0}, /* right down */
/* "Mouse button" buttons */
- {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
- {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
- {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
- {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+ {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+ {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+ {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+ {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
/* Artificial "doubleclick" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
- {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
- {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+ {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+ {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
/* Non-mouse events are handled by rc-core */
- {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+ {KIND_END, 0x00, EV_MAX + 1, 0, 0}
};
/* Local function prototypes */
@@ -397,25 +397,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
}
/*
- * ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
- int i;
-
- for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
- /*
- * Decide if the table entry matches the remote input.
- */
- if (ati_remote_tbl[i].data1 == d1 &&
- ati_remote_tbl[i].data2 == d2)
- return i;
-
- }
- return -1;
-}
-
-/*
* ati_remote_compute_accel
*
* Implements acceleration curve for directional control pad
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
int index = -1;
int acc;
int remote_num;
- unsigned char scancode[2];
+ unsigned char scancode;
+ int i;
+
+ /*
+ * data[0] = 0x14
+ * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+ * data[2] = the key code (with the toggle bit in the MSB on some models)
+ * data[3] = channel << 4 (the low 4 bits must be zero)
+ */
/* Deal with strange looking inputs */
if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
+ if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+ dbginfo(&ati_remote->interface->dev,
+ "wrong checksum in input: %02x %02x %02x %02x\n",
+ data[0], data[1], data[2], data[3]);
+ return;
+ }
+
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
- scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
/*
- * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
- * so we have to clear them. The first bit is a bit tricky as the
- * "non-toggled" state depends on remote_num, so we xor it with the
- * second bit which is only used for toggle.
+ * MSB is a toggle code, though only used by some devices
+ * (e.g. SnapStream Firefly)
*/
- scancode[0] ^= (data[2] & 0x80);
-
- scancode[1] = data[2] & ~0x80;
+ scancode = data[2] & 0x7f;
- /* Look up event code index in mouse translation table. */
- index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+ /* Look up event code index in the mouse translation table. */
+ for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+ if (scancode == ati_remote_tbl[i].data) {
+ index = i;
+ break;
+ }
+ }
if (index >= 0) {
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
- remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+ "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+ remote_num, data[2], index, ati_remote_tbl[index].code);
if (!dev)
return; /* no mouse device */
} else
dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
- remote_num, data[1], data[2], scancode[0], scancode[1]);
+ "channel 0x%02x; key data %02x, scancode %02x\n",
+ remote_num, data[2], scancode);
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
- if (ati_remote->old_data[0] == data[1] &&
- ati_remote->old_data[1] == data[2] &&
+ if (ati_remote->old_data == data[2] &&
time_before(now, ati_remote->old_jiffies +
msecs_to_jiffies(repeat_filter))) {
ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
ati_remote->first_jiffies = now;
}
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
ati_remote->old_jiffies = now;
/* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
- u32 rc_code = (scancode[0] << 8) | scancode[1];
/*
* We don't use the rc-core repeat handling yet as
* it would cause ghost repeats which would be a
* regression for this driver.
*/
- rc_keydown_notimeout(ati_remote->rdev, rc_code,
+ rc_keydown_notimeout(ati_remote->rdev, scancode,
data[2]);
rc_keyup(ati_remote->rdev);
return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
input_sync(dev);
ati_remote->old_jiffies = jiffies;
- ati_remote->old_data[0] = data[1];
- ati_remote->old_data[1] = data[2];
+ ati_remote->old_data = data[2];
}
}
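
The ati_remote.c rework above documents the 4-byte report layout and adds a checksum test before any decoding. A stand-alone sketch of those checks plus the scancode extraction (hypothetical helpers):

/* data[0] is a constant 0x14; data[1] must be the checksum of the
 * key byte and the channel byte; the scancode is data[2] without
 * the toggle bit some remotes set in the MSB. */
static int ati_packet_ok(const unsigned char data[4])
{
	if (data[0] != 0x14)
		return 0;
	if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff))
		return 0;
	return 1;
}

static unsigned char ati_packet_scancode(const unsigned char data[4])
{
	return data[2] & 0x7f;
}
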
diff --git a/drivers/media/rc/keymaps/rc-ati-x10.c b/drivers/media/rc/keymaps/rc-ati-x10.c
index e1b8b2605c4..81506440ede 100644
--- a/drivers/media/rc/keymaps/rc-ati-x10.c
+++ b/drivers/media/rc/keymaps/rc-ati-x10.c
@@ -27,55 +27,55 @@
#include <media/rc-map.h>
static struct rc_map_table ati_x10[] = {
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xc500, KEY_A },
- { 0xc601, KEY_B },
- { 0xde19, KEY_C },
- { 0xe01b, KEY_D },
- { 0xe621, KEY_E },
- { 0xe823, KEY_F },
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x00, KEY_A },
+ { 0x01, KEY_B },
+ { 0x19, KEY_C },
+ { 0x1b, KEY_D },
+ { 0x21, KEY_E },
+ { 0x23, KEY_F },
- { 0xdd18, KEY_KPENTER }, /* "check" */
- { 0xdb16, KEY_MENU }, /* "menu" */
- { 0xc702, KEY_POWER }, /* Power */
- { 0xc803, KEY_TV }, /* TV */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xca05, KEY_WWW }, /* WEB */
- { 0xcb06, KEY_BOOKMARKS }, /* "book" */
- { 0xcc07, KEY_EDIT }, /* "hand" */
- { 0xe11c, KEY_COFFEE }, /* "timer" */
- { 0xe520, KEY_FRONT }, /* "max" */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe31e, KEY_OK }, /* "OK" */
- { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
- { 0xcd08, KEY_VOLUMEUP }, /* VOL - */
- { 0xcf0a, KEY_MUTE }, /* MUTE */
- { 0xd00b, KEY_CHANNELUP }, /* CH + */
- { 0xd10c, KEY_CHANNELDOWN },/* CH - */
- { 0xec27, KEY_RECORD }, /* ( o) red */
- { 0xea25, KEY_PLAY }, /* ( >) */
- { 0xe924, KEY_REWIND }, /* (<<) */
- { 0xeb26, KEY_FORWARD }, /* (>>) */
- { 0xed28, KEY_STOP }, /* ([]) */
- { 0xee29, KEY_PAUSE }, /* ('') */
- { 0xf02b, KEY_PREVIOUS }, /* (<-) */
- { 0xef2a, KEY_NEXT }, /* (>+) */
- { 0xf22d, KEY_INFO }, /* PLAYING */
- { 0xf32e, KEY_HOME }, /* TOP */
- { 0xf42f, KEY_END }, /* END */
- { 0xf530, KEY_SELECT }, /* SELECT */
+ { 0x18, KEY_KPENTER }, /* "check" */
+ { 0x16, KEY_MENU }, /* "menu" */
+ { 0x02, KEY_POWER }, /* Power */
+ { 0x03, KEY_TV }, /* TV */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x05, KEY_WWW }, /* WEB */
+ { 0x06, KEY_BOOKMARKS }, /* "book" */
+ { 0x07, KEY_EDIT }, /* "hand" */
+ { 0x1c, KEY_COFFEE }, /* "timer" */
+ { 0x20, KEY_FRONT }, /* "max" */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1a, KEY_UP }, /* up */
+ { 0x1e, KEY_OK }, /* "OK" */
+ { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+ { 0x08, KEY_VOLUMEUP }, /* VOL - */
+ { 0x0a, KEY_MUTE }, /* MUTE */
+ { 0x0b, KEY_CHANNELUP }, /* CH + */
+ { 0x0c, KEY_CHANNELDOWN },/* CH - */
+ { 0x27, KEY_RECORD }, /* ( o) red */
+ { 0x25, KEY_PLAY }, /* ( >) */
+ { 0x24, KEY_REWIND }, /* (<<) */
+ { 0x26, KEY_FORWARD }, /* (>>) */
+ { 0x28, KEY_STOP }, /* ([]) */
+ { 0x29, KEY_PAUSE }, /* ('') */
+ { 0x2b, KEY_PREVIOUS }, /* (<-) */
+ { 0x2a, KEY_NEXT }, /* (>+) */
+ { 0x2d, KEY_INFO }, /* PLAYING */
+ { 0x2e, KEY_HOME }, /* TOP */
+ { 0x2f, KEY_END }, /* END */
+ { 0x30, KEY_SELECT }, /* SELECT */
};
static struct rc_map_list ati_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-medion-x10.c b/drivers/media/rc/keymaps/rc-medion-x10.c
index 09e2cc01d11..479cdb89781 100644
--- a/drivers/media/rc/keymaps/rc-medion-x10.c
+++ b/drivers/media/rc/keymaps/rc-medion-x10.c
@@ -25,70 +25,70 @@
#include <media/rc-map.h>
static struct rc_map_table medion_x10[] = {
- { 0xf12c, KEY_TV }, /* TV */
- { 0xf22d, KEY_VCR }, /* VCR */
- { 0xc904, KEY_DVD }, /* DVD */
- { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
- { 0xf32e, KEY_RADIO }, /* RADIO */
- { 0xca05, KEY_DIRECTORY }, /* PHOTO */
- { 0xf42f, KEY_INFO }, /* TV-PREVIEW */
- { 0xf530, KEY_LIST }, /* CHANNEL-LST */
-
- { 0xe01b, KEY_SETUP }, /* SETUP */
- { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
- { 0xcd08, KEY_VOLUMEDOWN }, /* VOL - */
- { 0xce09, KEY_VOLUMEUP }, /* VOL + */
- { 0xd00b, KEY_CHANNELUP }, /* CHAN + */
- { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
- { 0xc500, KEY_MUTE }, /* MUTE */
-
- { 0xf732, KEY_RED }, /* red */
- { 0xf833, KEY_GREEN }, /* green */
- { 0xf934, KEY_YELLOW }, /* yellow */
- { 0xfa35, KEY_BLUE }, /* blue */
- { 0xdb16, KEY_TEXT }, /* TXT */
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
- { 0xe520, KEY_DELETE }, /* DELETE */
-
- { 0xfb36, KEY_KEYBOARD }, /* RENAME */
- { 0xdd18, KEY_SCREEN }, /* SNAPSHOT */
-
- { 0xdf1a, KEY_UP }, /* up */
- { 0xe722, KEY_DOWN }, /* down */
- { 0xe21d, KEY_LEFT }, /* left */
- { 0xe41f, KEY_RIGHT }, /* right */
- { 0xe31e, KEY_OK }, /* OK */
-
- { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
- { 0xfd38, KEY_EDIT }, /* EDIT IMAGE */
-
- { 0xe924, KEY_REWIND }, /* rewind (<<) */
- { 0xea25, KEY_PLAY }, /* play ( >) */
- { 0xeb26, KEY_FORWARD }, /* forward (>>) */
- { 0xec27, KEY_RECORD }, /* record ( o) */
- { 0xed28, KEY_STOP }, /* stop ([]) */
- { 0xee29, KEY_PAUSE }, /* pause ('') */
-
- { 0xe621, KEY_PREVIOUS }, /* prev */
- { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
- { 0xe823, KEY_NEXT }, /* next */
- { 0xde19, KEY_MENU }, /* MENU */
- { 0xff3a, KEY_LANGUAGE }, /* AUDIO */
-
- { 0xc702, KEY_POWER }, /* POWER */
+ { 0x2c, KEY_TV }, /* TV */
+ { 0x2d, KEY_VCR }, /* VCR */
+ { 0x04, KEY_DVD }, /* DVD */
+ { 0x06, KEY_AUDIO }, /* MUSIC */
+
+ { 0x2e, KEY_RADIO }, /* RADIO */
+ { 0x05, KEY_DIRECTORY }, /* PHOTO */
+ { 0x2f, KEY_INFO }, /* TV-PREVIEW */
+ { 0x30, KEY_LIST }, /* CHANNEL-LST */
+
+ { 0x1b, KEY_SETUP }, /* SETUP */
+ { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+ { 0x08, KEY_VOLUMEDOWN }, /* VOL - */
+ { 0x09, KEY_VOLUMEUP }, /* VOL + */
+ { 0x0b, KEY_CHANNELUP }, /* CHAN + */
+ { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+ { 0x00, KEY_MUTE }, /* MUTE */
+
+ { 0x32, KEY_RED }, /* red */
+ { 0x33, KEY_GREEN }, /* green */
+ { 0x34, KEY_YELLOW }, /* yellow */
+ { 0x35, KEY_BLUE }, /* blue */
+ { 0x16, KEY_TEXT }, /* TXT */
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+ { 0x20, KEY_DELETE }, /* DELETE */
+
+ { 0x36, KEY_KEYBOARD }, /* RENAME */
+ { 0x18, KEY_SCREEN }, /* SNAPSHOT */
+
+ { 0x1a, KEY_UP }, /* up */
+ { 0x22, KEY_DOWN }, /* down */
+ { 0x1d, KEY_LEFT }, /* left */
+ { 0x1f, KEY_RIGHT }, /* right */
+ { 0x1e, KEY_OK }, /* OK */
+
+ { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+ { 0x38, KEY_EDIT }, /* EDIT IMAGE */
+
+ { 0x24, KEY_REWIND }, /* rewind (<<) */
+ { 0x25, KEY_PLAY }, /* play ( >) */
+ { 0x26, KEY_FORWARD }, /* forward (>>) */
+ { 0x27, KEY_RECORD }, /* record ( o) */
+ { 0x28, KEY_STOP }, /* stop ([]) */
+ { 0x29, KEY_PAUSE }, /* pause ('') */
+
+ { 0x21, KEY_PREVIOUS }, /* prev */
+ { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+ { 0x23, KEY_NEXT }, /* next */
+ { 0x19, KEY_MENU }, /* MENU */
+ { 0x3a, KEY_LANGUAGE }, /* AUDIO */
+
+ { 0x02, KEY_POWER }, /* POWER */
};
static struct rc_map_list medion_x10_map = {
diff --git a/drivers/media/rc/keymaps/rc-snapstream-firefly.c b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
index ef146520931..c7f33ec719b 100644
--- a/drivers/media/rc/keymaps/rc-snapstream-firefly.c
+++ b/drivers/media/rc/keymaps/rc-snapstream-firefly.c
@@ -22,63 +22,63 @@
#include <media/rc-map.h>
static struct rc_map_table snapstream_firefly[] = {
- { 0xf12c, KEY_ZOOM }, /* Maximize */
- { 0xc702, KEY_CLOSE },
-
- { 0xd20d, KEY_1 },
- { 0xd30e, KEY_2 },
- { 0xd40f, KEY_3 },
- { 0xd510, KEY_4 },
- { 0xd611, KEY_5 },
- { 0xd712, KEY_6 },
- { 0xd813, KEY_7 },
- { 0xd914, KEY_8 },
- { 0xda15, KEY_9 },
- { 0xdc17, KEY_0 },
- { 0xdb16, KEY_BACK },
- { 0xdd18, KEY_KPENTER }, /* ent */
-
- { 0xce09, KEY_VOLUMEUP },
- { 0xcd08, KEY_VOLUMEDOWN },
- { 0xcf0a, KEY_MUTE },
- { 0xd00b, KEY_CHANNELUP },
- { 0xd10c, KEY_CHANNELDOWN },
- { 0xc500, KEY_VENDOR }, /* firefly */
-
- { 0xf32e, KEY_INFO },
- { 0xf42f, KEY_OPTION },
-
- { 0xe21d, KEY_LEFT },
- { 0xe41f, KEY_RIGHT },
- { 0xe722, KEY_DOWN },
- { 0xdf1a, KEY_UP },
- { 0xe31e, KEY_OK },
-
- { 0xe11c, KEY_MENU },
- { 0xe520, KEY_EXIT },
-
- { 0xec27, KEY_RECORD },
- { 0xea25, KEY_PLAY },
- { 0xed28, KEY_STOP },
- { 0xe924, KEY_REWIND },
- { 0xeb26, KEY_FORWARD },
- { 0xee29, KEY_PAUSE },
- { 0xf02b, KEY_PREVIOUS },
- { 0xef2a, KEY_NEXT },
-
- { 0xcb06, KEY_AUDIO }, /* Music */
- { 0xca05, KEY_IMAGES }, /* Photos */
- { 0xc904, KEY_DVD },
- { 0xc803, KEY_TV },
- { 0xcc07, KEY_VIDEO },
-
- { 0xc601, KEY_HELP },
- { 0xf22d, KEY_MODE }, /* Mouse */
-
- { 0xde19, KEY_A },
- { 0xe01b, KEY_B },
- { 0xe621, KEY_C },
- { 0xe823, KEY_D },
+ { 0x2c, KEY_ZOOM }, /* Maximize */
+ { 0x02, KEY_CLOSE },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x16, KEY_BACK },
+ { 0x18, KEY_KPENTER }, /* ent */
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x0a, KEY_MUTE },
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x00, KEY_VENDOR }, /* firefly */
+
+ { 0x2e, KEY_INFO },
+ { 0x2f, KEY_OPTION },
+
+ { 0x1d, KEY_LEFT },
+ { 0x1f, KEY_RIGHT },
+ { 0x22, KEY_DOWN },
+ { 0x1a, KEY_UP },
+ { 0x1e, KEY_OK },
+
+ { 0x1c, KEY_MENU },
+ { 0x20, KEY_EXIT },
+
+ { 0x27, KEY_RECORD },
+ { 0x25, KEY_PLAY },
+ { 0x28, KEY_STOP },
+ { 0x24, KEY_REWIND },
+ { 0x26, KEY_FORWARD },
+ { 0x29, KEY_PAUSE },
+ { 0x2b, KEY_PREVIOUS },
+ { 0x2a, KEY_NEXT },
+
+ { 0x06, KEY_AUDIO }, /* Music */
+ { 0x05, KEY_IMAGES }, /* Photos */
+ { 0x04, KEY_DVD },
+ { 0x03, KEY_TV },
+ { 0x07, KEY_VIDEO },
+
+ { 0x01, KEY_HELP },
+ { 0x2d, KEY_MODE }, /* Mouse */
+
+ { 0x19, KEY_A },
+ { 0x1b, KEY_B },
+ { 0x21, KEY_C },
+ { 0x23, KEY_D },
};
static struct rc_map_list snapstream_firefly_map = {
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 39fc923fc46..1c6015a04f9 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
switch (tv.model) {
case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+ case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
{ USB_DEVICE(0x2040, 0x8200),
.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+ { USB_DEVICE(0x2040, 0x7260),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+ { USB_DEVICE(0x2040, 0x7213),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
{ },
};
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 881e04c7ffe..2ca10dfec91 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
gspca_dev->usb_err = 0;
/* do the specific subdriver stuff before endpoint selection */
- gspca_dev->alt = 0;
+ intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+ gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
goto unlock;
}
- intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
: USB_ENDPOINT_XFER_ISOC;
@@ -957,7 +957,7 @@ retry:
ret = -EIO;
goto out;
}
- alt = ep_tb[--alt_idx].alt;
+ gspca_dev->alt = ep_tb[--alt_idx].alt;
}
}
out:
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 89d09a8914f..82c8817bd32 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -162,7 +162,6 @@ struct m5mols_version {
* @pad: media pad
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
- * @code: current code
* @irq_waitq: waitqueue for the capture
* @work_irq: workqueue for the IRQ
* @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
struct media_pad pad;
struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
int res_type;
- enum v4l2_mbus_pixelcode code;
wait_queue_head_t irq_waitq;
struct work_struct work_irq;
unsigned long flags;
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 05ab3700647..e0f09e53180 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
int ret = -EINVAL;
u8 reg;
- if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+ if (mode < REG_PARAMETER || mode > REG_CAPTURE)
return ret;
ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct m5mols_info *info = to_m5mols(sd);
struct v4l2_mbus_framefmt *format;
- if (fmt->pad != 0)
- return -EINVAL;
-
format = __find_format(info, fh, fmt->which, info->res_type);
if (!format)
return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
u32 resolution = 0;
int ret;
- if (fmt->pad != 0)
- return -EINVAL;
-
ret = __find_resolution(sd, format, &type, &resolution);
if (ret < 0)
return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
if (!sfmt)
return 0;
- *sfmt = m5mols_default_ffmt[type];
- sfmt->width = format->width;
- sfmt->height = format->height;
+
+ format->code = m5mols_default_ffmt[type].code;
+ format->colorspace = V4L2_COLORSPACE_JPEG;
+ format->field = V4L2_FIELD_NONE;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ *sfmt = *format;
info->resolution = resolution;
- info->code = format->code;
info->res_type = type;
}
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
{
struct m5mols_info *info = to_m5mols(sd);
+ u32 code = info->ffmt[info->res_type].code;
if (enable) {
int ret = -EINVAL;
- if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+ if (is_code(code, M5MOLS_RESTYPE_MONITOR))
ret = m5mols_start_monitor(info);
- if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+ if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
ret = m5mols_start_capture(info);
return ret;
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cf2c0fb95f2..398f96ffd35 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
+ mutex_init(&mt9m111->power_lock);
ret = mt9m111_video_probe(client);
if (ret) {
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 32114a3c0ca..7b34b11daf2 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
ret = mt9t112_camera_probe(client);
- if (ret)
+ if (ret) {
kfree(priv);
+ return ret;
+ }
/* Cannot fail: using the default supported pixel code */
mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 9c5c19f142d..ee0d0b39cd1 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -38,6 +38,7 @@
#include <linux/irq.h>
#include <linux/videodev2.h>
#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
vid_dev->num_displays = 0;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+
+ if (!dssdev->driver) {
+ dev_warn(&pdev->dev, "no driver for display: %s\n",
+ dssdev->name);
+ omap_dss_put_device(dssdev);
+ continue;
+ }
+
vid_dev->displays[vid_dev->num_displays++] = dssdev;
}
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e87ae2f634b..6a6cf388bae 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 1d54b86c936..3ea38a8def8 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
unsigned long flags;
struct sgdma_state *sg_state;
- if ((sglen < 0) || ((sglen > 0) & !sglist))
+ if ((sglen < 0) || ((sglen > 0) && !sglist))
return -EINVAL;
spin_lock_irqsave(&sgdma->lock, flags);
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index b0b0fa5a357..54a4a3f22e2 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
struct isp_pipeline *pipe =
to_isp_pipeline(&ccdc->video_out.video.entity);
- struct video_device *vdev = &ccdc->subdev.devnode;
+ struct video_device *vdev = ccdc->subdev.devnode;
struct v4l2_event event;
memset(&event, 0, sizeof(event));
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 68d539456c5..bc0b2c7349b 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
- struct video_device *vdev = &stat->subdev.devnode;
+ struct video_device *vdev = stat->subdev.devnode;
struct v4l2_event event;
struct omap3isp_stat_event_status *status = (void *)event.u.data;
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index d1000723c5a..f2290578448 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -26,6 +26,7 @@
#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index 9f2d26b1d4c..6806345ec2f 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index c8d91b0cd9b..2cc3b916672 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+ fimc_hw_reset(fimc);
+ cap->buf_index = 0;
+
spin_unlock_irqrestore(&fimc->slock, flags);
if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
- if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
return 0;
spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
fimc_hw_set_rotation(ctx);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
fimc_hw_set_out_dma(ctx);
- set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
}
spin_unlock(&ctx->slock);
return ret;
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
int min_bufs;
int ret;
- fimc_hw_reset(fimc);
vid_cap->frame_count = 0;
ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
- if (fimc->id == 1 && var->pix_hoff)
+ if (var->min_vsize_align == 1 && !rotation)
align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
set_frame_bounds(ff, mf->width, mf->height);
+ fimc->vid_cap.mf = *mf;
ff->fmt = ffmt;
/* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
media_entity_cleanup(&sd->entity);
v4l2_device_unregister_subdev(sd);
kfree(sd);
- sd = NULL;
+ fimc->vid_cap.subdev = NULL;
}
/* Set default format at the sensor and host interface */
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 19ca6db38b2..07c6254faee 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
static struct fimc_fmt fimc_formats[] = {
{
.name = "RGB565",
- .fourcc = V4L2_PIX_FMT_RGB565X,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
.color = S5P_FIMC_RGB565,
.memplanes = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
mod_x = 6; /* 64 x 32 pixels tile */
mod_y = 5;
} else {
- if (fimc->id == 1 && variant->pix_hoff)
+ if (variant->min_vsize_align == 1)
mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
else
- mod_y = mod_x;
+ mod_y = ffs(variant->min_vsize_align) - 1;
}
- dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
&pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
/* Get pixel alignment constraints. */
- if (fimc->id == 1 && fimc->variant->pix_hoff)
+ if (fimc->variant->min_vsize_align == 1)
halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
else
- halign = ffs(min_size) - 1;
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
for (i = 0; i < f->fmt->colplanes; i++)
depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
fimc->pdata = pdata;
- set_bit(ST_LPM, &fimc->state);
init_waitqueue_head(&fimc->irq_queue);
spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
/* Enable clocks and perform basic initalization */
clk_enable(fimc->clock[CLK_GATE]);
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
/* Resume the capture or mem-to-mem device */
if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
return 0;
}
fimc_hw_reset(fimc);
- if (fimc->variant->out_buf_count > 4)
- fimc_hw_set_dma_seq(fimc, 0xF);
spin_unlock_irqrestore(&fimc->slock, flags);
if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- fimc_runtime_suspend(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[0],
};
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
+ .min_vsize_align = 1,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
+ .min_vsize_align = 16,
.out_buf_count = 4,
.pix_limit = &s5p_pix_limit[2],
};
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[1],
};
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 2,
+ .min_vsize_align = 1,
.out_buf_count = 32,
.pix_limit = &s5p_pix_limit[3],
};
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a6936dad5b1..c7f01c47b20 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
* @hor_offs_align: horizontal pixel offset aligment
+ * @min_vsize_align: minimum vertical pixel size alignment
* @out_buf_count: the number of buffers in output DMA sequence
*/
struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
u16 min_inp_pixsize;
u16 min_out_pixsize;
u16 hor_offs_align;
+ u16 min_vsize_align;
u16 out_buf_count;
};
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index cc337b1de91..615c862f036 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
s_info->pdata->board_info, NULL);
if (IS_ERR_OR_NULL(sd)) {
+ i2c_put_adapter(adapter);
v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
return NULL;
}
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_adapter *adapter;
if (!client)
return;
v4l2_device_unregister_subdev(sd);
+ adapter = client->adapter;
i2c_unregister_device(client);
- i2c_put_adapter(client->adapter);
+ if (adapter)
+ i2c_put_adapter(adapter);
}
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
static int fimc_md_register_video_nodes(struct fimc_md *fmd)
{
+ struct video_device *vdev;
int i, ret = 0;
for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
if (!fmd->fimc[i])
continue;
- if (fmd->fimc[i]->m2m.vfd)
- ret = video_register_device(fmd->fimc[i]->m2m.vfd,
- VFL_TYPE_GRABBER, -1);
- if (ret)
- break;
- if (fmd->fimc[i]->vid_cap.vfd)
- ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
- VFL_TYPE_GRABBER, -1);
+ vdev = fmd->fimc[i]->m2m.vfd;
+ if (vdev) {
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ break;
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+ }
+
+ vdev = fmd->fimc[i]->vid_cap.vfd;
+ if (vdev == NULL)
+ continue;
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
}
return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (WARN(csis == NULL,
"MIPI-CSI interface specified "
"but s5p-csis module is not loaded!\n"))
- continue;
+ return -EINVAL;
ret = media_entity_create_link(&sensor->entity, 0,
&csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
struct fimc_md *fmd;
int ret;
- if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
- return -EINVAL;
-
fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
if (!fmd)
return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = fimc_md_register_sensor_entities(fmd);
- if (ret)
- goto err3;
+ if (pdev->dev.platform_data) {
+ ret = fimc_md_register_sensor_entities(fmd);
+ if (ret)
+ goto err3;
+ }
ret = fimc_md_create_links(fmd);
if (ret)
goto err3;
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 20e664e3416..44f5c2d1920 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
cfg = readl(dev->regs + S5P_CIGCTRL);
cfg &= ~S5P_CIGCTRL_SWRST;
writel(cfg, dev->regs + S5P_CIGCTRL);
+
+ if (dev->variant->out_buf_count > 4)
+ fimc_hw_set_dma_seq(dev, 0xF);
}
static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *src_frame = &ctx->s_frame;
struct fimc_frame *dst_frame = &ctx->d_frame;
- u32 cfg = 0;
+
+ u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+ cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+ S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+ S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+ S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+ S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
fimc_hw_set_scaler(ctx);
cfg = readl(dev->regs + S5P_CISCCTRL);
+ cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
if (variant->has_mainscaler_ext) {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
writel(cfg, dev->regs + S5P_CIEXTEN);
} else {
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
writel(cfg, dev->regs + S5P_CISCCTRL);
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 1e8cdb77d4b..dff9dc79879 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
.num_planes = 1,
},
{
- .name = "H264 Encoded Stream",
+ .name = "H263 Encoded Stream",
.fourcc = V4L2_PIX_FMT_H263,
.codec_mode = S5P_FIMV_CODEC_H263_ENC,
.type = MFC_FMT_ENC,
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index e16d3a4bc1d..b47d0c06ecf 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -16,6 +16,7 @@
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index f390682629c..c51decfcae1 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
ret = sh_mobile_ceu_soft_reset(pcdev);
csi2_sd = find_csi2(pcdev);
- if (csi2_sd)
- csi2_sd->grp_id = (long)icd;
+ if (csi2_sd) {
+ csi2_sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(csi2_sd, icd);
+ }
ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
{
if (pcdev->csi2_pdev) {
struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
- if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+ if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
return csi2_sd;
}
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
/* Try 2560x1920, 1280x960, 640x480, 320x240 */
mf.width = 2560 >> shift;
mf.height = 1920 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
bool ceu_1to1;
int ret;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
s_mbus_fmt, mf);
if (ret < 0)
return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
tmp_h = min(2 * tmp_h, max_height);
mf->width = tmp_w;
mf->height = tmp_h;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- s_mbus_fmt, mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, mf);
dev_geo(dev, "Camera scaled to %ux%u\n",
mf->width, mf->height);
if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
}
if (interm_width < icd->user_width || interm_height < icd->user_height) {
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
- s_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ s_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
mf.code = xlate->code;
mf.colorspace = pix->colorspace;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ video, try_mbus_fmt, &mf);
if (ret < 0)
return ret;
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
*/
mf.width = 2560;
mf.height = 1920;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
- try_mbus_fmt, &mf);
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd), video,
+ try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
dev_err(icd->parent,
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index ea4f0473ed3..8a652b53ff7 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
- struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
.flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+ struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
struct device *dev = v4l2_get_subdevdata(&priv->subdev);
struct v4l2_mbus_config cfg;
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index b72580c3895..62e4312515c 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
}
sd = soc_camera_to_subdev(icd);
- sd->grp_id = (long)icd;
+ sd->grp_id = soc_camera_grp_id(icd);
+ v4l2_set_subdev_hostdata(sd, icd);
if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
goto ectrl;
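The hunks above, together with the related ov6650, sh_mobile_ceu_camera and sh_mobile_csi2 changes, stop casting the soc_camera_device pointer into the 32-bit sd->grp_id and instead use two explicit channels: a small numeric group id from soc_camera_grp_id() for v4l2_device_call_until_err() filtering, and v4l2_set_subdev_hostdata() for carrying the pointer itself. A minimal sketch of the resulting pairing (not a complete driver, names as used in the hunks) looks like:

	/* host side: publish the client device to its subdev */
	sd->grp_id = soc_camera_grp_id(icd);	/* numeric id, no pointer truncation */
	v4l2_set_subdev_hostdata(sd, icd);	/* pointer travels as subdev host data */

	/* subdev side: recover the device without casts */
	struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);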
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index 43c0ebb8195..b7b2d3483fd 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -4,7 +4,7 @@
* Debugfs support for the AB5500 MFD driver
*/
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mfd/ab5500/ab5500.h>
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1e9173804ed..d3d572b2317 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+#ifdef CONFIG_DEBUG_FS
static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
.flags = IORESOURCE_IRQ,
},
};
+#endif
static struct resource __devinitdata ab8500_usb_resources[] = {
{
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f1d88483112..8d816cce832 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
ret = __adp5520_read(chip->client, reg, &reg_val);
- if (!ret && ((reg_val & bit_mask) == 0)) {
+ if (!ret && ((reg_val & bit_mask) != bit_mask)) {
reg_val |= bit_mask;
ret = __adp5520_write(chip->client, reg, reg_val);
}
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 1b79c37fd59..1924b857a0f 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __da903x_write(chip->client, reg, reg_val);
}
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
+ free_irq(client->irq, chip);
kfree(chip);
return 0;
}
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 1e9ee533eac..ef39528088f 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -16,6 +16,7 @@
*/
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index bba26d96c24..a5ddf31b60c 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
if (ret)
goto out;
- if ((reg_val & bit_mask) == 0) {
+ if ((reg_val & bit_mask) != bit_mask) {
reg_val |= bit_mask;
ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 6f5b8cf2f65..c1da84bc157 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
goto out;
}
- data &= mask;
+ data &= ~mask;
err = tps65910_i2c_write(tps65910, reg, 1, &data);
if (err)
dev_err(tps65910->dev, "write to reg %x failed\n", reg);
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index bfbd66021af..61e70cfaa77 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/*
* [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl_map[mod_no].sid;
- twl = &twl_modules[sid];
-
if (unlikely(!inuse)) {
- pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+ pr_err("%s: not initialized\n", DRIVER_NAME);
return -EPERM;
}
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
+
mutex_lock(&twl->xfer_lock);
/* [MSG1] fill the register address data */
msg = &twl->xfer_msg[0];
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index f062c8cc6c3..29f11e0765f 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -432,6 +432,7 @@ struct sih_agent {
u32 edge_change;
struct mutex irq_lock;
+ char *irq_name;
};
/*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
* Generic handler for SIH interrupts ... we "know" this is called
* in task context, with IRQs enabled.
*/
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
{
struct sih_agent *agent = irq_get_handler_data(irq);
const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, read ISR error %d\n",
sih->name, isr);
/* REVISIT: recover; eventually mask it all, etc */
- return;
+ return IRQ_HANDLED;
}
while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
sih->name, irq);
}
+ return IRQ_HANDLED;
}
static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
activate_irq(irq);
}
- status = irq_base;
twl4030_irq_next += i;
/* replace generic PIH handler (handle_simple_irq) */
irq = sih_mod + twl4030_irq_base;
irq_set_handler_data(irq, agent);
- irq_set_chained_handler(irq, handle_twl4030_sih);
+ agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+ status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+ agent->irq_name ?: sih->name, NULL);
pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
irq, irq_base, twl4030_irq_next - 1);
- return status;
+ return status < 0 ? status : irq_base;
}
/* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
}
/* install an irq handler to demultiplex the TWL4030 interrupt */
- status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
- "TWL4030-PIH", NULL);
+ status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+ IRQF_ONESHOT,
+ "TWL4030-PIH", NULL);
if (status < 0) {
pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 5d6ba132837..61894fced8e 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
switch (wm8994->type) {
case WM8958:
+ case WM1811:
ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
if (ret < 0) {
dev_err(dev, "Failed to read power status: %d\n", ret);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d593878d66d..5664696f2d3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -472,7 +472,7 @@ config BMP085
module will be called bmp085.
config PCH_PHUB
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
depends on PCI
help
This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of
@@ -480,12 +480,13 @@ config PCH_PHUB
processor. The Topcliff has MAC address and Option ROM data in SROM.
This driver can access MAC address and Option ROM data in SROM.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+	  This driver can also be used for LAPIS Semiconductor's IOHs,
+	  ML7213/ML7223/ML7831.
+	  ML7213 IOH is for IVI (In-Vehicle Infotainment) use,
+	  ML7223 IOH is for MP (Media Phone) use, and
+	  ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx series
+	  and are fully compatible with the Intel EG20T PCH.
To compile this driver as a module, choose M here: the module will
be called pch_phub.
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index a662f5987b6..82b2cb77ae1 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -100,7 +100,7 @@ enum dpot_devid {
AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
BRDAC0, 7, 28),
- AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
+ AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
BRDAC0, 8, 29),
AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
BRDAC0 | BRDAC1, 8, 30),
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 7ce6065dc20..eb5cd28bc6d 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -945,8 +945,7 @@ static int fpga_of_remove(struct platform_device *op)
/* CTL-CPLD Version Register */
#define CTL_CPLD_VERSION 0x2000
-static int fpga_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int fpga_of_probe(struct platform_device *op)
{
struct device_node *of_node = op->dev.of_node;
struct device *this_device;
@@ -1107,7 +1106,7 @@ static struct of_device_id fpga_of_match[] = {
{},
};
-static struct of_platform_driver fpga_of_driver = {
+static struct platform_driver fpga_of_driver = {
.probe = fpga_of_probe,
.remove = fpga_of_remove,
.driver = {
@@ -1124,12 +1123,12 @@ static struct of_platform_driver fpga_of_driver = {
static int __init fpga_init(void)
{
led_trigger_register_simple("fpga", &ledtrig_fpga);
- return of_register_platform_driver(&fpga_of_driver);
+ return platform_driver_register(&fpga_of_driver);
}
static void __exit fpga_exit(void)
{
- of_unregister_platform_driver(&fpga_of_driver);
+ platform_driver_unregister(&fpga_of_driver);
led_trigger_unregister_simple(ledtrig_fpga);
}
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 3965821fef1..14e974b2a78 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -1249,8 +1249,7 @@ static bool dma_filter(struct dma_chan *chan, void *data)
return true;
}
-static int data_of_probe(struct platform_device *op,
- const struct of_device_id *match)
+static int data_of_probe(struct platform_device *op)
{
struct device_node *of_node = op->dev.of_node;
struct device *this_device;
@@ -1401,7 +1400,7 @@ static struct of_device_id data_of_match[] = {
{},
};
-static struct of_platform_driver data_of_driver = {
+static struct platform_driver data_of_driver = {
.probe = data_of_probe,
.remove = data_of_remove,
.driver = {
@@ -1417,12 +1416,12 @@ static struct of_platform_driver data_of_driver = {
static int __init data_init(void)
{
- return of_register_platform_driver(&data_of_driver);
+ return platform_driver_register(&data_of_driver);
}
static void __exit data_exit(void)
{
- of_unregister_platform_driver(&data_of_driver);
+ platform_driver_unregister(&data_of_driver);
}
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 26cf12ca7f5..701edf65897 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -85,7 +85,7 @@ config EEPROM_93XX46
config EEPROM_DIGSY_MTC_CFG
bool "DigsyMTC display configuration EEPROMs device"
- depends on PPC_MPC5200_GPIO && GPIOLIB && SPI_GPIO
+ depends on GPIO_MPC5200 && SPI_GPIO
help
This option enables access to display configuration EEPROMs
on digsy_mtc board. You have to additionally select Microwire
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 7b33de95c4b..0ff4b02177b 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -63,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
eeprom->reg_data_out = 0;
eeprom->reg_data_clock = 0;
eeprom->reg_chip_select = 1;
+ eeprom->drive_data = 1;
eeprom->register_write(eeprom);
/*
@@ -101,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 1;
/*
* Start writing all bits.
@@ -140,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 0;
/*
* Start reading all bits.
@@ -231,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ /* create command to enable/disable */
+
+ command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+ command <<= (eeprom->width - 2);
+
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write @data to the specified @addr in the EEPROM and
+ * wait for the device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we delay between parts of the operation to avoid using excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+ int timeout = 100;
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+ command |= addr;
+
+ /* send write command */
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ /* send data */
+ eeprom_93cx6_write_bits(eeprom, data, 16);
+
+ /* get ready to check for busy */
+ eeprom->drive_data = 0;
+ eeprom->reg_chip_select = 1;
+ eeprom->register_write(eeprom);
+
+ /* wait at least 250 ns for DO to become the busy signal */
+ usleep_range(1000, 2000);
+
+ /* wait for DO to go high to signify finish */
+
+ while (true) {
+ eeprom->register_read(eeprom);
+
+ if (eeprom->reg_data_out)
+ break;
+
+ usleep_range(1000, 2000);
+
+ if (--timeout <= 0) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ break;
+ }
+ }
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
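The kernel-doc above describes the new write path: raise write-enable, clock out the write opcode, address and data, then poll DO until the part reports completion. A minimal, hypothetical caller (helper name invented for illustration) would follow that sequence as:

	/* Sketch only: 'eeprom' is assumed to be an initialised struct
	 * eeprom_93cx6 with working register_read/register_write callbacks
	 * and a correct 'width'. */
	static void example_update_word(struct eeprom_93cx6 *eeprom,
					u8 word, u16 value)
	{
		eeprom_93cx6_wren(eeprom, true);		/* EWEN: allow writes */
		eeprom_93cx6_write(eeprom, word, value);	/* program and busy-wait */
		eeprom_93cx6_wren(eeprom, false);		/* EWDS: re-protect */
	}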
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index dee33addcae..10fc4785dba 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,10 +41,10 @@
#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
(Intel EG20T PCH)*/
#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
- offset(OKI SEMICONDUCTOR ML7213)
+ offset(LAPIS Semicon ML7213)
*/
#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
- offset(OKI SEMICONDUCTOR ML7223)
+ offset(LAPIS Semicon ML7223)
*/
/* MAX number of INT_REDUCE_CONTROL registers */
@@ -73,6 +73,9 @@
#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */
#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
+
/* SROM ACCESS Macro */
#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
@@ -115,6 +118,7 @@
* @pch_mac_start_address: MAC address area start address
* @pch_opt_rom_start_address: Option ROM start address
* @ioh_type: Save IOH type
+ * @pdev: pointer to pci device struct
*/
struct pch_phub_reg {
u32 phub_id_reg;
@@ -136,6 +140,7 @@ struct pch_phub_reg {
u32 pch_mac_start_address;
u32 pch_opt_rom_start_address;
int ioh_type;
+ struct pci_dev *pdev;
};
/* SROM SPEC for MAC address assignment offset */
@@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
int retval;
int i;
- if (chip->ioh_type == 1) /* EG20T */
+ if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
retval = pch_phub_gbe_serial_rom_conf(chip);
else /* ML7223 */
retval = pch_phub_gbe_serial_rom_conf_mp(chip);
@@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
unsigned int orom_size;
int ret;
int err;
+ ssize_t rom_size;
struct pch_phub_reg *chip =
dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
}
/* Get Rom signature */
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ goto exrom_map_err;
+
pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
(unsigned char *)&rom_signature);
rom_signature &= 0xff;
@@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
goto return_err;
}
return_ok:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
mutex_unlock(&pch_phub_mutex);
return addr_offset;
return_err:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+exrom_map_err:
mutex_unlock(&pch_phub_mutex);
return_err_nomutex:
return err;
@@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
int err;
unsigned int addr_offset;
int ret;
+ ssize_t rom_size;
struct pch_phub_reg *chip =
dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
goto return_ok;
}
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address) {
+ err = -ENOMEM;
+ goto exrom_map_err;
+ }
+
for (addr_offset = 0; addr_offset < count; addr_offset++) {
if (PCH_PHUB_OROM_SIZE < off + addr_offset)
goto return_ok;
@@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
}
return_ok:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
mutex_unlock(&pch_phub_mutex);
return addr_offset;
return_err:
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
+
+exrom_map_err:
mutex_unlock(&pch_phub_mutex);
return err;
}
@@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
{
u8 mac[8];
struct pch_phub_reg *chip = dev_get_drvdata(dev);
+ ssize_t rom_size;
+
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ return -ENOMEM;
pch_phub_read_gbe_mac_addr(chip, mac);
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
return sprintf(buf, "%pM\n", mac);
}
@@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u8 mac[6];
+ ssize_t rom_size;
struct pch_phub_reg *chip = dev_get_drvdata(dev);
if (count != 18)
@@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
(u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3],
(u32 *)&mac[4], (u32 *)&mac[5]);
+ chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
+ if (!chip->pch_phub_extrom_base_address)
+ return -ENOMEM;
+
pch_phub_write_gbe_mac_addr(chip, mac);
+ pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
return count;
}
@@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
int retval;
int ret;
- ssize_t rom_size;
struct pch_phub_reg *chip;
chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
@@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
"in pch_phub_base_address variable is %p\n", __func__,
chip->pch_phub_base_address);
- if (id->driver_data != 3) {
- chip->pch_phub_extrom_base_address =\
- pci_map_rom(pdev, &rom_size);
- if (chip->pch_phub_extrom_base_address == 0) {
- dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__);
- ret = -ENOMEM;
- goto err_pci_map;
- }
- dev_dbg(&pdev->dev, "%s : "
- "pci_map_rom SUCCESS and value in "
- "pch_phub_extrom_base_address variable is %p\n",
- __func__, chip->pch_phub_extrom_base_address);
- }
+ chip->pdev = pdev; /* Save pci device struct */
if (id->driver_data == 1) { /* EG20T PCH */
const char *board_name;
@@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
chip->pch_opt_rom_start_address =\
PCH_PHUB_ROM_START_ADDR_ML7223;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
+ } else if (id->driver_data == 5) { /* ML7831 */
+ retval = sysfs_create_file(&pdev->dev.kobj,
+ &dev_attr_pch_mac.attr);
+ if (retval)
+ goto err_sysfs_create;
+
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
+ if (retval)
+ goto exit_bin_attr;
+
+ /* set the prefetch value */
+ iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
+ /* set the interrupt delay value */
+ iowrite32(0x25, chip->pch_phub_base_address + 0x44);
+ chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
+ chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
}
chip->ioh_type = id->driver_data;
@@ -773,8 +812,6 @@ exit_bin_attr:
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
err_sysfs_create:
- pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
-err_pci_map:
pci_iounmap(pdev, chip->pch_phub_base_address);
err_pci_iomap:
pci_release_regions(pdev);
@@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev)
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
- pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address);
pci_iounmap(pdev, chip->pch_phub_base_address);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, },
{ }
};
MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
@@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void)
module_init(pch_phub_pci_init);
module_exit(pch_phub_pci_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 42f067347bc..3fac67a5204 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -576,7 +576,7 @@ xpnet_init(void)
* report an error if the data is not retrievable and the
* packet will be dropped.
*/
- xpnet_device->features = NETIF_F_NO_CSUM;
+ xpnet_device->features = NETIF_F_HW_CSUM;
result = register_netdev(xpnet_device);
if (result != 0) {
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index cfbddbef11d..43d073bc1d9 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void)
}
module_exit(spear_pcie_gadget_exit);
-MODULE_ALIAS("pcie-gadget-spear");
+MODULE_ALIAS("platform:pcie-gadget-spear");
MODULE_AUTHOR("Pratyush Anand");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f9530..1e0e27cbe98 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some Micron MMC cards need a longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb20e7..950b97d7412 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
data->timeout_clks = 0;
}
}
+
+ /*
+ * Some cards require longer data read timeout than indicated in CSD.
+ * Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 300ms has proven enough. If necessary,
+ * this value can be increased if other problematic cards require this.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 300000000;
+ data->timeout_clks = 0;
+ }
+
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
mmc_host_clk_release(host);
}
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int timeout;
+ unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+ int err = 0;
+
+ card = host->card;
+
+ /*
+ * Send power notify command only if card
+ * is mmc and notify state is powered ON
+ */
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+}
+
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
- struct mmc_card *card;
- unsigned int notify_type;
- unsigned int timeout;
- int err;
-
mmc_host_clk_hold(host);
- card = host->card;
host->ios.clock = 0;
host->ios.vdd = 0;
- if (card && mmc_card_mmc(card) &&
- (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
- if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
- notify_type = EXT_CSD_POWER_OFF_SHORT;
- timeout = card->ext_csd.generic_cmd6_time;
- card->poweroff_notify_state = MMC_POWEROFF_SHORT;
- } else {
- notify_type = EXT_CSD_POWER_OFF_LONG;
- timeout = card->ext_csd.power_off_longtime;
- card->poweroff_notify_state = MMC_POWEROFF_LONG;
- }
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
-
- if (err && err != -EBADMSG)
- pr_err("Device failed to respond within %d poweroff "
- "time. Forcefully powering down the device\n",
- timeout);
-
- /* Set the card state to no notification after the poweroff */
- card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
- }
+ mmc_poweroff_notify(host);
/*
* Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
mmc_bus_get(host);
- if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
err = host->bus_ops->sleep(host);
mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
* pre-claim the host.
*/
if (mmc_try_claim_host(host)) {
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ /*
+ * For eMMC 4.5 devices, send the notify command
+ * before sleep, because in the sleep state eMMC 4.5
+ * devices respond only to RESET and AWAKE commands
+ */
+ mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
+ }
+ mmc_do_release_host(host);
+
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
* We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
host->pm_flags = 0;
err = 0;
}
- mmc_do_release_host(host);
} else {
err = -EBUSY;
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e8a5eb38748..d31c78b72b0 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
- /*
- * Enable runtime power management by default. This flag was added due
- * to runtime power management causing disruption for some users, but
- * the power on/off code has been improved since then.
- *
- * We'll enable this flag by default as an experiment, and if no
- * problems are reported, we will follow up later and remove the flag
- * altogether.
- */
- host->caps = MMC_CAP_POWER_OFF_CARD;
-
return host;
free:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a6279..d240427c124 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* set the notification byte in the ext_csd register of device
*/
if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
- (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+ (card->ext_csd.rev >= 6)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
- }
- if (!err)
- card->poweroff_notify_state = MMC_POWERED_ON;
+ /*
+ * The err can be -EBADMSG or 0,
+ * so check for success and update the flag
+ */
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+ }
/*
* Activate high speed (if supported)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 50b5f9926f6..0726e59fd41 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -675,7 +675,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
unsigned int status)
{
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -754,8 +755,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
}
if (!cmd->data || cmd->error) {
- if (host->data)
+ if (host->data) {
+ /* Terminate the DMA transfer */
+ if (dma_inprogress(host))
+ mmci_dma_data_error(host);
mmci_stop_data(host);
+ }
mmci_request_end(host, cmd->mrq);
} else if (!(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
@@ -955,8 +960,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
data = host->data;
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+ MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+ MCI_DATABLOCKEND) && data)
mmci_data_irq(host, data, status);
cmd = host->cmd;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61e12d..8e0fbe99404 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"failed to config DMA channel. Falling back to PIO\n");
dma_release_channel(host->dma);
host->do_dma = 0;
+ host->dma = NULL;
}
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31c822..d5fe43d53c5 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
+ host->data->host_cookie = 0;
}
host->data = NULL;
}
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (host->use_dma) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7621c..b4257e70061 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <linux/module.h>
#include <mach/cns3xxx.h>
#include "sdhci-pltfm.h"
@@ -108,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_cns3xxx_probe,
.remove = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_cns3xxx_init(void)
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index f2d29dca442..a81312c91f7 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_dove_probe,
.remove = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_dove_init(void)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 4b976f00ea8..38ebc4ea259 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.name = "sdhci-esdhc-imx",
.owner = THIS_MODULE,
.of_match_table = imx_esdhc_dt_ids,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.id_table = imx_esdhc_devtype,
.probe = sdhci_esdhc_imx_probe,
.remove = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_imx_init(void)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 59e9d003e58..01e5f627e0f 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.owner = THIS_MODULE,
.of_match_table = sdhci_esdhc_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_esdhc_probe,
.remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_esdhc_init(void)
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 9b0d794a4f6..3619adc7d9f 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
.name = "sdhci-hlwd",
.owner = THIS_MODULE,
.of_match_table = sdhci_hlwd_of_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_hlwd_probe,
.remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_hlwd_init(void)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index d833d9c2f7e..6878a94626b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
int (*probe_slot) (struct sdhci_pci_slot *);
void (*remove_slot) (struct sdhci_pci_slot *, int);
- int (*suspend) (struct sdhci_pci_chip *,
- pm_message_t);
+ int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
};
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
jmicron_enable_mmc(slot->host, 0);
}
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i;
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
#ifdef CONFIG_PM
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (!slot)
continue;
- ret = sdhci_suspend_host(slot->host, state);
+ ret = sdhci_suspend_host(slot->host);
if (ret) {
for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
pci_set_power_state(pdev, PCI_D3hot);
} else {
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
}
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
- pm_message_t state = { .event = PM_EVENT_SUSPEND };
int i, ret;
chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
}
if (chip->fixes && chip->fixes->suspend) {
- ret = chip->fixes->suspend(chip, state);
+ ret = chip->fixes->suspend(chip);
if (ret) {
for (i = chip->num_slots - 1; i >= 0; i--)
sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
+ .suspend = sdhci_pci_suspend,
+ .resume = sdhci_pci_resume,
.runtime_suspend = sdhci_pci_runtime_suspend,
.runtime_resume = sdhci_pci_runtime_resume,
.runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
.id_table = pci_ids,
.probe = sdhci_pci_probe,
.remove = __devexit_p(sdhci_pci_remove),
- .suspend = sdhci_pci_suspend,
- .resume = sdhci_pci_resume,
.driver = {
.pm = &sdhci_pci_pm_ops
},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index a9e12ea0558..03970bcb349 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, state);
+ return sdhci_suspend_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
#endif /* CONFIG_PM */
static int __init sdhci_pltfm_drv_init(void)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 3a9fc3f4084..37e0e184a0b 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
extern int sdhci_pltfm_unregister(struct platform_device *pdev);
#ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index d4bf6d30c7b..7a039c3cb1f 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
.remove = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav2_init(void)
{
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index cff4ad3e7a5..15673a7ee6a 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
.owner = THIS_MODULE,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav3_probe,
.remove = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_pxav3_init(void)
{
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e722efc..0d33ff0d67f 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -622,33 +622,38 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
- return sdhci_suspend_host(host, pm);
+ return sdhci_suspend_host(host);
}
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
{
- struct sdhci_host *host = platform_get_drvdata(dev);
+ struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+ .suspend = sdhci_s3c_suspend,
+ .resume = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
#else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
#endif
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
- .suspend = sdhci_s3c_suspend,
- .resume = sdhci_s3c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
+ .pm = SDHCI_S3C_PMOPS,
},
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 89699e861fc..e2e18d3f949 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
.name = "sdhci-tegra",
.owner = THIS_MODULE,
.of_match_table = sdhci_tegra_dt_match,
+ .pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_tegra_probe,
.remove = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-#endif
};
static int __init sdhci_tegra_init(void)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6d8eea32354..19ed580f2ca 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2327,7 +2327,7 @@ out:
#ifdef CONFIG_PM
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0a5b65460d8..a04d4d0c6fd 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
#ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
extern int sdhci_resume_host(struct sdhci_host *host);
extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c8e20..d5505f3fe2a 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->power) {
pm_runtime_put(&host->pd->dev);
host->power = false;
- if (p->down_pwr)
+ if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
p->down_pwr(host->pd);
}
host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60cda16..4208b395806 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr)
+ if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
host->set_pwr(host->pdev, 0);
if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
pdata->power) {
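Both the sh_mmcif and tmio_mmc hunks gate the board-level power callback on ios->power_mode == MMC_POWER_OFF, so intermediate transitions (for example MMC_POWER_UP followed by MMC_POWER_ON) no longer cut slot power. A simplified sketch of that pattern in a set_ios handler; my_host and its set_power hook are illustrative, not the real driver structures:

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

struct my_host {
	struct platform_device *pdev;
	void (*set_power)(struct platform_device *pdev, int state);
};

static void my_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct my_host *host = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		if (host->set_power)
			host->set_power(host->pdev, 1);	/* power the slot up */
	} else if (ios->power_mode == MMC_POWER_OFF) {
		/* only cut board power on an explicit power-off request */
		if (host->set_power)
			host->set_power(host->pdev, 0);
	}
	/* clock and bus-width programming would follow here */
}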
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index e8f6e65183d..2ec978bc32b 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
static int firmware_rom_wait_states = 0x1C;
#endif
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
MODULE_PARM_DESC(firmware_rom_wait_states,
"ROM wait states byte=RRRIIEEE (Reserved Internal External)");
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 94f55348972..45876d0e5b8 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
if (!err)
dev_info(&pdev->dev, "registered mtd device\n");
- /* add the whole device. */
- err = mtd_device_register(info->mtd, NULL, 0);
- if (err)
- dev_err(&pdev->dev, "failed to register the entire device\n");
+ if (pdata->nr_partitions) {
+ /* add the whole device. */
+ err = mtd_device_register(info->mtd, NULL, 0);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register the entire device\n");
+ }
+ }
return err;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 411a17df9fc..2a25b6789af 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
}
info->mtd->owner = THIS_MODULE;
- mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+ mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 071b63420f0..493ec2fcf97 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -21,9 +21,9 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
-
#include "gpmi-nand.h"
/* add our owner bbt descriptor */
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ee1713907b9..f8aacf48ecd 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
if (!flash_np)
return -ENODEV;
- ppdata->of_node = flash_np;
+ ppdata.of_node = flash_np;
ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
dev_name(&ndfc->ofdev->dev), flash_np->name);
if (!ndfc->mtd.name) {
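The ndfc fix is a pointer-vs-struct slip: ppdata is a struct mtd_part_parser_data on the stack, so its of_node member is set with '.' and the structure is passed by address to the parser. A minimal sketch under that assumption, using the mtd_device_parse_register() signature of this kernel series; example_register() is a hypothetical helper:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>

static int example_register(struct mtd_info *mtd, struct device_node *flash_np)
{
	struct mtd_part_parser_data ppdata = { };

	ppdata.of_node = flash_np;	/* '.' because ppdata is a local struct, not a pointer */
	return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
}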
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 583f66cd5bb..9845afb37cc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -125,6 +125,8 @@ config IFB
'ifb1' etc.
Look at the iproute2 documentation directory for usage etc
+source "drivers/net/team/Kconfig"
+
config MACVLAN
tristate "MAC-VLAN support (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -241,10 +243,14 @@ source "drivers/atm/Kconfig"
source "drivers/net/caif/Kconfig"
+source "drivers/net/dsa/Kconfig"
+
source "drivers/net/ethernet/Kconfig"
source "drivers/net/fddi/Kconfig"
+source "drivers/net/hippi/Kconfig"
+
config NET_SB1000
tristate "General Instruments Surfboard 1000"
depends on PNP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa877cd2b13..1988881853a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_RIONET) += rionet.o
+obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a73d9dc80ff..84fb6349a59 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
menuconfig ARCNET
depends on NETDEVICES && (ISA || PCI || PCMCIA)
- bool "ARCnet support"
+ tristate "ARCnet support"
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
deleted file mode 100644
index 027a0ee7d85..00000000000
--- a/drivers/net/bonding/bond_ipv6.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/addrconf.h>
-#include <net/netns/generic.h>
-#include "bonding.h"
-
-/*
- * Assign bond->master_ipv6 to the next IPv6 address in the list, or
- * zero it out if there are none.
- */
-static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
-{
- struct inet6_dev *idev;
-
- if (!dev)
- return;
-
- idev = in6_dev_get(dev);
- if (!idev)
- return;
-
- read_lock_bh(&idev->lock);
- if (!list_empty(&idev->addr_list)) {
- struct inet6_ifaddr *ifa
- = list_first_entry(&idev->addr_list,
- struct inet6_ifaddr, if_list);
- ipv6_addr_copy(addr, &ifa->addr);
- } else
- ipv6_addr_set(addr, 0, 0, 0, 0);
-
- read_unlock_bh(&idev->lock);
-
- in6_dev_put(idev);
-}
-
-static void bond_na_send(struct net_device *slave_dev,
- struct in6_addr *daddr,
- int router,
- unsigned short vlan_id)
-{
- struct in6_addr mcaddr;
- struct icmp6hdr icmp6h = {
- .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
- };
- struct sk_buff *skb;
-
- icmp6h.icmp6_router = router;
- icmp6h.icmp6_solicited = 0;
- icmp6h.icmp6_override = 1;
-
- addrconf_addr_solict_mult(daddr, &mcaddr);
-
- pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
- slave_dev->name, &mcaddr, daddr);
-
- skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
- ND_OPT_TARGET_LL_ADDR);
-
- if (!skb) {
- pr_err("NA packet allocation failed\n");
- return;
- }
-
- if (vlan_id) {
- /* The Ethernet header is not present yet, so it is
- * too early to insert a VLAN tag. Force use of an
- * out-of-line tag here and let dev_hard_start_xmit()
- * insert it if the slave hardware can't.
- */
- skb = __vlan_hwaccel_put_tag(skb, vlan_id);
- if (!skb) {
- pr_err("failed to insert VLAN tag\n");
- return;
- }
- }
-
- ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
-}
-
-/*
- * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
- * the bonding master. This will help the switch learn our address
- * if in active-backup mode.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-void bond_send_unsolicited_na(struct bonding *bond)
-{
- struct slave *slave = bond->curr_active_slave;
- struct vlan_entry *vlan;
- struct inet6_dev *idev;
- int is_router;
-
- pr_debug("%s: bond %s slave %s\n", bond->dev->name,
- __func__, slave ? slave->dev->name : "NULL");
-
- if (!slave || !bond->send_unsol_na ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
- return;
-
- bond->send_unsol_na--;
-
- idev = in6_dev_get(bond->dev);
- if (!idev)
- return;
-
- is_router = !!idev->cnf.forwarding;
-
- in6_dev_put(idev);
-
- if (!ipv6_addr_any(&bond->master_ipv6))
- bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
- bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
- vlan->vlan_id);
- }
- }
-}
-
-/*
- * bond_inet6addr_event: handle inet6addr notifier chain events.
- *
- * We keep track of device IPv6 addresses primarily to use as source
- * addresses in NS probes.
- *
- * We track one IPv6 for the main device (if it has one).
- */
-static int bond_inet6addr_event(struct notifier_block *this,
- unsigned long event,
- void *ptr)
-{
- struct inet6_ifaddr *ifa = ptr;
- struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
- struct bonding *bond;
- struct vlan_entry *vlan;
- struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
- if (bond->dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&bond->master_ipv6))
- ipv6_addr_copy(&bond->master_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&bond->master_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(bond->dev,
- &bond->master_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev,
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&vlan->vlan_ipv6))
- ipv6_addr_copy(&vlan->vlan_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&vlan->vlan_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(vlan_dev,
- &vlan->vlan_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block bond_inet6addr_notifier = {
- .notifier_call = bond_inet6addr_event,
-};
-
-void bond_register_ipv6_notifier(void)
-{
- register_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
-void bond_unregister_ipv6_notifier(void)
-{
- unregister_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0c57725648..435984ad8b2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
* @bond_dev: bonding net device that got called
* @vid: vlan id being added
*/
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *stop_at;
int i, res;
bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_add_vid) {
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
- }
+ res = vlan_vid_add(slave->dev, vid);
+ if (res)
+ goto unwind;
}
res = bond_add_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to add vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
+
+unwind:
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+ vlan_vid_del(slave->dev, vid);
+
+ return res;
}
/**
@@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
* @bond_dev: bonding net device that got called
* @vid: vlan id being removed
*/
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int i, res;
- bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_kill_vid) {
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
- }
- }
+ bond_for_each_slave(bond, slave, i)
+ vlan_vid_del(slave->dev, vid);
res = bond_del_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to remove vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
}
static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
{
struct vlan_entry *vlan;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_add_vid))
- return;
+ int res;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ res = vlan_vid_add(slave_dev, vlan->vlan_id);
+ if (res)
+ pr_warning("%s: Failed to add vlan id %d to device %s\n",
+ bond->dev->name, vlan->vlan_id,
+ slave_dev->name);
+ }
}
static void bond_del_vlans_from_slave(struct bonding *bond,
struct net_device *slave_dev)
{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct vlan_entry *vlan;
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_kill_vid))
- return;
-
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
if (!vlan->vlan_id)
continue;
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+ vlan_vid_del(slave_dev, vlan->vlan_id);
}
}
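The reworked bond_vlan_rx_add_vid() above propagates errors from vlan_vid_add() and, on failure, walks back over the slaves that were already updated and removes the VID again. The same rollback idiom, reduced to a plain array walk with stand-in callbacks (add_one/del_one are placeholders for vlan_vid_add/vlan_vid_del, not bonding code):

#include <linux/netdevice.h>

static int add_vid_to_all(struct net_device **slaves, int count, u16 vid,
			  int (*add_one)(struct net_device *, u16),
			  void (*del_one)(struct net_device *, u16))
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = add_one(slaves[i], vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* undo only the slaves that were successfully updated */
	while (--i >= 0)
		del_one(slaves[i], vid);
	return err;
}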
@@ -1325,11 +1324,12 @@ static int bond_sethwaddr(struct net_device *bond_dev,
return 0;
}
-static u32 bond_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t bond_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct slave *slave;
struct bonding *bond = netdev_priv(dev);
- u32 mask;
+ netdev_features_t mask;
int i;
read_lock(&bond->lock);
@@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- u32 vlan_features = BOND_VLAN_FEATURES;
+ netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
int i;
@@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
"but new slave device does not support netpoll.\n",
bond_dev->name);
res = -EBUSY;
- goto err_close;
+ goto err_detach;
}
}
#endif
@@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = bond_create_slave_symlinks(bond_dev, slave_dev);
if (res)
- goto err_close;
+ goto err_detach;
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
@@ -1852,6 +1852,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_dest_symlinks:
bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_detach:
+ write_lock_bh(&bond->lock);
+ bond_detach_slave(bond, new_slave);
+ write_unlock_bh(&bond->lock);
+
err_close:
dev_close(slave_dev);
@@ -1897,7 +1902,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr addr;
- u32 old_features = bond_dev->features;
+ netdev_features_t old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2553,30 +2558,6 @@ re_arm:
}
}
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
- struct in_device *idev;
- struct in_ifaddr *ifa;
- __be32 addr = 0;
-
- if (!dev)
- return 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- goto out;
-
- ifa = idev->ifa_list;
- if (!ifa)
- goto out;
-
- addr = ifa->ifa_local;
-out:
- rcu_read_unlock();
- return addr;
-}
-
static int bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct vlan_entry *vlan;
@@ -3322,6 +3303,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
struct bonding *bond;
struct vlan_entry *vlan;
+ /* we only care about primary address */
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ return NOTIFY_DONE;
+
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {
@@ -3329,7 +3314,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
bond->master_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- bond->master_ip = bond_glean_dev_ip(bond->dev);
+ bond->master_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -3345,8 +3330,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
vlan->vlan_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- vlan->vlan_ip =
- bond_glean_dev_ip(vlan_dev);
+ vlan->vlan_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -4360,7 +4344,7 @@ static void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
bond_dev->features |= bond_dev->hw_features;
}
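Several hunks in bond_main.c swap u32 for netdev_features_t, the dedicated feature-mask type introduced in this series. A tiny sketch of an ndo_fix_features hook written against that type; the mask manipulation shown is only an example, not the bonding logic:

#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* e.g. refuse TSO unless scatter/gather stays enabled */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;
	return features;
}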
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5a20804fdec..4ef7e2fd9fe 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -319,6 +319,13 @@ static ssize_t bonding_store_mode(struct device *d,
goto out;
}
+ if (bond->slave_cnt > 0) {
+ pr_err("unable to update mode of %s because it has slaves.\n",
+ bond->dev->name);
+ ret = -EPERM;
+ goto out;
+ }
+
new_value = bond_parse_parm(buf, bond_mode_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid mode value %.*s.\n",
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 073352517ad..0a4fc62a381 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
dev_dbg(&cfhsi->ndev->dev, "%s.\n",
__func__);
-
- ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
- if (ret) {
- dev_warn(&cfhsi->ndev->dev,
- "%s: can't wake up HSI interface: %d.\n",
- __func__, ret);
- return ret;
- }
-
do {
ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
&fifo_occupancy);
@@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
}
} while (1);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
return ret;
}
@@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
/* Create HSI frame. */
len = cfhsi_tx_frm(desc, cfhsi);
- BUG_ON(!len);
+ WARN_ON(!len);
/* Set up new transfer. */
res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 23406e62c0b..8a3054b8481 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF);
/*This list is protected by the rtnl lock. */
static LIST_HEAD(ser_list);
-static int ser_loop;
+static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
-static int ser_use_stx = 1;
+static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
-static int ser_use_fcs = 1;
+static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
@@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser)
skb_pull(skb, tty_wr);
if (skb->len == 0) {
struct sk_buff *tmp = skb_dequeue(&ser->head);
- BUG_ON(tmp != skb);
+ WARN_ON(tmp != skb);
if (in_interrupt())
dev_kfree_skb_irq(skb);
else
@@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
ser = tty->disc_data;
BUG_ON(ser == NULL);
- BUG_ON(ser->tty != tty);
+ WARN_ON(ser->tty != tty);
handle_tx(ser);
}
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index d4b26fb24ed..5b2041319a3 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_ON);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule the work queue. if required */
if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
list_entry(pshm_drv->rx_full_list.next, struct buf_list,
list);
list_del_init(&pbuf->list);
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Retrieve pointer to start of the packet descriptor area. */
pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work)
/* Get a suitable CAIF packet and copy in data. */
skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
frm_pck_len + 1);
- BUG_ON(skb == NULL);
+
+ if (skb == NULL) {
+ pr_info("OOM: Try next frame in descriptor\n");
+ break;
+ }
p = skb_put(skb, frm_pck_len);
memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
pck_desc++;
}
+ spin_lock_irqsave(&pshm_drv->lock, flags);
list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
if (skb == NULL)
goto send_msg;
-
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
@@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_OFF);
+ spin_lock_irqsave(&pshm_drv->lock, flags);
}
/*
* We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
}
skb = skb_dequeue(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
/* Copy in CAIF frame. */
skb_copy_bits(skb, 0, pbuf->desc_vptr +
pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
frmlen;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
/* Fill in the shared memory packet descriptor area. */
pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@ send_msg:
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
struct shmdrv_layer *pshm_drv;
- unsigned long flags = 0;
pshm_drv = netdev_priv(shm_netdev);
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
skb_queue_tail(&pshm_drv->sk_qhead, skb);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
/* Schedule Tx work queue. for deferred processing of skbs*/
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
(NR_TX_BUF * TX_BUF_SZ);
+ spin_lock_init(&pshm_drv->lock);
INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
if (pshm_dev->shm_loopback)
- tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
else
tx_buf->desc_vptr =
ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
rx_buf->len = RX_BUF_SZ;
if (pshm_dev->shm_loopback)
- rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
else
rx_buf->desc_vptr =
ioremap(rx_buf->phy_addr, RX_BUF_SZ);
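The caif_shmcore hunks drop pshm_drv->lock around the CAIF flow-control callback and the descriptor processing, and switch to dev_kfree_skb_irq() where the skb is freed in atomic context, because the callback may call back into the driver. A minimal sketch of the lock-drop-around-callback pattern with illustrative names:

#include <linux/spinlock.h>

struct my_drv {
	spinlock_t lock;
	int tx_blocked;
	void (*flowctrl)(void *ctx, int on);	/* may re-enter the driver */
	void *ctx;
};

static void my_update_flow(struct my_drv *drv, int on)
{
	unsigned long flags;

	spin_lock_irqsave(&drv->lock, flags);
	drv->tx_blocked = !on;
	/* drop the lock before calling out of the driver */
	spin_unlock_irqrestore(&drv->lock, flags);

	drv->flowctrl(drv->ctx, on);
}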
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 05e791f46ae..96391c36fa7 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver");
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
-static int spi_loop;
+static bool spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_tx,
+ cfspi->xfer.va_tx[0],
(cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev)
netif_stop_queue(dev);
return 0;
}
-static const struct net_device_ops cfspi_ops = {
- .ndo_open = cfspi_open,
- .ndo_stop = cfspi_close,
- .ndo_start_xmit = cfspi_xmit
-};
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
{
+ int res = 0;
struct cfspi *cfspi = netdev_priv(dev);
- dev->features = 0;
- dev->netdev_ops = &cfspi_ops;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->tx_queue_len = 0;
- dev->mtu = SPI_MAX_PAYLOAD_SIZE;
- dev->destructor = free_netdev;
- skb_queue_head_init(&cfspi->qhead);
- skb_queue_head_init(&cfspi->chead);
- cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfspi->cfdev.use_frag = false;
- cfspi->cfdev.use_stx = false;
- cfspi->cfdev.use_fcs = false;
- cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
- struct cfspi *cfspi = NULL;
- struct net_device *ndev;
- struct cfspi_dev *dev;
- int res;
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
- ndev = alloc_netdev(sizeof(struct cfspi),
- "cfspi%d", cfspi_setup);
- if (!ndev)
- return -ENOMEM;
-
- cfspi = netdev_priv(ndev);
- netif_stop_queue(ndev);
- cfspi->ndev = ndev;
- cfspi->pdev = pdev;
/* Set flow info. */
cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev)
cfspi->slave_talked = false;
}
- /* Assign the SPI device. */
- cfspi->dev = dev;
- /* Assign the device ifc to this SPI interface. */
- dev->ifc = &cfspi->ifc;
-
/* Allocate DMA buffers. */
- cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
- if (!cfspi->xfer.va_tx) {
+ cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+ if (!cfspi->xfer.va_tx[0]) {
res = -ENODEV;
- goto err_dma_alloc_tx;
+ goto err_dma_alloc_tx_0;
}
cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev)
/* Schedule the work queue. */
queue_work(cfspi->wq, &cfspi->work);
+ return 0;
+
+ err_create_wq:
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+ return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+
+ /* Remove from list. */
+ spin_lock(&cfspi_list_lock);
+ list_del(&cfspi->list);
+ spin_unlock(&cfspi_list_lock);
+
+ cfspi->ndev = NULL;
+ /* Free DMA buffers. */
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ set_bit(SPI_TERMINATE, &cfspi->state);
+ wake_up_interruptible(&cfspi->wait);
+ destroy_workqueue(cfspi->wq);
+ /* Destroy debugfs directory and files. */
+ dev_debugfs_rem(cfspi);
+ return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+ .ndo_open = cfspi_open,
+ .ndo_stop = cfspi_close,
+ .ndo_init = cfspi_init,
+ .ndo_uninit = cfspi_uninit,
+ .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &cfspi_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+ dev->tx_queue_len = 0;
+ dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&cfspi->qhead);
+ skb_queue_head_init(&cfspi->chead);
+ cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+ cfspi->cfdev.use_frag = false;
+ cfspi->cfdev.use_stx = false;
+ cfspi->cfdev.use_fcs = false;
+ cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+ struct cfspi *cfspi = NULL;
+ struct net_device *ndev;
+ struct cfspi_dev *dev;
+ int res;
+ dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+ ndev = alloc_netdev(sizeof(struct cfspi),
+ "cfspi%d", cfspi_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ cfspi = netdev_priv(ndev);
+ netif_stop_queue(ndev);
+ cfspi->ndev = ndev;
+ cfspi->pdev = pdev;
+
+ /* Assign the SPI device. */
+ cfspi->dev = dev;
+ /* Assign the device ifc to this SPI interface. */
+ dev->ifc = &cfspi->ifc;
+
/* Register network device. */
res = register_netdev(ndev);
if (res) {
@@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
return res;
err_net_reg:
- dev_debugfs_rem(cfspi);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- err_create_wq:
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
free_netdev(ndev);
return res;
@@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
int cfspi_spi_remove(struct platform_device *pdev)
{
- struct list_head *list_node;
- struct list_head *n;
- struct cfspi *cfspi = NULL;
- struct cfspi_dev *dev;
-
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
- spin_lock(&cfspi_list_lock);
- list_for_each_safe(list_node, n, &cfspi_list) {
- cfspi = list_entry(list_node, struct cfspi, list);
- /* Find the corresponding device. */
- if (cfspi->dev == dev) {
- /* Remove from list. */
- list_del(list_node);
- /* Free DMA buffers. */
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- /* Destroy debugfs directory and files. */
- dev_debugfs_rem(cfspi);
- unregister_netdev(cfspi->ndev);
- spin_unlock(&cfspi_list_lock);
- return 0;
- }
- }
- spin_unlock(&cfspi_list_lock);
- return -ENODEV;
+ /* Everything is done in cfspi_uninit(). */
+ return 0;
}
static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void)
list_for_each_safe(list_node, n, &cfspi_list) {
cfspi = list_entry(list_node, struct cfspi, list);
- platform_device_unregister(cfspi->pdev);
+ unregister_netdev(cfspi->ndev);
}
/* Destroy sysfs files. */
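The caif_spi rework moves DMA-buffer allocation and list handling into ndo_init/ndo_uninit, so register_netdev() and unregister_netdev() drive setup and teardown and the platform remove callback becomes trivial. A hedged sketch of that split for a hypothetical driver (the 4 KiB buffer is only an example):

#include <linux/netdevice.h>
#include <linux/slab.h>

struct my_priv {
	void *dma_buf;
};

static int my_ndo_init(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* called from register_netdev() */
	priv->dma_buf = kzalloc(4096, GFP_KERNEL);
	return priv->dma_buf ? 0 : -ENOMEM;
}

static void my_ndo_uninit(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* called from unregister_netdev() */
	kfree(priv->dma_buf);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_init	= my_ndo_init,
	.ndo_uninit	= my_ndo_uninit,
};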
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f6c98fb4a51..ab45758c49a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/c_can/Kconfig"
+source "drivers/net/can/cc770/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 24ebfe8d758..938be37b670 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -14,6 +14,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 044ea0647b0..6ea905c2cf6 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1383,18 +1383,7 @@ static struct platform_driver at91_can_driver = {
.id_table = at91_can_id_table,
};
-static int __init at91_can_module_init(void)
-{
- return platform_driver_register(&at91_can_driver);
-}
-
-static void __exit at91_can_module_exit(void)
-{
- platform_driver_unregister(&at91_can_driver);
-}
-
-module_init(at91_can_module_init);
-module_exit(at91_can_module_exit);
+module_platform_driver(at91_can_driver);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
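module_platform_driver() replaces the hand-rolled module_init/module_exit pair deleted above; for a driver that only registers and unregisters itself, the macro expands to essentially the following boilerplate (shown here for a dummy my_driver):

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver my_driver = {
	.driver = {
		.name	= "my-driver",
		.owner	= THIS_MODULE,
	},
};

/* module_platform_driver(my_driver); is roughly equivalent to: */
static int __init my_driver_init(void)
{
	return platform_driver_register(&my_driver);
}
module_init(my_driver_init);

static void __exit my_driver_exit(void)
{
	platform_driver_unregister(&my_driver);
}
module_exit(my_driver_exit);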
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a1c5abc38cd..349e0fabb63 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -676,17 +676,7 @@ static struct platform_driver bfin_can_driver = {
},
};
-static int __init bfin_can_init(void)
-{
- return platform_driver_register(&bfin_can_driver);
-}
-module_init(bfin_can_init);
-
-static void __exit bfin_can_exit(void)
-{
- platform_driver_unregister(&bfin_can_driver);
-}
-module_exit(bfin_can_exit);
+module_platform_driver(bfin_can_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 0b5c6f8bdd3..5e1a5ff6476 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -197,17 +197,7 @@ static struct platform_driver c_can_plat_driver = {
.remove = __devexit_p(c_can_plat_remove),
};
-static int __init c_can_plat_init(void)
-{
- return platform_driver_register(&c_can_plat_driver);
-}
-module_init(c_can_plat_init);
-
-static void __exit c_can_plat_exit(void)
-{
- platform_driver_unregister(&c_can_plat_driver);
-}
-module_exit(c_can_plat_exit);
+module_platform_driver(c_can_plat_driver);
MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
new file mode 100644
index 00000000000..22c07a8c8b4
--- /dev/null
+++ b/drivers/net/can/cc770/Kconfig
@@ -0,0 +1,21 @@
+menuconfig CAN_CC770
+ tristate "Bosch CC770 and Intel AN82527 devices"
+ depends on CAN_DEV && HAS_IOMEM
+
+if CAN_CC770
+
+config CAN_CC770_ISA
+ tristate "ISA Bus based legacy CC770 driver"
+ ---help---
+ This driver adds legacy support for CC770 and AN82527 chips
+ connected to the ISA bus using I/O port, memory mapped or
+ indirect access.
+
+config CAN_CC770_PLATFORM
+ tristate "Generic Platform Bus based CC770 driver"
+ ---help---
+ This driver adds support for the CC770 and AN82527 chips
+ connected to the "platform bus" (Linux abstraction for directly
+ to the processor attached devices).
+
+endif
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
new file mode 100644
index 00000000000..9fb8321b33e
--- /dev/null
+++ b/drivers/net/can/cc770/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Bosch CC770 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_CC770) += cc770.o
+obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
+obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
new file mode 100644
index 00000000000..76689674764
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.c
@@ -0,0 +1,881 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
+
+/*
+ * The CC770 is a CAN controller from Bosch, which is 100% compatible
+ * with the AN82527 from Intel, but with "bugs" being fixed and some
+ * additional functionality, mainly:
+ *
+ * 1. RX and TX error counters are readable.
+ * 2. Support of silent (listen-only) mode.
+ * 3. Message object 15 can receive all types of frames, also RTR and EFF.
+ *
+ * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
+ * which explains in detail the compatibility between the CC770 and the
+ * 82527. This driver uses the additional functionality 3 on real CC770
+ * devices. Unfortunately, the CC770 still does not store the message
+ * identifier of received remote transmission request frames and
+ * therefore it is set to 0.
+ *
+ * The message objects 1..14 can be used for TX and RX while message
+ * object 15 is optimized for RX. It has a shadow register for reliable
+ * data reception under heavy bus load. Therefore it makes sense to use
+ * this message object for the needed use case. The frame type (EFF/SFF)
+ * for message object 15 can be defined via the kernel module parameter
+ * "msgobj15_eff". If it is not 0, the object will receive 29-bit EFF frames,
+ * otherwise 11-bit SFF messages.
+ */
+static int msgobj15_eff;
+module_param(msgobj15_eff, int, S_IRUGO);
+MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
+ "(default: 11-bit standard frames)");
+
+static int i82527_compat;
+module_param(i82527_compat, int, S_IRUGO);
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
+ "without using additional functions");
+
+/*
+ * This driver uses the last 5 message objects 11..15. The definitions
+ * and structure below allows to configure and assign them to the real
+ * message object.
+ */
+static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
+ [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX,
+ [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR,
+ [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR |
+ CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_TX] = 0,
+};
+
+static struct can_bittiming_const cc770_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+static inline int intid2obj(unsigned int intid)
+{
+ if (intid == 2)
+ return 0;
+ else
+ return MSGOBJ_LAST + 2 - intid;
+}
+
+static void enable_all_objs(const struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 msgcfg;
+ unsigned char obj_flags;
+ unsigned int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ obj_flags = priv->obj_flags[o];
+ mo = obj2msgobj(o);
+
+ if (obj_flags & CC770_OBJ_FLAG_RX) {
+ /*
+ * We don't need extra objects for RTR and EFF if
+ * the additional CC770 functions are enabled.
+ */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (o > 0)
+ continue;
+ netdev_dbg(dev, "Message object %d for "
+ "RX data, RTR, SFF and EFF\n", mo);
+ } else {
+ netdev_dbg(dev,
+ "Message object %d for RX %s %s\n",
+ mo, obj_flags & CC770_OBJ_FLAG_RTR ?
+ "RTR" : "data",
+ obj_flags & CC770_OBJ_FLAG_EFF ?
+ "EFF" : "SFF");
+ }
+
+ if (obj_flags & CC770_OBJ_FLAG_EFF)
+ msgcfg = MSGCFG_XTD;
+ else
+ msgcfg = 0;
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ msgcfg |= MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].config, msgcfg);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ else
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ } else {
+ netdev_dbg(dev, "Message object %d for "
+ "TX data, RTR, SFF and EFF\n", mo);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void disable_all_objs(const struct cc770_priv *priv)
+{
+ int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ mo = obj2msgobj(o);
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) {
+ if (o > 0 && priv->control_normal_mode & CTRL_EAF)
+ continue;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ } else {
+ /* Clear message object for send */
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void set_reset_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Enable configuration and put the chip in bus-off, disable interrupts */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Disable all used message objects */
+ disable_all_objs(priv);
+}
+
+static void set_normal_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register and pre-set last error code */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+ /* Enable all used message objects */
+ enable_all_objs(dev);
+
+ /*
+ * Clear bus-off, interrupts only for errors,
+ * not for status change
+ */
+ cc770_write_reg(priv, control, priv->control_normal_mode);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static void chipset_init(struct cc770_priv *priv)
+{
+ int mo, id, data;
+
+ /* Enable configuration and put chip in bus-off, disable interrupts */
+ cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI));
+
+ /* Set CLKOUT divider and slew rates */
+ cc770_write_reg(priv, clkout, priv->clkout);
+
+ /* Configure CPU interface / CLKOUT enable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /* Set bus configuration */
+ cc770_write_reg(priv, bus_config, priv->bus_config);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Clear and invalidate message objects */
+ for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) {
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_UNC | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_RES | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ for (data = 0; data < 8; data++)
+ cc770_write_reg(priv, msgobj[mo].data[data], 0);
+ for (id = 0; id < 4; id++)
+ cc770_write_reg(priv, msgobj[mo].id[id], 0);
+ cc770_write_reg(priv, msgobj[mo].config, 0);
+ }
+
+ /* Set all global ID masks to "don't care" */
+ cc770_write_reg(priv, global_mask_std[0], 0);
+ cc770_write_reg(priv, global_mask_std[1], 0);
+ cc770_write_reg(priv, global_mask_ext[0], 0);
+ cc770_write_reg(priv, global_mask_ext[1], 0);
+ cc770_write_reg(priv, global_mask_ext[2], 0);
+ cc770_write_reg(priv, global_mask_ext[3], 0);
+
+}
+
+static int cc770_probe_chip(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Enable configuration, put chip in bus-off, disable ints */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI);
+ /* Configure CPU interface / CLKOUT disable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /*
+ * Check if hardware reset is still inactive or maybe there
+ * is no chip in this address space
+ */
+ if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) {
+ netdev_info(dev, "probing @0x%p failed (reset)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Write and read back test pattern (some arbitrary values) */
+ cc770_write_reg(priv, msgobj[1].data[1], 0x25);
+ cc770_write_reg(priv, msgobj[2].data[3], 0x52);
+ cc770_write_reg(priv, msgobj[10].data[6], 0xc3);
+ if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) ||
+ (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) ||
+ (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) {
+ netdev_info(dev, "probing @0x%p failed (pattern)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Check if this chip is a CC770 supporting additional functions */
+ if (cc770_read_reg(priv, control) & CTRL_EAF)
+ priv->control_normal_mode |= CTRL_EAF;
+
+ return 0;
+}
+
+static void cc770_start(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* enter reset mode first */
+ if (priv->can.state != CAN_STATE_STOPPED)
+ set_reset_mode(dev);
+
+ /* leave reset mode */
+ set_normal_mode(dev);
+}
+
+static int cc770_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ cc770_start(dev);
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cc770_set_bittiming(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u8 btr0, btr1;
+
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+
+ cc770_write_reg(priv, bit_timing_0, btr0);
+ cc770_write_reg(priv, bit_timing_1, btr1);
+
+ return 0;
+}
+
+static int cc770_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ bec->txerr = cc770_read_reg(priv, tx_error_counter);
+ bec->rxerr = cc770_read_reg(priv, rx_error_counter);
+
+ return 0;
+}
+
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ u8 dlc, rtr;
+ u32 id;
+ int i;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ netif_stop_queue(dev);
+
+ dlc = cf->can_dlc;
+ id = cf->can_id;
+ if (cf->can_id & CAN_RTR_FLAG)
+ rtr = 0;
+ else
+ rtr = MSGCFG_DIR;
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+ if (id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config,
+ (dlc << 4) | rtr | MSGCFG_XTD);
+ cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
+ cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
+ cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
+ } else {
+ id &= CAN_SFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 3);
+ cc770_write_reg(priv, msgobj[mo].id[1], id << 5);
+ }
+
+ for (i = 0; i < dlc; i++)
+ cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+
+ stats->tx_bytes += dlc;
+
+ can_put_echo_skb(skb, dev, 0);
+
+ /*
+ * HM: We had some cases of repeated IRQs, so make sure the
+ * INT is acknowledged. It is already done further up, but
+ * doing it again here fixed the issue.
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ return NETDEV_TX_OK;
+}
+
+static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 config;
+ u32 id;
+ int i;
+
+ skb = alloc_can_skb(dev, &cf);
+ if (!skb)
+ return;
+
+ config = cc770_read_reg(priv, msgobj[mo].config);
+
+ if (ctrl1 & RMTPND_SET) {
+ /*
+ * Unfortunately, the chip does not store the real message
+ * identifier of the received remote transmission request
+ * frame. Therefore we set it to 0.
+ */
+ cf->can_id = CAN_RTR_FLAG;
+ if (config & MSGCFG_XTD)
+ cf->can_id |= CAN_EFF_FLAG;
+ cf->can_dlc = 0;
+ } else {
+ if (config & MSGCFG_XTD) {
+ id = cc770_read_reg(priv, msgobj[mo].id[3]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8;
+ id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16;
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24;
+ id >>= 3;
+ id |= CAN_EFF_FLAG;
+ } else {
+ id = cc770_read_reg(priv, msgobj[mo].id[1]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8;
+ id >>= 5;
+ }
+
+ cf->can_id = id;
+ cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
+ }
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static int cc770_err(struct net_device *dev, u8 status)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 lec;
+
+ netdev_dbg(dev, "status interrupt (%#x)\n", status);
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Use extended functions of the CC770 */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ cf->data[6] = cc770_read_reg(priv, tx_error_counter);
+ cf->data[7] = cc770_read_reg(priv, rx_error_counter);
+ }
+
+ if (status & STAT_BOFF) {
+ /* Disable interrupts */
+ cc770_write_reg(priv, control, CTRL_INI);
+ cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(dev);
+ } else if (status & STAT_WARN) {
+ cf->can_id |= CAN_ERR_CRTL;
+ /* Only the CC770 shows error passive */
+ if (cf->data[7] > 127) {
+ cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE |
+ CAN_ERR_CRTL_TX_PASSIVE;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ } else {
+ cf->data[1] = CAN_ERR_CRTL_RX_WARNING |
+ CAN_ERR_CRTL_TX_WARNING;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ }
+ } else {
+ /* Back to error active */
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ lec = status & STAT_LEC_MASK;
+ if (lec < 7 && lec > 0) {
+ if (lec == STAT_LEC_ACK) {
+ cf->can_id |= CAN_ERR_ACK;
+ } else {
+ cf->can_id |= CAN_ERR_PROT;
+ switch (lec) {
+ case STAT_LEC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case STAT_LEC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case STAT_LEC_BIT1:
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+ case STAT_LEC_BIT0:
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+ case STAT_LEC_CRC:
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+ }
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+static int cc770_status_interrupt(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 status;
+
+ status = cc770_read_reg(priv, status);
+ /* Reset the status register including RXOK and TXOK */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+ if (status & (STAT_WARN | STAT_BOFF) ||
+ (status & STAT_LEC_MASK) != STAT_LEC_MASK) {
+ cc770_err(dev, status);
+ return status & STAT_BOFF;
+ }
+
+ return 0;
+}
+
+static void cc770_rx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl1;
+ int n = CC770_MAX_MSG;
+
+ while (n--) {
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+
+ if (!(ctrl1 & NEWDAT_SET)) {
+ /* Check for RTR if additional functions are enabled */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) &
+ INTPND_SET))
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (ctrl1 & MSGLST_SET) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+ if (mo < MSGOBJ_LAST)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_UNC | RMTPND_UNC);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl0, ctrl1;
+ int n = CC770_MAX_MSG;
+
+ while (n--) {
+ ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0);
+ if (!(ctrl0 & INTPND_SET))
+ break;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+
+ /* Nothing more to send, switch off interrupts */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+ /*
+ * We had some cases of repeated IRQ so make sure the
+ * INT is acknowledged
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ stats->tx_packets++;
+ can_get_echo_skb(dev, 0);
+ netif_wake_queue(dev);
+}
+
+irqreturn_t cc770_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 intid;
+ int o, n = 0;
+
+ /* Shared interrupts and IRQ off? */
+ if (priv->can.state == CAN_STATE_STOPPED)
+ return IRQ_NONE;
+
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
+ while (n < CC770_MAX_IRQ) {
+ /* Read the highest pending interrupt request */
+ intid = cc770_read_reg(priv, interrupt);
+ if (!intid)
+ break;
+ n++;
+
+ if (intid == 1) {
+ /* Exit in case of bus-off */
+ if (cc770_status_interrupt(dev))
+ break;
+ } else {
+ o = intid2obj(intid);
+
+ if (o >= CC770_OBJ_MAX) {
+ netdev_err(dev, "Unexpected interrupt id %d\n",
+ intid);
+ continue;
+ }
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR)
+ cc770_rtr_interrupt(dev, o);
+ else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX)
+ cc770_rx_interrupt(dev, o);
+ else
+ cc770_tx_interrupt(dev, o);
+ }
+ }
+
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
+ if (n >= CC770_MAX_IRQ)
+		netdev_dbg(dev, "%d messages handled in ISR\n", n);
+
+ return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int cc770_open(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* set chip into reset mode */
+ set_reset_mode(dev);
+
+ /* common open */
+ err = open_candev(dev);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags,
+ dev->name, dev);
+ if (err) {
+ close_candev(dev);
+ return -EAGAIN;
+ }
+
+ /* init and start chip */
+ cc770_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int cc770_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ set_reset_mode(dev);
+
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ return 0;
+}
+
+struct net_device *alloc_cc770dev(int sizeof_priv)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+
+ dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv,
+ CC770_ECHO_SKB_MAX);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &cc770_bittiming_const;
+ priv->can.do_set_bittiming = cc770_set_bittiming;
+ priv->can.do_set_mode = cc770_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+ memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+
+ if (sizeof_priv)
+ priv->priv = (void *)priv + sizeof(struct cc770_priv);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_cc770dev);
+
+void free_cc770dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_cc770dev);
+
+static const struct net_device_ops cc770_netdev_ops = {
+ .ndo_open = cc770_open,
+ .ndo_stop = cc770_close,
+ .ndo_start_xmit = cc770_start_xmit,
+};
+
+int register_cc770dev(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = cc770_probe_chip(dev);
+ if (err)
+ return err;
+
+ dev->netdev_ops = &cc770_netdev_ops;
+
+ dev->flags |= IFF_ECHO; /* we support local echo */
+
+ /* Should we use additional functions? */
+ if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) {
+ priv->can.do_get_berr_counter = cc770_get_berr_counter;
+ priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE;
+ netdev_dbg(dev, "i82527 mode with additional functions\n");
+ } else {
+ priv->control_normal_mode = CTRL_IE | CTRL_EIE;
+ netdev_dbg(dev, "strict i82527 compatibility mode\n");
+ }
+
+ chipset_init(priv);
+ set_reset_mode(dev);
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_cc770dev);
+
+void unregister_cc770dev(struct net_device *dev)
+{
+ set_reset_mode(dev);
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_cc770dev);
+
+static __init int cc770_init(void)
+{
+ if (msgobj15_eff) {
+ cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF;
+ cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF;
+ }
+
+ pr_info("CAN netdevice driver\n");
+
+ return 0;
+}
+module_init(cc770_init);
+
+static __exit void cc770_exit(void)
+{
+ pr_info("driver removed\n");
+}
+module_exit(cc770_exit);
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
new file mode 100644
index 00000000000..a1739db98d9
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.h
@@ -0,0 +1,203 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CC770_DEV_H
+#define CC770_DEV_H
+
+#include <linux/can/dev.h>
+
+struct cc770_msgobj {
+ u8 ctrl0;
+ u8 ctrl1;
+ u8 id[4];
+ u8 config;
+ u8 data[8];
+ u8 dontuse; /* padding */
+} __packed;
+
+struct cc770_regs {
+ union {
+ struct cc770_msgobj msgobj[16]; /* Message object 1..15 */
+ struct {
+ u8 control; /* Control Register */
+ u8 status; /* Status Register */
+ u8 cpu_interface; /* CPU Interface Register */
+ u8 dontuse1;
+ u8 high_speed_read[2]; /* High Speed Read */
+ u8 global_mask_std[2]; /* Standard Global Mask */
+ u8 global_mask_ext[4]; /* Extended Global Mask */
+ u8 msg15_mask[4]; /* Message 15 Mask */
+ u8 dontuse2[15];
+ u8 clkout; /* Clock Out Register */
+ u8 dontuse3[15];
+ u8 bus_config; /* Bus Configuration Register */
+ u8 dontuse4[15];
+ u8 bit_timing_0; /* Bit Timing Register byte 0 */
+ u8 dontuse5[15];
+ u8 bit_timing_1; /* Bit Timing Register byte 1 */
+ u8 dontuse6[15];
+ u8 interrupt; /* Interrupt Register */
+ u8 dontuse7[15];
+ u8 rx_error_counter; /* Receive Error Counter */
+ u8 dontuse8[15];
+ u8 tx_error_counter; /* Transmit Error Counter */
+ u8 dontuse9[31];
+ u8 p1_conf;
+ u8 dontuse10[15];
+ u8 p2_conf;
+ u8 dontuse11[15];
+ u8 p1_in;
+ u8 dontuse12[15];
+ u8 p2_in;
+ u8 dontuse13[15];
+ u8 p1_out;
+ u8 dontuse14[15];
+ u8 p2_out;
+ u8 dontuse15[15];
+ u8 serial_reset_addr;
+ };
+ };
+} __packed;
+
+/* Control Register (0x00) */
+#define CTRL_INI 0x01 /* Initialization */
+#define CTRL_IE 0x02 /* Interrupt Enable */
+#define CTRL_SIE 0x04 /* Status Interrupt Enable */
+#define CTRL_EIE 0x08 /* Error Interrupt Enable */
+#define CTRL_EAF 0x20 /* Enable additional functions */
+#define CTRL_CCE 0x40 /* Change Configuration Enable */
+
+/* Status Register (0x01) */
+#define STAT_LEC_STUFF 0x01 /* Stuff error */
+#define STAT_LEC_FORM 0x02 /* Form error */
+#define STAT_LEC_ACK 0x03 /* Acknowledgement error */
+#define STAT_LEC_BIT1 0x04 /* Bit1 error */
+#define STAT_LEC_BIT0 0x05 /* Bit0 error */
+#define STAT_LEC_CRC 0x06 /* CRC error */
+#define STAT_LEC_MASK 0x07 /* Last Error Code mask */
+#define STAT_TXOK 0x08 /* Transmit Message Successfully */
+#define STAT_RXOK 0x10 /* Receive Message Successfully */
+#define STAT_WAKE 0x20 /* Wake Up Status */
+#define STAT_WARN 0x40 /* Warning Status */
+#define STAT_BOFF 0x80 /* Bus Off Status */
+
+/*
+ * CPU Interface Register (0x02)
+ * Clock Out Register (0x1f)
+ * Bus Configuration Register (0x2f)
+ *
+ * see include/linux/can/platform/cc770.h
+ */
+
+/* Message Control Register 0 (Base Address + 0x0) */
+#define INTPND_RES 0x01 /* No Interrupt pending */
+#define INTPND_SET 0x02 /* Interrupt pending */
+#define INTPND_UNC 0x03
+#define RXIE_RES 0x04 /* Receive Interrupt Disable */
+#define RXIE_SET 0x08 /* Receive Interrupt Enable */
+#define RXIE_UNC 0x0c
+#define TXIE_RES 0x10 /* Transmit Interrupt Disable */
+#define TXIE_SET 0x20 /* Transmit Interrupt Enable */
+#define TXIE_UNC 0x30
+#define MSGVAL_RES 0x40 /* Message Invalid */
+#define MSGVAL_SET 0x80 /* Message Valid */
+#define MSGVAL_UNC 0xc0
+
+/* Message Control Register 1 (Base Address + 0x01) */
+#define NEWDAT_RES 0x01 /* No New Data */
+#define NEWDAT_SET 0x02 /* New Data */
+#define NEWDAT_UNC 0x03
+#define MSGLST_RES 0x04 /* No Message Lost */
+#define MSGLST_SET 0x08 /* Message Lost */
+#define MSGLST_UNC 0x0c
+#define CPUUPD_RES 0x04 /* No CPU Updating */
+#define CPUUPD_SET 0x08 /* CPU Updating */
+#define CPUUPD_UNC 0x0c
+#define TXRQST_RES 0x10 /* No Transmission Request */
+#define TXRQST_SET 0x20 /* Transmission Request */
+#define TXRQST_UNC 0x30
+#define RMTPND_RES 0x40 /* No Remote Request Pending */
+#define RMTPND_SET 0x80 /* Remote Request Pending */
+#define RMTPND_UNC 0xc0
+
+/* Message Configuration Register (Base Address + 0x06) */
+#define MSGCFG_XTD 0x04 /* Extended Identifier */
+#define MSGCFG_DIR 0x08 /* Direction is Transmit */
+
+#define MSGOBJ_FIRST 1
+#define MSGOBJ_LAST 15
+
+#define CC770_IO_SIZE 0x100
+#define CC770_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
+#define CC770_MAX_MSG 4 /* max. number of messages handled in ISR */
+
+#define CC770_ECHO_SKB_MAX 1
+
+#define cc770_read_reg(priv, member) \
+ priv->read_reg(priv, offsetof(struct cc770_regs, member))
+
+#define cc770_write_reg(priv, member, value) \
+ priv->write_reg(priv, offsetof(struct cc770_regs, member), value)
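+
+/*
+ * Access sketch (illustrative only, not part of the original register
+ * definitions): because the layout of struct cc770_regs mirrors the
+ * controller's address map, a register is addressed by its member name,
+ * e.g.
+ *
+ *	u8 ctrl1 = cc770_read_reg(priv, msgobj[1].ctrl1);
+ *	cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);
+ *
+ * which expands to priv->read_reg(priv, offsetof(struct cc770_regs,
+ * msgobj[1].ctrl1)) and the corresponding priv->write_reg() call.
+ */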
+
+/*
+ * Message objects and flags used by this driver
+ */
+#define CC770_OBJ_FLAG_RX 0x01
+#define CC770_OBJ_FLAG_RTR 0x02
+#define CC770_OBJ_FLAG_EFF 0x04
+
+enum {
+ CC770_OBJ_RX0 = 0, /* for receiving normal messages */
+ CC770_OBJ_RX1, /* for receiving normal messages */
+ CC770_OBJ_RX_RTR0, /* for receiving remote transmission requests */
+ CC770_OBJ_RX_RTR1, /* for receiving remote transmission requests */
+ CC770_OBJ_TX, /* for sending messages */
+ CC770_OBJ_MAX
+};
+
+#define obj2msgobj(o) (MSGOBJ_LAST - (o)) /* message object 11..15 */
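+
+/*
+ * Mapping sketch (illustrative only): with MSGOBJ_LAST = 15, obj2msgobj()
+ * maps the driver's object indices onto the chip's message objects as
+ *
+ *	CC770_OBJ_RX0 (0) -> 15, CC770_OBJ_RX1 (1) -> 14,
+ *	CC770_OBJ_RX_RTR0 (2) -> 13, CC770_OBJ_RX_RTR1 (3) -> 12,
+ *	CC770_OBJ_TX (4) -> 11
+ */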
+
+/*
+ * CC770 private data structure
+ */
+struct cc770_priv {
+ struct can_priv can; /* must be the first member */
+ struct sk_buff *echo_skb;
+
+ /* the lower-layer is responsible for appropriate locking */
+ u8 (*read_reg)(const struct cc770_priv *priv, int reg);
+ void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val);
+ void (*pre_irq)(const struct cc770_priv *priv);
+ void (*post_irq)(const struct cc770_priv *priv);
+
+ void *priv; /* for board-specific data */
+ struct net_device *dev;
+
+ void __iomem *reg_base; /* ioremap'ed address to registers */
+ unsigned long irq_flags; /* for request_irq() */
+
+ unsigned char obj_flags[CC770_OBJ_MAX];
+ u8 control_normal_mode; /* Control register for normal mode */
+ u8 cpu_interface; /* CPU interface register */
+ u8 clkout; /* Clock out register */
+	u8 bus_config;		/* Bus configuration register */
+};
+
+struct net_device *alloc_cc770dev(int sizeof_priv);
+void free_cc770dev(struct net_device *dev);
+int register_cc770dev(struct net_device *dev);
+void unregister_cc770dev(struct net_device *dev);
+
+#endif /* CC770_DEV_H */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
new file mode 100644
index 00000000000..4be5fe2c40a
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -0,0 +1,367 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC/104 bus.
+ * The I/O port or memory address and the IRQ number must be specified via
+ * module parameters:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 irq=7,11
+ *
+ * for ISA devices using I/O ports or:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
+ *
+ * for memory mapped ISA devices.
+ *
+ * Indirect access via address and data port is supported as well:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
+ *
+ * Furthermore, the following mode parameters can be defined:
+ *
+ * clk: External oscillator clock frequency (default=16000000 [16 MHz])
+ * cir: CPU interface register (default=0x40 [DSC])
+ * bcr: Bus configuration register (default=0x40 [CBY])
+ * cor: Clockout register (default=0x00)
+ *
+ * Note: for clk, cir, bcr and cor, the first argument re-defines the
+ * default for all other devices, e.g.:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
+ *
+ * is equivalent to
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define MAXDEV 8
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
+MODULE_LICENSE("GPL v2");
+
+#define CLK_DEFAULT 16000000 /* 16 MHz */
+#define COR_DEFAULT 0x00
+#define BCR_DEFAULT BUSCFG_CBY
+
+static unsigned long port[MAXDEV];
+static unsigned long mem[MAXDEV];
+static int __devinitdata irq[MAXDEV];
+static int __devinitdata clk[MAXDEV];
+static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+
+module_param_array(port, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(port, "I/O port number");
+
+module_param_array(mem, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(mem, "I/O memory address");
+
+module_param_array(indirect, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
+
+module_param_array(irq, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+module_param_array(clk, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(clk, "External oscillator clock frequency "
+ "(default=16000000 [16 MHz])");
+
+module_param_array(cir, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
+
+module_param_array(cor, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
+
+module_param_array(bcr, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
+
+#define CC770_IOSIZE 0x20
+#define CC770_IOSIZE_INDIRECT 0x02
+
+static struct platform_device *cc770_isa_devs[MAXDEV];
+
+static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return readb(priv->reg_base + reg);
+}
+
+static void cc770_isa_mem_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ writeb(val, priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+static void cc770_isa_port_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
+ int reg)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+
+ outb(reg, base);
+ outb(val, base + 1);
+}
+
+static int __devinit cc770_isa_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ void __iomem *base = NULL;
+ int iosize = CC770_IOSIZE;
+ int idx = pdev->id;
+ int err;
+ u32 clktmp;
+
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ if (mem[idx]) {
+ if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ base = ioremap_nocache(mem[idx], iosize);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ } else {
+ if (indirect[idx] > 0 ||
+ (indirect[idx] == -1 && indirect[0] > 0))
+ iosize = CC770_IOSIZE_INDIRECT;
+ if (!request_region(port[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap;
+ }
+ priv = netdev_priv(dev);
+
+ dev->irq = irq[idx];
+ priv->irq_flags = IRQF_SHARED;
+ if (mem[idx]) {
+ priv->reg_base = base;
+ dev->base_addr = mem[idx];
+ priv->read_reg = cc770_isa_mem_read_reg;
+ priv->write_reg = cc770_isa_mem_write_reg;
+ } else {
+ priv->reg_base = (void __iomem *)port[idx];
+ dev->base_addr = port[idx];
+
+ if (iosize == CC770_IOSIZE_INDIRECT) {
+ priv->read_reg = cc770_isa_port_read_reg_indirect;
+ priv->write_reg = cc770_isa_port_write_reg_indirect;
+ } else {
+ priv->read_reg = cc770_isa_port_read_reg;
+ priv->write_reg = cc770_isa_port_write_reg;
+ }
+ }
+
+ if (clk[idx])
+ clktmp = clk[idx];
+ else if (clk[0])
+ clktmp = clk[0];
+ else
+ clktmp = CLK_DEFAULT;
+ priv->can.clock.freq = clktmp;
+
+ if (cir[idx] != 0xff) {
+ priv->cpu_interface = cir[idx];
+ } else if (cir[0] != 0xff) {
+ priv->cpu_interface = cir[0];
+ } else {
+ /* The system clock may not exceed 10 MHz */
+ if (clktmp > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ clktmp /= 2;
+ }
+ /* The memory clock may not exceed 8 MHz */
+ if (clktmp > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+ }
+
+ if (priv->cpu_interface & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+
+ if (bcr[idx] != 0xff)
+ priv->bus_config = bcr[idx];
+ else if (bcr[0] != 0xff)
+ priv->bus_config = bcr[0];
+ else
+ priv->bus_config = BCR_DEFAULT;
+
+ if (cor[idx] != 0xff)
+ priv->clkout = cor[idx];
+ else if (cor[0] != 0xff)
+ priv->clkout = cor[0];
+ else
+ priv->clkout = COR_DEFAULT;
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "couldn't register device (err=%d)\n", err);
+ goto exit_unmap;
+ }
+
+ dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
+ priv->reg_base, dev->irq);
+ return 0;
+
+ exit_unmap:
+ if (mem[idx])
+ iounmap(base);
+ exit_release:
+ if (mem[idx])
+ release_mem_region(mem[idx], iosize);
+ else
+ release_region(port[idx], iosize);
+ exit:
+ return err;
+}
+
+static int __devexit cc770_isa_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
+
+ unregister_cc770dev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (mem[idx]) {
+ iounmap(priv->reg_base);
+ release_mem_region(mem[idx], CC770_IOSIZE);
+ } else {
+ if (priv->read_reg == cc770_isa_port_read_reg_indirect)
+ release_region(port[idx], CC770_IOSIZE_INDIRECT);
+ else
+ release_region(port[idx], CC770_IOSIZE);
+ }
+ free_cc770dev(dev);
+
+ return 0;
+}
+
+static struct platform_driver cc770_isa_driver = {
+ .probe = cc770_isa_probe,
+ .remove = __devexit_p(cc770_isa_remove),
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init cc770_isa_init(void)
+{
+ int idx, err;
+
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ cc770_isa_devs[idx] =
+ platform_device_alloc(KBUILD_MODNAME, idx);
+ if (!cc770_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(cc770_isa_devs[idx]);
+ if (err) {
+ platform_device_put(cc770_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("insufficient parameters supplied\n");
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&cc770_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("driver for max. %d devices registered\n", MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+
+ return err;
+}
+module_init(cc770_isa_init);
+
+static void __exit cc770_isa_exit(void)
+{
+ int idx;
+
+ platform_driver_unregister(&cc770_isa_driver);
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+}
+module_exit(cc770_isa_exit);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
new file mode 100644
index 00000000000..53115eee807
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -0,0 +1,272 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the platform bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * If platform data are used you should have similar definitions
+ * in your board-specific code:
+ *
+ * static struct cc770_platform_data myboard_cc770_pdata = {
+ * .osc_freq = 16000000,
+ * .cir = 0x41,
+ * .cor = 0x20,
+ * .bcr = 0x40,
+ * };
+ *
+ * Please see include/linux/can/platform/cc770.h for description of
+ * above fields.
+ *
+ * If the device tree is used, you need a CAN node definition in your
+ * DTS file similar to:
+ *
+ * can@3,100 {
+ * compatible = "bosch,cc770";
+ * reg = <3 0x100 0x80>;
+ * interrupts = <2 0>;
+ * interrupt-parent = <&mpic>;
+ * bosch,external-clock-frequency = <16000000>;
+ * };
+ *
+ * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define DRV_NAME "cc770_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define CC770_PLATFORM_CAN_CLOCK 16000000
+
+static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg);
+}
+
+static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
+ u8 val)
+{
+ iowrite8(val, priv->reg_base + reg);
+}
+
+static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const u32 *prop;
+ int prop_size;
+ u32 clkext;
+
+ prop = of_get_property(np, "bosch,external-clock-frequency",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ clkext = *prop;
+ else
+ clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+ priv->can.clock.freq = clkext;
+
+ /* The system clock may not exceed 10 MHz */
+ if (priv->can.clock.freq > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ priv->can.clock.freq /= 2;
+ }
+
+ /* The memory clock may not exceed 8 MHz */
+ if (priv->can.clock.freq > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+
+ if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ priv->cpu_interface |= CPUIF_DMC;
+ if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ priv->cpu_interface |= CPUIF_MUX;
+
+ if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ priv->bus_config |= BUSCFG_CBY;
+ if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ priv->bus_config |= BUSCFG_DR0;
+ if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ priv->bus_config |= BUSCFG_DR1;
+ if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ priv->bus_config |= BUSCFG_DT1;
+ if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ priv->bus_config |= BUSCFG_POL;
+
+ prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
+ u32 cdv = clkext / *prop;
+ int slew;
+
+ if (cdv > 0 && cdv < 16) {
+ priv->cpu_interface |= CPUIF_CEN;
+ priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
+
+ prop = of_get_property(np, "bosch,slew-rate",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32))) {
+ slew = *prop;
+ } else {
+ /* Determine default slew rate */
+ slew = (CLKOUT_SL_MASK >>
+ CLKOUT_SL_SHIFT) -
+ ((cdv * clkext - 1) / 8000000);
+ if (slew < 0)
+ slew = 0;
+ }
+ priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
+ CLKOUT_SL_MASK;
+ } else {
+ dev_dbg(&pdev->dev, "invalid clock-out-frequency\n");
+ }
+ }
+
+ return 0;
+}
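+
+/*
+ * Worked example for the clock-out setup in cc770_get_of_node_data()
+ * (illustrative only; it assumes CLKOUT_CD_MASK = 0x0f, CLKOUT_SL_MASK =
+ * 0x30 and CLKOUT_SL_SHIFT = 4 from include/linux/can/platform/cc770.h):
+ * with clkext = 16000000 and bosch,clock-out-frequency = <8000000>,
+ * cdv = 2, so the CD field is set to 1 and the default slew rate becomes
+ * 3 - ((2 * 16000000 - 1) / 8000000) = 3 - 3 = 0.
+ */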
+
+static int __devinit cc770_get_platform_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+	struct cc770_platform_data *pdata = pdev->dev.platform_data;
+
+	priv->can.clock.freq = pdata->osc_freq;
+	if (pdata->cir & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+ priv->clkout = pdata->cor;
+ priv->bus_config = pdata->bcr;
+ priv->cpu_interface = pdata->cir;
+
+ return 0;
+}
+
+static int __devinit cc770_platform_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ struct resource *mem;
+ resource_size_t mem_size;
+ void __iomem *base;
+ int err, irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq <= 0)
+ return -ENODEV;
+
+ mem_size = resource_size(mem);
+ if (!request_mem_region(mem->start, mem_size, pdev->name))
+ return -EBUSY;
+
+ base = ioremap(mem->start, mem_size);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap_mem;
+ }
+
+ dev->irq = irq;
+ priv = netdev_priv(dev);
+ priv->read_reg = cc770_platform_read_reg;
+ priv->write_reg = cc770_platform_write_reg;
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = base;
+
+ if (pdev->dev.of_node)
+ err = cc770_get_of_node_data(pdev, priv);
+ else if (pdev->dev.platform_data)
+ err = cc770_get_platform_data(pdev, priv);
+ else
+ err = -ENODEV;
+ if (err)
+ goto exit_free_cc770;
+
+ dev_dbg(&pdev->dev,
+ "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x "
+ "bus_config=0x%02x clkout=0x%02x\n",
+ priv->reg_base, dev->irq, priv->can.clock.freq,
+ priv->cpu_interface, priv->bus_config, priv->clkout);
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+			"couldn't register CC770 device (err=%d)\n", err);
+ goto exit_free_cc770;
+ }
+
+ return 0;
+
+exit_free_cc770:
+ free_cc770dev(dev);
+exit_unmap_mem:
+ iounmap(base);
+exit_release_mem:
+ release_mem_region(mem->start, mem_size);
+
+ return err;
+}
+
+static int __devexit cc770_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ unregister_cc770dev(dev);
+ iounmap(priv->reg_base);
+ free_cc770dev(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return 0;
+}
+
+static struct of_device_id __devinitdata cc770_platform_table[] = {
+ {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
+ {.compatible = "intc,82527"}, /* AN82527 from Intel CP */
+ {},
+};
+
+static struct platform_driver cc770_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cc770_platform_table,
+ },
+ .probe = cc770_platform_probe,
+ .remove = __devexit_p(cc770_platform_remove),
+};
+
+module_platform_driver(cc770_platform_driver);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 25695bde054..120f1ab5a2c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index e02337953f4..165a4c79802 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1060,20 +1060,7 @@ static struct platform_driver flexcan_driver = {
.remove = __devexit_p(flexcan_remove),
};
-static int __init flexcan_init(void)
-{
- pr_info("%s netdevice driver\n", DRV_NAME);
- return platform_driver_register(&flexcan_driver);
-}
-
-static void __exit flexcan_exit(void)
-{
- platform_driver_unregister(&flexcan_driver);
- pr_info("%s: driver removed\n", DRV_NAME);
-}
-
-module_init(flexcan_init);
-module_exit(flexcan_exit);
+module_platform_driver(flexcan_driver);
MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
"Marc Kleine-Budde <kernel@pengutronix.de>");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32778d56d33..08c893cb789 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1803,20 +1803,9 @@ static struct platform_driver ican3_driver = {
.remove = __devexit_p(ican3_remove),
};
-static int __init ican3_init(void)
-{
- return platform_driver_register(&ican3_driver);
-}
-
-static void __exit ican3_exit(void)
-{
- platform_driver_unregister(&ican3_driver);
-}
+module_platform_driver(ican3_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ican3");
-
-module_init(ican3_init);
-module_exit(ican3_exit);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5fedc337556..5caa572d71e 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -411,17 +411,7 @@ static struct platform_driver mpc5xxx_can_driver = {
#endif
};
-static int __init mpc5xxx_can_init(void)
-{
- return platform_driver_register(&mpc5xxx_can_driver);
-}
-module_init(mpc5xxx_can_init);
-
-static void __exit mpc5xxx_can_exit(void)
-{
- platform_driver_unregister(&mpc5xxx_can_driver);
-};
-module_exit(mpc5xxx_can_exit);
+module_platform_driver(mpc5xxx_can_driver);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index ec4a3119e2c..1c82dd8b896 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -581,7 +581,10 @@ static int mscan_open(struct net_device *dev)
priv->open_time = jiffies;
- clrbits8(&regs->canctl1, MSCAN_LISTEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setbits8(&regs->canctl1, MSCAN_LISTEN);
+ else
+ clrbits8(&regs->canctl1, MSCAN_LISTEN);
ret = mscan_start(dev);
if (ret)
@@ -690,7 +693,8 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index fe9e64d476e..36e9d594069 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -6,7 +6,6 @@ if CAN_SJA1000
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
- depends on ISA
---help---
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 905bce0b3a4..2c7f5036f57 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 496223e9e2f..90c5c2dfd2f 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/isa.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
@@ -44,9 +44,9 @@ static unsigned long port[MAXDEV];
static unsigned long mem[MAXDEV];
static int __devinitdata irq[MAXDEV];
static int __devinitdata clk[MAXDEV];
-static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
module_param_array(port, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(port, "I/O port number");
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number");
module_param_array(mem, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(mem, "I/O memory address");
-module_param_array(indirect, byte, NULL, S_IRUGO);
+module_param_array(indirect, int, NULL, S_IRUGO);
MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
module_param_array(irq, int, NULL, S_IRUGO);
@@ -75,6 +75,8 @@ MODULE_PARM_DESC(ocr, "Output control register "
#define SJA1000_IOSIZE 0x20
#define SJA1000_IOSIZE_INDIRECT 0x02
+static struct platform_device *sja1000_isa_devs[MAXDEV];
+
static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg)
{
return readb(priv->reg_base + reg);
@@ -115,26 +117,18 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
outb(val, base + 1);
}
-static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx)
-{
- if (port[idx] || mem[idx]) {
- if (irq[idx])
- return 1;
- } else if (idx)
- return 0;
-
- dev_err(pdev, "insufficient parameters supplied\n");
- return 0;
-}
-
-static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
+static int __devinit sja1000_isa_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sja1000_priv *priv;
void __iomem *base = NULL;
int iosize = SJA1000_IOSIZE;
+ int idx = pdev->id;
int err;
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+
if (mem[idx]) {
if (!request_mem_region(mem[idx], iosize, DRV_NAME)) {
err = -EBUSY;
@@ -189,31 +183,31 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
else
priv->can.clock.freq = CLK_DEFAULT / 2;
- if (ocr[idx] != -1)
- priv->ocr = ocr[idx] & 0xff;
- else if (ocr[0] != -1)
- priv->ocr = ocr[0] & 0xff;
+ if (ocr[idx] != 0xff)
+ priv->ocr = ocr[idx];
+ else if (ocr[0] != 0xff)
+ priv->ocr = ocr[0];
else
priv->ocr = OCR_DEFAULT;
- if (cdr[idx] != -1)
- priv->cdr = cdr[idx] & 0xff;
- else if (cdr[0] != -1)
- priv->cdr = cdr[0] & 0xff;
+ if (cdr[idx] != 0xff)
+ priv->cdr = cdr[idx];
+ else if (cdr[0] != 0xff)
+ priv->cdr = cdr[0];
else
priv->cdr = CDR_DEFAULT;
- dev_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, pdev);
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = register_sja1000dev(dev);
if (err) {
- dev_err(pdev, "registering %s failed (err=%d)\n",
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
goto exit_unmap;
}
- dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n",
+ dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
@@ -229,13 +223,14 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
return err;
}
-static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
+static int __devexit sja1000_isa_remove(struct platform_device *pdev)
{
- struct net_device *dev = dev_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
unregister_sja1000dev(dev);
- dev_set_drvdata(pdev, NULL);
+ dev_set_drvdata(&pdev->dev, NULL);
if (mem[idx]) {
iounmap(priv->reg_base);
@@ -251,29 +246,70 @@ static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
return 0;
}
-static struct isa_driver sja1000_isa_driver = {
- .match = sja1000_isa_match,
+static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
.remove = __devexit_p(sja1000_isa_remove),
.driver = {
.name = DRV_NAME,
+ .owner = THIS_MODULE,
},
};
static int __init sja1000_isa_init(void)
{
- int err = isa_register_driver(&sja1000_isa_driver, MAXDEV);
+ int idx, err;
+
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ sja1000_isa_devs[idx] =
+ platform_device_alloc(DRV_NAME, idx);
+ if (!sja1000_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(sja1000_isa_devs[idx]);
+ if (err) {
+ platform_device_put(sja1000_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ DRV_NAME, idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("%s: insufficient parameters supplied\n",
+ DRV_NAME);
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&sja1000_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("Legacy %s driver for max. %d devices registered\n",
+ DRV_NAME, MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
- if (!err)
- printk(KERN_INFO
- "Legacy %s driver for max. %d devices registered\n",
- DRV_NAME, MAXDEV);
return err;
}
static void __exit sja1000_isa_exit(void)
{
- isa_unregister_driver(&sja1000_isa_driver);
+ int idx;
+
+ platform_driver_unregister(&sja1000_isa_driver);
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
}
module_init(sja1000_isa_init);
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index c3dd9d09be5..f2683eb6a3d 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -220,14 +220,4 @@ static struct platform_driver sja1000_ofp_driver = {
.remove = __devexit_p(sja1000_ofp_remove),
};
-static int __init sja1000_ofp_init(void)
-{
- return platform_driver_register(&sja1000_ofp_driver);
-}
-module_init(sja1000_ofp_init);
-
-static void __exit sja1000_ofp_exit(void)
-{
- return platform_driver_unregister(&sja1000_ofp_driver);
-};
-module_exit(sja1000_ofp_exit);
+module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d9fadc489b3..4f50145f648 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -185,15 +185,4 @@ static struct platform_driver sp_driver = {
},
};
-static int __init sp_init(void)
-{
- return platform_driver_register(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
- platform_driver_unregister(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
+module_platform_driver(sp_driver);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a979b006f45..3f1ebcc2cb8 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
/******************************************
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 09a8b86cf1a..a7c77c744ee 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -874,21 +874,9 @@ static struct platform_driver softing_driver = {
.remove = __devexit_p(softing_pdev_remove),
};
-MODULE_ALIAS("platform:softing");
-
-static int __init softing_start(void)
-{
- return platform_driver_register(&softing_driver);
-}
-
-static void __exit softing_stop(void)
-{
- platform_driver_unregister(&softing_driver);
-}
-
-module_init(softing_start);
-module_exit(softing_stop);
+module_platform_driver(softing_driver);
+MODULE_ALIAS("platform:softing");
MODULE_DESCRIPTION("Softing DPRAM CAN driver");
MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2adc294f512..df809e3f130 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1037,20 +1037,7 @@ static struct platform_driver ti_hecc_driver = {
.resume = ti_hecc_resume,
};
-static int __init ti_hecc_init_driver(void)
-{
- printk(KERN_INFO DRV_DESC "\n");
- return platform_driver_register(&ti_hecc_driver);
-}
-
-static void __exit ti_hecc_exit_driver(void)
-{
- printk(KERN_INFO DRV_DESC " unloaded\n");
- platform_driver_unregister(&ti_hecc_driver);
-}
-
-module_exit(ti_hecc_exit_driver);
-module_init(ti_hecc_init_driver);
+module_platform_driver(ti_hecc_driver);
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f93e2d6fc88..ea2d9428593 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
* See Documentation/networking/can.txt for details.
*/
-static int echo; /* echo testing. Default: 0 (Off) */
+static bool echo; /* echo testing. Default: 0 (Off) */
module_param(echo, bool, S_IRUGO);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644
index 00000000000..dd151d53d50
--- /dev/null
+++ b/drivers/net/dsa/Kconfig
@@ -0,0 +1,36 @@
+menu "Distributed Switch Architecture drivers"
+ depends on NET_DSA
+
+config NET_DSA_MV88E6XXX
+ tristate
+ default n
+
+config NET_DSA_MV88E6060
+ tristate "Marvell 88E6060 ethernet switch chip support"
+ select NET_DSA_TAG_TRAILER
+ ---help---
+ This enables support for the Marvell 88E6060 ethernet switch
+ chip.
+
+config NET_DSA_MV88E6XXX_NEED_PPU
+ bool
+ default n
+
+config NET_DSA_MV88E6131
+ tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_MV88E6XXX_NEED_PPU
+ select NET_DSA_TAG_DSA
+ ---help---
+ This enables support for the Marvell 88E6085/6095/6095F/6131
+ ethernet switch chips.
+
+config NET_DSA_MV88E6123_61_65
+ tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_TAG_EDSA
+ ---help---
+ This enables support for the Marvell 88E6123/6161/6165
+ ethernet switch chips.
+
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644
index 00000000000..f3bda05536c
--- /dev/null
+++ b/drivers/net/dsa/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
+mv88e6xxx_drv-y += mv88e6xxx.o
+ifdef CONFIG_NET_DSA_MV88E6123_61_65
+mv88e6xxx_drv-y += mv88e6123_61_65.o
+endif
+ifdef CONFIG_NET_DSA_MV88E6131
+mv88e6xxx_drv-y += mv88e6131.o
+endif
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
new file mode 100644
index 00000000000..7fc4e81d4d4
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.c
@@ -0,0 +1,293 @@
+/*
+ * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#define REG_PORT(p) (8 + (p))
+#define REG_GLOBAL 0x0f
+
+static int reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+ return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg);
+}
+
+#define REG_READ(addr, reg) \
+ ({ \
+ int __ret; \
+ \
+ __ret = reg_read(ds, addr, reg); \
+ if (__ret < 0) \
+ return __ret; \
+ __ret; \
+ })
+
+
+static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr,
+ reg, val);
+}
+
+#define REG_WRITE(addr, reg, val) \
+ ({ \
+ int __ret; \
+ \
+ __ret = reg_write(ds, addr, reg, val); \
+ if (__ret < 0) \
+ return __ret; \
+ })
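+
+/*
+ * Usage sketch (illustrative only; the helper below is made up): both
+ * macros are statement expressions that return the negative MDIO error
+ * code on behalf of the caller, so they may only be used in functions
+ * returning int, e.g.
+ *
+ *	static int example_disable_port(struct dsa_switch *ds, int p)
+ *	{
+ *		int ret = REG_READ(REG_PORT(p), 0x04);
+ *
+ *		REG_WRITE(REG_PORT(p), 0x04, ret & 0xfffc);
+ *		return 0;
+ *	}
+ */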
+
+static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == 0x0600)
+ return "Marvell 88E6060";
+ }
+
+ return NULL;
+}
+
+static int mv88e6060_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 6; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0x8000) == 0x0000)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6060_setup_global(struct dsa_switch *ds)
+{
+ /*
+ * Disable discarding of frames with excessive collisions,
+ * set the maximum frame size to 1536 bytes, and mask all
+ * interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
+
+ /*
+ * Enable automatic address learning, set the address
+ * database size to 1024 entries, and set the default aging
+ * time to 5 minutes.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
+
+ return 0;
+}
+
+static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+
+ /*
+ * Do not force flow control, disable Ingress and Egress
+ * Header tagging, disable VLAN tunneling, and set the port
+ * state to Forwarding. Additionally, if this is the CPU
+ * port, enable Ingress and Egress Trailer tagging mode.
+ */
+ REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the CPU port.
+ */
+ REG_WRITE(addr, 0x06,
+ ((p & 0xf) << 12) |
+ (dsa_is_cpu_port(ds, p) ?
+ ds->phys_port_mask :
+ (1 << ds->dst->cpu_port)));
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ return 0;
+}
+
+static int mv88e6060_setup(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ ret = mv88e6060_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise atu */
+
+ ret = mv88e6060_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 6; i++) {
+ ret = mv88e6060_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
+ REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+
+ return 0;
+}
+
+static int mv88e6060_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 5)
+ return port;
+ return -1;
+}
+
+static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_read(ds, addr, regnum);
+}
+
+static int
+mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_write(ds, addr, regnum, val);
+}
+
+static void mv88e6060_poll_link(struct dsa_switch *ds)
+{
+ int i;
+
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ struct net_device *dev;
+ int uninitialized_var(port_status);
+ int link;
+ int speed;
+ int duplex;
+ int fc;
+
+ dev = ds->ports[i];
+ if (dev == NULL)
+ continue;
+
+ link = 0;
+ if (dev->flags & IFF_UP) {
+ port_status = reg_read(ds, REG_PORT(i), 0x00);
+ if (port_status < 0)
+ continue;
+
+ link = !!(port_status & 0x1000);
+ }
+
+ if (!link) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ continue;
+ }
+
+ speed = (port_status & 0x0100) ? 100 : 10;
+ duplex = (port_status & 0x0200) ? 1 : 0;
+ fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
+
+ if (!netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half",
+ fc ? "en" : "dis");
+ netif_carrier_on(dev);
+ }
+ }
+}
+
+static struct dsa_switch_driver mv88e6060_switch_driver = {
+ .tag_protocol = htons(ETH_P_TRAILER),
+ .probe = mv88e6060_probe,
+ .setup = mv88e6060_setup,
+ .set_addr = mv88e6060_set_addr,
+ .phy_read = mv88e6060_phy_read,
+ .phy_write = mv88e6060_phy_write,
+ .poll_link = mv88e6060_poll_link,
+};
+
+static int __init mv88e6060_init(void)
+{
+ register_switch_driver(&mv88e6060_switch_driver);
+ return 0;
+}
+module_init(mv88e6060_init);
+
+static void __exit mv88e6060_cleanup(void)
+{
+ unregister_switch_driver(&mv88e6060_switch_driver);
+}
+module_exit(mv88e6060_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
new file mode 100644
index 00000000000..c0a458fc698
--- /dev/null
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -0,0 +1,438 @@
+/*
+ * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == 0x1210)
+ return "Marvell 88E6123";
+ if (ret == 0x1610)
+ return "Marvell 88E6161";
+ if (ret == 0x1650)
+ return "Marvell 88E6165";
+ }
+
+ return NULL;
+}
+
+static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 8; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0xc800) == 0xc800)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /*
+ * Disable the PHY polling unit (since there won't be any
+ * external PHYs to poll), don't discard packets with
+ * excessive collisions, and mask all interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
+
+ /*
+ * Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /*
+ * Configure the priority mapping registers.
+ */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Configure the upstream port, and configure the upstream
+ * port as the port to which ingress and egress monitor frames
+ * are to be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+
+ /*
+ * Disable remote management for now, and set the switch's
+ * DSA device number.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:2x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /*
+ * Disable the loopback filter, disable flow control
+ * messages, disable flood broadcast override, disable
+ * removing of provider tags, disable ATU age violation
+ * interrupts, disable tag flow control, force flow
+ * control priority to the highest, and send all special
+ * multicast frames to the CPU at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /*
+ * Program the DSA routing table.
+ */
+ for (i = 0; i < 32; i++) {
+ int nexthop;
+
+ nexthop = 0x1f;
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /*
+ * Clear all trunk masks.
+ */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
+
+ /*
+ * Clear all trunk mappings.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /*
+ * Disable ingress rate limiting by resetting all ingress
+ * rate limit registers to their initial state.
+ */
+ for (i = 0; i < 6; i++)
+ REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
+
+ /*
+ * Initialise cross-chip port VLAN table to reset defaults.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
+
+ /*
+ * Clear the priority override table.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
+
+ /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /*
+ * MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * full duplex.
+ */
+ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+ REG_WRITE(addr, 0x01, 0x003e);
+ else
+ REG_WRITE(addr, 0x01, 0x0003);
+
+ /*
+ * Do not limit the period of time that this port can be
+ * paused for by the remote end or the period of time that
+ * this port can pause the remote end.
+ */
+ REG_WRITE(addr, 0x02, 0x0000);
+
+ /*
+ * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ val = 0x0433;
+ if (dsa_is_cpu_port(ds, p)) {
+ if (ds->dst->tag_protocol == htons(ETH_P_EDSA))
+ val |= 0x3300;
+ else
+ val |= 0x0100;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ if (p == dsa_upstream_port(ds))
+ val |= 0x000c;
+ REG_WRITE(addr, 0x04, val);
+
+ /*
+ * Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /*
+ * Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /*
+ * Port Control 2: don't force a good FCS, set the maximum
+ * frame size to 10240 bytes, don't let the switch add or
+ * strip 802.1q tags, don't discard tagged or untagged frames
+ * on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ REG_WRITE(addr, 0x08, 0x2080);
+
+ /*
+ * Egress rate control: disable egress rate control.
+ */
+ REG_WRITE(addr, 0x09, 0x0001);
+
+ /*
+ * Egress rate control 2: disable egress rate control.
+ */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /*
+ * Port ATU control: disable limiting the number of address
+ * database entries that this port is allowed to use.
+ */
+ REG_WRITE(addr, 0x0c, 0x0000);
+
+ /*
+	 * Priority Override: disable DA, SA and VTU priority override.
+ */
+ REG_WRITE(addr, 0x0d, 0x0000);
+
+ /*
+ * Port Ethertype: use the Ethertype DSA Ethertype value.
+ */
+ REG_WRITE(addr, 0x0f, ETH_P_EDSA);
+
+ /*
+ * Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /*
+ * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int i;
+ int ret;
+
+ mutex_init(&ps->smi_mutex);
+ mutex_init(&ps->stats_mutex);
+
+ ret = mv88e6123_61_65_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6123_61_65_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 6; i++) {
+ ret = mv88e6123_61_65_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6123_61_65_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 4)
+ return port;
+ return -1;
+}
+
+static int
+mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr = mv88e6123_61_65_port_to_phy_addr(port);
+ return mv88e6xxx_phy_read(ds, addr, regnum);
+}
+
+static int
+mv88e6123_61_65_phy_write(struct dsa_switch *ds,
+ int port, int regnum, u16 val)
+{
+ int addr = mv88e6123_61_65_port_to_phy_addr(port);
+ return mv88e6xxx_phy_write(ds, addr, regnum, val);
+}
+
+static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+};
+
+static void
+mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
+ mv88e6123_61_65_hw_stats, port, data);
+}
+
+static void
+mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
+ int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
+ mv88e6123_61_65_hw_stats, port, data);
+}
+
+static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+ .tag_protocol = cpu_to_be16(ETH_P_EDSA),
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6123_61_65_probe,
+ .setup = mv88e6123_61_65_setup,
+ .set_addr = mv88e6xxx_set_addr_indirect,
+ .phy_read = mv88e6123_61_65_phy_read,
+ .phy_write = mv88e6123_61_65_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6123_61_65_get_strings,
+ .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats,
+ .get_sset_count = mv88e6123_61_65_get_sset_count,
+};
+
+MODULE_ALIAS("platform:mv88e6123");
+MODULE_ALIAS("platform:mv88e6161");
+MODULE_ALIAS("platform:mv88e6165");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
new file mode 100644
index 00000000000..e0eb6824383
--- /dev/null
+++ b/drivers/net/dsa/mv88e6131.c
@@ -0,0 +1,435 @@
+/*
+ * drivers/net/dsa/mv88e6131.c - Marvell 88e6085/6095/6095f/6131 switch chip support
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+/*
+ * Switch product IDs
+ */
+#define ID_6085 0x04a0
+#define ID_6095 0x0950
+#define ID_6131 0x1060
+
+static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == ID_6085)
+ return "Marvell 88E6085";
+ if (ret == ID_6095)
+ return "Marvell 88E6095/88E6095F";
+ if (ret == ID_6131)
+ return "Marvell 88E6131";
+ }
+
+ return NULL;
+}
+
+static int mv88e6131_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 11; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0xc800) == 0xc800)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6131_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /*
+ * Enable the PHY polling unit, don't discard packets with
+ * excessive collisions, use a weighted fair queueing scheme
+ * to arbitrate between packet queues, set the maximum frame
+ * size to 1632, and mask all interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
+
+ /*
+ * Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /*
+ * Configure the priority mapping registers.
+ */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set the VLAN ethertype to 0x8100.
+ */
+ REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
+
+ /*
+ * Disable ARP mirroring, and configure the upstream port as
+ * the port to which ingress and egress monitor frames are to
+ * be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
+
+ /*
+ * Disable cascade port functionality unless this device
+ * is used in a cascade configuration, and set the switch's
+ * DSA device number.
+ */
+ if (ds->dst->pd->nr_chips > 1)
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+ else
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /*
+ * Ignore removed tag data on doubly tagged packets, disable
+ * flow control messages, force flow control priority to the
+ * highest, and send all special multicast frames to the CPU
+ * port at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /*
+ * Program the DSA routing table.
+ */
+ for (i = 0; i < 32; i++) {
+ int nexthop;
+
+ nexthop = 0x1f;
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /*
+ * Clear all trunk masks.
+ */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
+
+ /*
+ * Clear all trunk mappings.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /*
+ * Force the priority of IGMP/MLD snoop frames and ARP frames
+ * to the highest setting.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
+
+ return 0;
+}
+
+static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /*
+ * MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * (100 Mb/s on 6085) full duplex.
+ */
+	if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) {
+		if (ps->id == ID_6085)
+			REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+		else
+			REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
+	} else {
+		REG_WRITE(addr, 0x01, 0x0003);
+	}
+
+ /*
+ * Port Control: disable Core Tag, disable Drop-on-Lock,
+ * transmit frames unmodified, disable Header mode,
+ * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and
+ * IP priority fields (IP prio has precedence), and set STP
+ * state to Forwarding.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts, and enable DSA tagging
+ * mode.
+ *
+ * If this is the link to another switch, use DSA tagging
+ * mode, but do not enable forwarding of unknown unicasts.
+ */
+ val = 0x0433;
+ if (p == dsa_upstream_port(ds)) {
+ val |= 0x0104;
+ /*
+		 * On the 6085, unknown multicast forwarding is controlled
+		 * here rather than in the Port Control 2 register.
+ */
+ if (ps->id == ID_6085)
+ val |= 0x0008;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ REG_WRITE(addr, 0x04, val);
+
+ /*
+ * Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /*
+ * Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /*
+ * Port Control 2: don't force a good FCS, don't use
+ * VLAN-based, source address-based or destination
+ * address-based priority overrides, don't let the switch
+ * add or strip 802.1q tags, don't discard tagged or
+ * untagged frames on this port, do a destination address
+ * lookup on received packets as usual, don't send a copy
+ * of all transmitted/received frames on this port to the
+ * CPU, and configure the upstream port number.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown multicast addresses.
+ */
+ if (ps->id == ID_6085)
+ /*
+		 * On the 6085, bits 3:0 are reserved, bit 6 controls ARP
+		 * mirroring, and unknown multicast forwarding is handled
+		 * in the Port Control register.
+ */
+ REG_WRITE(addr, 0x08, 0x0080);
+ else {
+ val = 0x0080 | dsa_upstream_port(ds);
+ if (p == dsa_upstream_port(ds))
+ val |= 0x0040;
+ REG_WRITE(addr, 0x08, val);
+ }
+
+ /*
+ * Rate Control: disable ingress rate limiting.
+ */
+ REG_WRITE(addr, 0x09, 0x0000);
+
+ /*
+ * Rate Control 2: disable egress rate limiting.
+ */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /*
+ * Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /*
+ * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+static int mv88e6131_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int i;
+ int ret;
+
+ mutex_init(&ps->smi_mutex);
+ mv88e6xxx_ppu_state_init(ds);
+ mutex_init(&ps->stats_mutex);
+
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
+ ret = mv88e6131_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6131_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 11; i++) {
+ ret = mv88e6131_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6131_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 11)
+ return port;
+ return -1;
+}
+
+static int
+mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr = mv88e6131_port_to_phy_addr(port);
+ return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
+}
+
+static int
+mv88e6131_phy_write(struct dsa_switch *ds,
+ int port, int regnum, u16 val)
+{
+ int addr = mv88e6131_port_to_phy_addr(port);
+ return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
+}
+
+static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+};
+
+static void
+mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
+ mv88e6131_hw_stats, port, data);
+}
+
+static void
+mv88e6131_get_ethtool_stats(struct dsa_switch *ds,
+ int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
+ mv88e6131_hw_stats, port, data);
+}
+
+static int mv88e6131_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6131_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6131_switch_driver = {
+ .tag_protocol = cpu_to_be16(ETH_P_DSA),
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6131_probe,
+ .setup = mv88e6131_setup,
+ .set_addr = mv88e6xxx_set_addr_direct,
+ .phy_read = mv88e6131_phy_read,
+ .phy_write = mv88e6131_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6131_get_strings,
+ .get_ethtool_stats = mv88e6131_get_ethtool_stats,
+ .get_sset_count = mv88e6131_get_sset_count,
+};
+
+MODULE_ALIAS("platform:mv88e6085");
+MODULE_ALIAS("platform:mv88e6095");
+MODULE_ALIAS("platform:mv88e6095f");
+MODULE_ALIAS("platform:mv88e6131");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
new file mode 100644
index 00000000000..5467c040824
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -0,0 +1,549 @@
+/*
+ * drivers/net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+/*
+ * If the switch's ADDR[4:0] strap pins are strapped to zero, it will
+ * use all 32 SMI bus addresses on its SMI bus, and all switch registers
+ * will be directly accessible on some {device address,register address}
+ * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
+ * will only respond to SMI transactions to that specific address, and
+ * an indirect addressing mechanism needs to be used to access its
+ * registers.
+ */
+static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ ret = mdiobus_read(bus, sw_addr, 0);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & 0x8000) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
+{
+ int ret;
+
+ if (sw_addr == 0)
+ return mdiobus_read(bus, addr, reg);
+
+ /*
+ * Wait for the bus to become free.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the read command.
+ */
+ ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for the read command to complete.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Read the data.
+ */
+ ret = mdiobus_read(bus, sw_addr, 1);
+ if (ret < 0)
+ return ret;
+
+ return ret & 0xffff;
+}
+
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_reg_read(ds->master_mii_bus,
+ ds->pd->sw_addr, addr, reg);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
+ int reg, u16 val)
+{
+ int ret;
+
+ if (sw_addr == 0)
+ return mdiobus_write(bus, addr, reg, val);
+
+ /*
+ * Wait for the bus to become free.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the data to write.
+ */
+ ret = mdiobus_write(bus, sw_addr, 1, val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the write command.
+ */
+ ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for the write command to complete.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_reg_write(ds->master_mii_bus,
+ ds->pd->sw_addr, addr, reg, val);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int mv88e6xxx_config_prio(struct dsa_switch *ds)
+{
+ /*
+ * Configure the IP ToS mapping registers.
+ */
+ REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
+ REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
+ REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
+ REG_WRITE(REG_GLOBAL, 0x13, 0x5555);
+ REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
+ REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
+
+ /*
+ * Configure the IEEE 802.1p priority mapping register.
+ */
+ REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
+
+ return 0;
+}
+
+int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
+{
+ REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
+ REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+
+ return 0;
+}
+
+int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < 6; i++) {
+ int j;
+
+ /*
+ * Write the MAC address byte.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
+
+ /*
+ * Wait for the write to complete.
+ */
+ for (j = 0; j < 16; j++) {
+ ret = REG_READ(REG_GLOBAL2, 0x0d);
+ if ((ret & 0x8000) == 0)
+ break;
+ }
+ if (j == 16)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+{
+ if (addr >= 0)
+ return mv88e6xxx_reg_read(ds, addr, regnum);
+ return 0xffff;
+}
+
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
+{
+ if (addr >= 0)
+ return mv88e6xxx_reg_write(ds, addr, regnum, val);
+ return 0;
+}
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ ret = REG_READ(REG_GLOBAL, 0x04);
+ REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
+
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ msleep(1);
+ if ((ret & 0xc000) != 0xc000)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ ret = REG_READ(REG_GLOBAL, 0x04);
+ REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
+
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ msleep(1);
+ if ((ret & 0xc000) == 0xc000)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
+{
+ struct mv88e6xxx_priv_state *ps;
+
+ ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
+ if (mutex_trylock(&ps->ppu_mutex)) {
+ struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
+
+ if (mv88e6xxx_ppu_enable(ds) == 0)
+ ps->ppu_disabled = 0;
+ mutex_unlock(&ps->ppu_mutex);
+ }
+}
+
+static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)_ps;
+
+ schedule_work(&ps->ppu_work);
+}
+
+static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->ppu_mutex);
+
+ /*
+ * If the PHY polling unit is enabled, disable it so that
+ * we can access the PHY registers. If it was already
+ * disabled, cancel the timer that is going to re-enable
+ * it.
+ */
+ if (!ps->ppu_disabled) {
+ ret = mv88e6xxx_ppu_disable(ds);
+ if (ret < 0) {
+ mutex_unlock(&ps->ppu_mutex);
+ return ret;
+ }
+ ps->ppu_disabled = 1;
+ } else {
+ del_timer(&ps->ppu_timer);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+
+ /*
+ * Schedule a timer to re-enable the PHY polling unit.
+ */
+ mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&ps->ppu_mutex);
+}
+
+void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+
+ mutex_init(&ps->ppu_mutex);
+ INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
+ init_timer(&ps->ppu_timer);
+ ps->ppu_timer.data = (unsigned long)ps;
+ ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
+}
+
+int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(ds);
+ if (ret >= 0) {
+ ret = mv88e6xxx_reg_read(ds, addr, regnum);
+ mv88e6xxx_ppu_access_put(ds);
+ }
+
+ return ret;
+}
+
+int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
+ int regnum, u16 val)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(ds);
+ if (ret >= 0) {
+ ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(ds);
+ }
+
+ return ret;
+}
+#endif
+
+void mv88e6xxx_poll_link(struct dsa_switch *ds)
+{
+ int i;
+
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ struct net_device *dev;
+ int uninitialized_var(port_status);
+ int link;
+ int speed;
+ int duplex;
+ int fc;
+
+ dev = ds->ports[i];
+ if (dev == NULL)
+ continue;
+
+ link = 0;
+ if (dev->flags & IFF_UP) {
+ port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00);
+ if (port_status < 0)
+ continue;
+
+ link = !!(port_status & 0x0800);
+ }
+
+ if (!link) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ continue;
+ }
+
+ switch (port_status & 0x0300) {
+ case 0x0000:
+ speed = 10;
+ break;
+ case 0x0100:
+ speed = 100;
+ break;
+ case 0x0200:
+ speed = 1000;
+ break;
+ default:
+ speed = -1;
+ break;
+ }
+ duplex = (port_status & 0x0400) ? 1 : 0;
+ fc = (port_status & 0x8000) ? 1 : 0;
+
+ if (!netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half",
+ fc ? "en" : "dis");
+ netif_carrier_on(dev);
+ }
+ }
+}
+
+static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x1d);
+ if ((ret & 0x8000) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+{
+ int ret;
+
+ /*
+ * Snapshot the hardware statistics counters for this port.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
+
+ /*
+ * Wait for the snapshotting to complete.
+ */
+ ret = mv88e6xxx_stats_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+{
+ u32 _val;
+ int ret;
+
+ *val = 0;
+
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat);
+ if (ret < 0)
+ return;
+
+ ret = mv88e6xxx_stats_wait(ds);
+ if (ret < 0)
+ return;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e);
+ if (ret < 0)
+ return;
+
+ _val = ret << 16;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f);
+ if (ret < 0)
+ return;
+
+ *val = _val | ret;
+}
+
+void mv88e6xxx_get_strings(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < nr_stats; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint64_t *data)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+ int i;
+
+ mutex_lock(&ps->stats_mutex);
+
+ ret = mv88e6xxx_stats_snapshot(ds, port);
+ if (ret < 0) {
+ mutex_unlock(&ps->stats_mutex);
+ return;
+ }
+
+ /*
+ * Read each of the counters.
+ */
+ for (i = 0; i < nr_stats; i++) {
+ struct mv88e6xxx_hw_stat *s = stats + i;
+ u32 low;
+ u32 high;
+
+ mv88e6xxx_stats_read(ds, s->reg, &low);
+ if (s->sizeof_stat == 8)
+ mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+ else
+ high = 0;
+
+ data[i] = (((u64)high) << 32) | low;
+ }
+
+ mutex_unlock(&ps->stats_mutex);
+}
+
+static int __init mv88e6xxx_init(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ register_switch_driver(&mv88e6131_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ register_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+ return 0;
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ unregister_switch_driver(&mv88e6131_switch_driver);
+#endif
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
new file mode 100644
index 00000000000..fc2cd7b90e8
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -0,0 +1,98 @@
+/*
+ * drivers/net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MV88E6XXX_H
+#define __MV88E6XXX_H
+
+#define REG_PORT(p) (0x10 + (p))
+#define REG_GLOBAL 0x1b
+#define REG_GLOBAL2 0x1c
+
+struct mv88e6xxx_priv_state {
+ /*
+ * When using multi-chip addressing, this mutex protects
+ * access to the indirect access registers. (In single-chip
+ * mode, this mutex is effectively useless.)
+ */
+ struct mutex smi_mutex;
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+ /*
+ * Handles automatic disabling and re-enabling of the PHY
+ * polling unit.
+ */
+ struct mutex ppu_mutex;
+ int ppu_disabled;
+ struct work_struct ppu_work;
+ struct timer_list ppu_timer;
+#endif
+
+ /*
+ * This mutex serialises access to the statistics unit.
+ * Hold this mutex over snapshot + dump sequences.
+ */
+ struct mutex stats_mutex;
+
+ int id; /* switch product id */
+};
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int reg;
+};
+
+int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
+int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
+ int reg, u16 val);
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
+int mv88e6xxx_config_prio(struct dsa_switch *ds);
+int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
+int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val);
+void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
+int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
+ int regnum, u16 val);
+void mv88e6xxx_poll_link(struct dsa_switch *ds);
+void mv88e6xxx_get_strings(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint8_t *data);
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint64_t *data);
+
+extern struct dsa_switch_driver mv88e6131_switch_driver;
+extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+
+#define REG_READ(addr, reg) \
+ ({ \
+ int __ret; \
+ \
+ __ret = mv88e6xxx_reg_read(ds, addr, reg); \
+ if (__ret < 0) \
+ return __ret; \
+ __ret; \
+ })
+
+#define REG_WRITE(addr, reg, val) \
+ ({ \
+ int __ret; \
+ \
+ __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \
+ if (__ret < 0) \
+ return __ret; \
+ })
+
+
+#endif
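Note that the REG_READ()/REG_WRITE() macros above return from the calling function when the underlying access fails, so they are only usable in functions that return an int and have a struct dsa_switch *ds in scope. A minimal sketch of the intended usage (hypothetical function, mirroring the reset sequence in the drivers above):

static int example_issue_reset(struct dsa_switch *ds)
{
	int status;

	status = REG_READ(REG_GLOBAL, 0x00);	/* bails out early on error */
	if ((status & 0xc800) == 0xc800)
		return 0;			/* reset already complete */

	REG_WRITE(REG_GLOBAL, 0x04, 0xc400);	/* also bails out on error */

	return 0;
}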
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index a7c5e8831e8..087648ea1ed 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
- dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 972f80ecc51..da410f03686 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -468,9 +468,10 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
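The strcpy()/sprintf() to strlcpy()/snprintf() conversions in this and the following ethtool hunks all follow the same pattern: the struct ethtool_drvinfo fields are fixed-size character arrays, so every copy is bounded by the field size. A generic sketch (driver strings are hypothetical, not part of the patch):

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	/* Bound every copy by the destination field's size. */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "PCMCIA 0x%lx", dev->base_addr);
}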
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index b42c06baba8..8153a3e0a1a 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2929,15 +2929,17 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+ strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strcpy(info->bus_info, dev_name(vp->gendev));
+ strlcpy(info->bus_info, dev_name(vp->gendev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "EISA 0x%lx %d",
- dev->base_addr, dev->irq);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "EISA 0x%lx %d", dev->base_addr, dev->irq);
}
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 20ea07508ac..6d6bc754b1a 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -988,21 +988,23 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if(tp->card_state == Sleeping) {
- strcpy(info->fw_version, "Sleep image");
+ strlcpy(info->fw_version, "Sleep image",
+ sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strcpy(info->fw_version, "Unknown runtime");
+ strlcpy(info->fw_version, "Unknown runtime",
+ sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
- snprintf(info->fw_version, 32, "%02x.%03x.%03x",
- sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
- sleep_ver & 0xfff);
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%02x.%03x.%03x", sleep_ver >> 24,
+ (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
}
}
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 58a12e4c78f..ef325ffa1b5 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -14,8 +14,6 @@
#define TX_PAGES 12 /* Two Tx slots */
-#define ETHER_ADDR_LEN 6
-
/* The 8390 specific per-packet-header format. */
struct e8390_pkt_hdr {
unsigned char status; /* status */
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 547737340cb..3ad5d2f9a49 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e9f8432f55b..9e8ba4f5636 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -735,15 +735,14 @@ static int ax_init_dev(struct net_device *dev)
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] =
ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr,
- ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
ax_reset_8390(dev);
@@ -991,18 +990,7 @@ static struct platform_driver axdrv = {
.resume = ax_resume,
};
-static int __init axdrv_init(void)
-{
- return platform_driver_register(&axdrv);
-}
-
-static void __exit axdrv_exit(void)
-{
- platform_driver_unregister(&axdrv);
-}
-
-module_init(axdrv_init);
-module_exit(axdrv_exit);
+module_platform_driver(axdrv);
MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
index 7a09575ecff..6428f9e7a55 100644
--- a/drivers/net/ethernet/8390/es3210.c
+++ b/drivers/net/ethernet/8390/es3210.c
@@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- for (i = 0; i < ETHER_ADDR_LEN ; i++)
+ for (i = 0; i < ETH_ALEN ; i++)
dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
/* Check the Racal vendor ID as well. */
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
index eeac843dcd2..d42938b6b59 100644
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
/* Retrieve and checksum the station address. */
outw(MAC_Page, ioaddr + HP_PAGING);
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for(i = 0; i < ETH_ALEN; i++) {
unsigned char inval = inb(ioaddr + 8 + i);
dev->dev_addr[i] = inval;
checksum += inval;
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
index 18564d4a7c0..113f1e075a2 100644
--- a/drivers/net/ethernet/8390/hp.c
+++ b/drivers/net/ethernet/8390/hp.c
@@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for(i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + i);
printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 3dac937a67c..5370c884620 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
if (!dev)
return -ENOMEM;
- for(j = 0; j < ETHER_ADDR_LEN; j++)
+ for (j = 0; j < ETH_ALEN; j++)
dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
/* We must set the 8390 for word mode. */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
index f9888d20177..69490ae018e 100644
--- a/drivers/net/ethernet/8390/lne390.c
+++ b/drivers/net/ethernet/8390/lne390.c
@@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
|| inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
|| inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
printk("lne390.c: card not found");
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
printk(" (invalid prefix).\n");
return -ENODEV;
}
#endif
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
0xa+revision, ioaddr/0x1000, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
index cd36a6a5f40..9b9c77d5a65 100644
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ b/drivers/net/ethernet/8390/ne-h8300.c
@@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 1063093b3af..f92ea2a65a5 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
#ifdef CONFIG_PLAT_MAPPI
outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ioaddr + E8390_CMD); /* 0x61 */
- for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i]
= inb_p(ioaddr + EN1_PHYS_SHIFT(i));
}
#else
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i];
}
#endif
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
index 70cdc699634..922b32036c6 100644
--- a/drivers/net/ethernet/8390/ne2.c
+++ b/drivers/net/ethernet/8390/ne2.c
@@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
dev->base_addr = base_addr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 39923425ba2..3fab04a0034 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
struct ei_device *ei = netdev_priv(dev);
struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index 243ed2aee88..2a3e8057fea 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device)
#endif
port_index = inb(ioaddr + NE3210_CFG2) >> 6;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
edev->slot, ifmap[port_index], dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index d85f0a84bc7..3b903759980 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -114,7 +114,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = stnic_eadr[i];
/* Set the base address to point to the NIC, not the "real" base! */
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 3aa9fe9999b..bcd27323b20 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
if (i)
return i;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 597f4d45c63..3474a61d470 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -28,6 +28,7 @@ source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index be5dde04026..cd6d69a6a7d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ATMEL) += cadence/
obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 6d9f6911000..cb4f38a17f2 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
#endif /* VLAN_SUPPORT */
@@ -1842,9 +1846,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 442fefa4f2c..c885aa905de 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1623,18 +1623,7 @@ static struct platform_driver greth_of_driver = {
.remove = __devexit_p(greth_of_remove),
};
-static int __init greth_init(void)
-{
- return platform_driver_register(&greth_of_driver);
-}
-
-static void __exit greth_cleanup(void)
-{
- platform_driver_unregister(&greth_of_driver);
-}
-
-module_init(greth_init);
-module_exit(greth_cleanup);
+module_platform_driver(greth_of_driver);
MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a9745f4ddbf..33e0a8c20f6 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev)
writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], mmio + PADR + i );
/* Enable interrupt coalesce */
@@ -1412,10 +1412,11 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strcpy (info->driver, MODULE_NAME);
- strcpy (info->version, MODULE_VERS);
- sprintf(info->fw_version,"%u",chip_version);
- strcpy (info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u", chip_version);
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1549,7 +1550,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], lp->mmio + PADR + i );
spin_unlock_irq(&lp->lock);
@@ -1885,7 +1886,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
}
/* Initializing MAC address */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = readb(lp->mmio + PADR + i);
/* Setting user defined parametrs */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 2ff2e7a12dd..8baa3527ba7 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -586,7 +586,6 @@ typedef enum {
#define PKT_BUFF_SZ 1536
#define MIN_PKT_LEN 60
-#define ETH_ADDR_LEN 6
#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
@@ -808,8 +807,8 @@ typedef enum {
static int card_idx;
static int speed_duplex[MAX_UNITS] = { 0, };
-static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
-static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
static unsigned int chip_version;
#endif /* _AMD8111E_H */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 4865ff14beb..cc9262be69c 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1339,18 +1339,7 @@ static struct platform_driver au1000_eth_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:au1000-eth");
-
-
-static int __init au1000_init_module(void)
-{
- return platform_driver_register(&au1000_eth_driver);
-}
-static void __exit au1000_exit_module(void)
-{
- platform_driver_unregister(&au1000_eth_driver);
-}
+module_platform_driver(au1000_eth_driver);
-module_init(au1000_init_module);
-module_exit(au1000_exit_module);
+MODULE_ALIAS("platform:au1000-eth");
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 3accd5d21b0..6be0dd67631 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -160,8 +160,6 @@ Include Files
Defines
---------------------------------------------------------------------------- */
-#define ETHER_ADDR_LEN ETH_ALEN
- /* 6 bytes in an Ethernet Address */
#define MACE_LADRF_LEN 8
/* 8 bytes in Logical Address Filter */
@@ -600,7 +598,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
}
}
/* Set PADR register */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
/* MAC Configuration Control Register should be written last */
@@ -639,11 +637,11 @@ static int nmclan_config(struct pcmcia_device *link)
/* Read the ethernet address from the CIS. */
len = pcmcia_get_tuple(link, 0x80, &buf);
- if (!buf || len < ETHER_ADDR_LEN) {
+ if (!buf || len < ETH_ALEN) {
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, buf, ETH_ALEN);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
@@ -822,9 +820,10 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1420,7 +1419,7 @@ Output
static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
- int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ int adr[ETH_ALEN] = {0}; /* Ethernet address */
struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
@@ -1442,7 +1441,7 @@ static void set_multicast_list(struct net_device *dev)
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
netdev_for_each_mc_addr(ha, dev) {
- memcpy(adr, ha->addr, ETHER_ADDR_LEN);
+ memcpy(adr, ha->addr, ETH_ALEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f92bc6e3482..20e6dab0186 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -711,12 +711,14 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (lp->pci_dev)
- strcpy(info->bus_info, pci_name(lp->pci_dev));
+ strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "VLB 0x%lx", dev->base_addr);
}
static u32 pcnet32_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 8fda457f94c..7ea16d32a5f 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1540,17 +1540,4 @@ static struct platform_driver sunlance_sbus_driver = {
.remove = __devexit_p(sunlance_sbus_remove),
};
-
-/* Find all the lance cards on the system and initialize them */
-static int __init sparc_lance_init(void)
-{
- return platform_driver_register(&sunlance_sbus_driver);
-}
-
-static void __exit sparc_lance_exit(void)
-{
- platform_driver_unregister(&sunlance_sbus_driver);
-}
-
-module_init(sparc_lance_init);
-module_exit(sparc_lance_exit);
+module_platform_driver(sunlance_sbus_driver);
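module_platform_driver() folds the register/unregister boilerplate that was deleted above into a single macro. A simplified illustration of roughly what the macro stands in for; example_driver is a hypothetical stand-in for the sunlance_sbus_driver defined in the file:

```c
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver;	/* stand-in for sunlance_sbus_driver */

/* Roughly what module_platform_driver(example_driver) expands to:
 * a trivial __init/__exit pair that only registers and unregisters
 * the driver.
 */
static int __init example_driver_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_driver_exit);
```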
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 7be884d0aaf..0a9326aa58b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -232,7 +232,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, atl1c_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 02c7ed8d9ec..b8591246eb4 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev)
}
}
-static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -422,7 +422,8 @@ static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1c_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -482,7 +483,8 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
}
-static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -499,9 +501,10 @@ static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1c_set_features(struct net_device *netdev, u32 features)
+static int atl1c_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1c_vlan_mode(netdev, features);
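The signature changes in the atl1c hunks above (and the matching atl1e/atl1/atl2/atlx/bnx2/bnx2x hunks later in this patch) track the core change that widened the features bitmap from u32 to netdev_features_t, a 64-bit type, so more than 32 NETIF_F_* flags can exist; every ndo_fix_features/ndo_set_features implementation and every local "changed" mask must switch to the new type or high bits are silently dropped. A hedged sketch of the callback shape assumed here, with hypothetical example_* names:

```c
#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* Keep RX and TX VLAN acceleration in sync, as the atl*
	 * drivers above do, since their hardware cannot split them.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;
	return features;
}

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX) {
		/* reprogram VLAN tag stripping in hardware here */
	}
	return 0;
}
```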
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6269438d365..6e61f9f9ebb 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -310,10 +310,12 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl1e_driver_name, 32);
- strncpy(drvinfo->version, atl1e_driver_version, 32);
- strncpy(drvinfo->fw_version, "L1e", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl1e_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl1e_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 95483bcac1d..c915c087381 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
-static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -324,7 +324,8 @@ static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1e_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
u32 mac_ctrl_data = 0;
@@ -370,7 +371,8 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
return 0;
}
-static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1e_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -384,9 +386,10 @@ static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1e_set_features(struct net_device *netdev, u32 features)
+static int atl1e_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1e_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 33a4e35f5ee..9bd20497664 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3365,7 +3365,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->eedump_len = ATL1_EEDUMP_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1feae5928a4..071f4c85896 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -372,7 +372,8 @@ static void __atl2_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+static void atl2_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
@@ -391,7 +392,8 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter)
atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl2_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -405,9 +407,10 @@ static u32 atl2_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl2_set_features(struct net_device *netdev, u32 features)
+static int atl2_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl2_vlan_mode(netdev, features);
@@ -2049,10 +2052,12 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl2_driver_name, 32);
- strncpy(drvinfo->version, atl2_driver_version, 32);
- strncpy(drvinfo->fw_version, "L2", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl2_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl2_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index aabcf4b5745..8ff7411094d 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -222,7 +222,8 @@ static void __atlx_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+static void atlx_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
@@ -242,7 +243,8 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atlx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -256,9 +258,10 @@ static u32 atlx_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atlx_set_features(struct net_device *netdev, u32 features)
+static int atlx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atlx_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4cf835dbc12..3fb66d09ece 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
bp->tx_cons = cons;
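The one-line b44 fix matters because this TX-completion path runs from the device interrupt handler, where dev_kfree_skb() must not be called; dev_kfree_skb_irq() (or dev_kfree_skb_any() when the context varies) defers the actual free to softirq context. A minimal sketch of the distinction, using a hypothetical ring layout rather than b44's own structures:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical TX-completion helper running from the ISR: freed skbs
 * must go through dev_kfree_skb_irq(), which only queues them for
 * later freeing, instead of dev_kfree_skb().
 */
static void example_tx_complete(struct sk_buff **ring, unsigned int cons,
				unsigned int prod)
{
	while (cons != prod) {
		struct sk_buff *skb = ring[cons];

		ring[cons] = NULL;
		dev_kfree_skb_irq(skb);		/* safe in hard-IRQ context */
		cons = (cons + 1) & 511;	/* hypothetical ring mask */
	}
}
```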
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 965c7235804..021fb818007 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -57,11 +57,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.1.11"
-#define DRV_MODULE_RELDATE "July 20, 2011"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
+#define DRV_MODULE_VERSION "2.2.1"
+#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_lock);
cp->drv_state = 0;
bnapi->cnic_present = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_lock);
synchronize_rcu();
return 0;
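Replacing rcu_assign_pointer(ptr, NULL) with RCU_INIT_POINTER(ptr, NULL) is a small but common cleanup: the memory barrier in rcu_assign_pointer() is only needed when publishing a freshly initialized object, so assigning NULL can use the barrier-free initializer. A hedged sketch of both directions, with a hypothetical ops pointer:

```c
#include <linux/rcupdate.h>

struct example_ops { void (*handler)(void); };	/* hypothetical */
static struct example_ops __rcu *example_ops_ptr;

static void example_register(struct example_ops *ops)
{
	/* Publishing an initialized object: the barrier in
	 * rcu_assign_pointer() is required so readers never see a
	 * half-constructed structure.
	 */
	rcu_assign_pointer(example_ops_ptr, ops);
}

static void example_unregister(void)
{
	/* NULL carries no data a reader could see half-built, so the
	 * cheaper RCU_INIT_POINTER() is sufficient here.
	 */
	RCU_INIT_POINTER(example_ops_ptr, NULL);
	synchronize_rcu();
}
```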
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
if (bp->autoneg & AUTONEG_SPEED) {
u32 adv_reg, adv1000_reg;
- u32 new_adv_reg = 0;
- u32 new_adv1000_reg = 0;
+ u32 new_adv = 0;
+ u32 new_adv1000 = 0;
bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- if (bp->advertising & ADVERTISED_10baseT_Half)
- new_adv_reg |= ADVERTISE_10HALF;
- if (bp->advertising & ADVERTISED_10baseT_Full)
- new_adv_reg |= ADVERTISE_10FULL;
- if (bp->advertising & ADVERTISED_100baseT_Half)
- new_adv_reg |= ADVERTISE_100HALF;
- if (bp->advertising & ADVERTISED_100baseT_Full)
- new_adv_reg |= ADVERTISE_100FULL;
- if (bp->advertising & ADVERTISED_1000baseT_Full)
- new_adv1000_reg |= ADVERTISE_1000FULL;
+ new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+ new_adv |= ADVERTISE_CSMA;
+ new_adv |= bnx2_phy_get_pause_adv(bp);
- new_adv_reg |= ADVERTISE_CSMA;
+ new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
- new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
- if ((adv1000_reg != new_adv1000_reg) ||
- (adv_reg != new_adv_reg) ||
+ if ((adv1000_reg != new_adv1000) ||
+ (adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
- bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
- bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+ bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
BMCR_ANENABLE);
}
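The hunk above collapses the open-coded ADVERTISED_* to ADVERTISE_* translation into the mii.h helpers ethtool_adv_to_mii_adv_t() and ethtool_adv_to_mii_ctrl1000_t(). A sketch of what the first helper computes for the 10/100 bits, mirroring the removed code; this is an illustration of the mapping, not the library's exact implementation:

```c
#include <linux/ethtool.h>
#include <linux/mii.h>

/* Map ethtool ADVERTISED_* flags onto MII advertisement register bits,
 * roughly what ethtool_adv_to_mii_adv_t() does for 10/100 speeds.
 */
static u32 example_adv_to_mii(u32 ethadv)
{
	u32 result = 0;

	if (ethadv & ADVERTISED_10baseT_Half)
		result |= ADVERTISE_10HALF;
	if (ethadv & ADVERTISED_10baseT_Full)
		result |= ADVERTISE_10FULL;
	if (ethadv & ADVERTISED_100baseT_Half)
		result |= ADVERTISE_100HALF;
	if (ethadv & ADVERTISED_100baseT_Full)
		result |= ADVERTISE_100FULL;
	return result;
}
```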
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
dma_addr_t mapping;
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
- unsigned long align;
- skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
- if (skb == NULL) {
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
return -ENOMEM;
- }
- if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
- skb_reserve(skb, BNX2_RX_ALIGN - align);
-
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+ mapping = dma_map_single(&bp->pdev->dev,
+ get_l2_fhdr(data),
+ bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- rx_buf->skb = skb;
- rx_buf->desc = (struct l2_fhdr *) skb->data;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
u16 hw_cons, sw_cons, sw_ring_cons;
int tx_pkt = 0, index;
+ unsigned int tx_bytes = 0;
struct netdev_queue *txq;
index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
+ tx_bytes += skb->len;
dev_kfree_skb(skb);
tx_pkt++;
if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
+ netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
txr->hw_tx_cons = hw_cons;
txr->tx_cons = sw_cons;
@@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
}
static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
- struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ u8 *data, u16 cons, u16 prod)
{
struct sw_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
rxr->rx_prod_bseq += bp->rx_buf_use_size;
- prod_rx_buf->skb = skb;
- prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+ prod_rx_buf->data = data;
if (cons == prod)
return;
@@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
u32 ring_idx)
{
int err;
u16 prod = ring_idx & 0xffff;
+ struct sk_buff *skb;
- err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+ err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
- bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+ bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
if (hdr_len) {
unsigned int raw_len = len + 4;
int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
}
- return err;
+ return NULL;
}
- skb_reserve(skb, BNX2_RX_OFFSET);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
-
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto error;
+ }
+ skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
if (hdr_len == 0) {
skb_put(skb, len);
- return 0;
+ return skb;
} else {
unsigned int i, frag_len, frag_size, pages;
struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
}
- return 0;
+ return skb;
}
rx_pg = &rxr->rx_pg_ring[pg_cons];
@@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
bnx2_reuse_rx_skb_pages(bp, rxr, skb,
pages - i);
- return err;
+ return NULL;
}
dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
rxr->rx_pg_cons = pg_cons;
}
- return 0;
+ return skb;
}
static inline u16
@@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ u8 *data;
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
- skb = rx_buf->skb;
- prefetchw(skb);
-
- next_rx_buf =
- &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
- prefetch(next_rx_buf->desc);
+ data = rx_buf->data;
+ rx_buf->data = NULL;
- rx_buf->skb = NULL;
+ rx_hdr = get_l2_fhdr(data);
+ prefetch(rx_hdr);
dma_addr = dma_unmap_addr(rx_buf, mapping);
@@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = rx_buf->desc;
+ next_rx_buf =
+ &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(get_l2_fhdr(next_rx_buf->data));
+
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME))) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
if (pg_ring_used) {
int pages;
@@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
len -= 4;
if (len <= bp->rx_copy_thresh) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + 6);
- if (new_skb == NULL) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ skb = netdev_alloc_skb(bp->dev, len + 6);
+ if (skb == NULL) {
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
/* aligned copy */
- skb_copy_from_linear_data_offset(skb,
- BNX2_RX_OFFSET - 6,
- new_skb->data, len + 6);
- skb_reserve(new_skb, 6);
- skb_put(new_skb, len);
+ memcpy(skb->data,
+ (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+ len + 6);
+ skb_reserve(skb, 6);
+ skb_put(skb, len);
- bnx2_reuse_rx_skb(bp, rxr, skb,
+ bnx2_reuse_rx_data(bp, rxr, data,
sw_ring_cons, sw_ring_prod);
- skb = new_skb;
- } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
- dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
- goto next_rx;
-
+ } else {
+ skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+ (sw_ring_cons << 16) | sw_ring_prod);
+ if (!skb)
+ goto next_rx;
+ }
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+ if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_num, i, bp->rx_ring_size);
break;
@@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
- sizeof(struct skb_shared_info);
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
}
bp->rx_buf_use_size = rx_size;
- /* hw alignment */
- bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+ /* hw alignment + build_skb() overhead */

+ bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
@@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
dev_kfree_skb(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
}
}
@@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_ring_idx; j++) {
struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
- dev_kfree_skb(skb);
+ kfree(data);
}
for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@ static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
unsigned int pkt_size, num_pkts, i;
- struct sk_buff *skb, *rx_skb;
+ struct sk_buff *skb;
+ u8 *data;
unsigned char *packet;
u16 rx_start_idx, rx_idx;
dma_addr_t map;
@@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
- rx_skb = rx_buf->skb;
+ data = rx_buf->data;
- rx_hdr = rx_buf->desc;
- skb_reserve(rx_skb, BNX2_RX_OFFSET);
+ rx_hdr = get_l2_fhdr(data);
+ data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
(L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
for (i = 14; i < pkt_size; i++) {
- if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+ if (*(data + i) != (unsigned char) (i & 0xff)) {
goto loopback_test_done;
}
}
@@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+ netdev_tx_sent_queue(txq, skb->len);
+
prod = NEXT_TX_BD(prod);
txr->tx_prod_bseq += skb->len;
@@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->bus_info, pci_name(bp->pdev));
- strcpy(info->fw_version, bp->fw_version);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
return 0;
}
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
}
static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
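Besides the build_skb() conversion, the bnx2.c hunks above wire the driver into byte queue limits (BQL): each transmitted skb's length is reported with netdev_tx_sent_queue() at xmit time, completions are batched into netdev_tx_completed_queue(), and netdev_tx_reset_queue() clears the accounting when the rings are torn down. A hedged sketch of the pattern in isolation, with hypothetical helper names and the hardware-ring details elided:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical xmit path: account bytes as they are queued. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct netdev_queue *txq)
{
	/* ... post skb to the hardware ring here ... */
	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}

/* Hypothetical completion path: report packets/bytes in one batch. */
static void example_tx_complete(struct netdev_queue *txq,
				unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}

/* On ring teardown the accounting must be reset, or the queue can
 * stay throttled after the interface is reopened.
 */
static void example_tx_reset(struct net_device *dev, int queue)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, queue));
}
```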
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 99d31a7d6aa..1db2d51ba3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6563,12 +6563,25 @@ struct l2_fhdr {
#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID)
#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID)
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc()'d data only;
+ * skbs are built only after the hardware has filled the frame.
+ */
struct sw_bd {
- struct sk_buff *skb;
- struct l2_fhdr *desc;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
+/* It's faster to compute this from data than storing it in sw_bd
+ * (fewer cache misses)
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+ return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
struct sw_pg {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
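The sw_bd change above is the heart of the build_skb() conversion: the RX ring now stores only a kmalloc()'d buffer sized for the frame plus skb_shared_info, the NIC DMAs straight into it, and the sk_buff metadata is built around the already-filled buffer only when the packet is handed to the stack. A hedged sketch of the receive-side sequence under that scheme, with hypothetical names and the DMA unmap done by the caller:

```c
#include <linux/skbuff.h>
#include <linux/slab.h>

/* data was kmalloc()'d at refill time, filled by the NIC, and
 * DMA-unmapped by the caller; build_skb() wraps it without copying
 * the payload.
 */
static struct sk_buff *example_rx_to_skb(u8 *data, unsigned int pkt_len,
					 unsigned int headroom)
{
	struct sk_buff *skb;

	skb = build_skb(data);
	if (!skb) {
		kfree(data);	/* nothing else owns the buffer yet */
		return NULL;
	}
	skb_reserve(skb, headroom);	/* skip pad + on-wire header offset */
	skb_put(skb, pkt_len);		/* expose the DMA-filled payload */
	return skb;
}
```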
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index aec7212ac98..8c73d34b2ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.70.30-0"
-#define DRV_MODULE_RELDATE "2011/10/25"
+#define DRV_MODULE_VERSION "1.70.35-0"
+#define DRV_MODULE_RELDATE "2011/11/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@ enum {
#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
/* fast path */
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc()'d data only;
+ * skbs are built only after the hardware has filled the frame.
+ */
struct sw_rx_bd {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -411,8 +416,7 @@ union db_prod {
/* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
- BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
@@ -425,8 +429,8 @@ union host_hc_status_block {
struct bnx2x_agg_info {
/*
- * First aggregation buffer is an skb, the following - are pages.
- * We will preallocate the skbs for each aggregation when
+ * First aggregation buffer is a data buffer; the following ones are pages.
+ * We will preallocate the data buffer for each aggregation when
* we open the interface and will replace the BD at the consumer
* with this one when we receive the TPA_START CQE in order to
* keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@ struct bnx2x_agg_info {
u16 parsing_flags;
u16 vlan_tag;
u16 len_on_bd;
+ u32 rxhash;
};
#define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@ struct bnx2x_fastpath {
__le16 fp_hc_idx;
u8 index; /* number in fp array */
+ u8 rx_queue; /* index for skb_record */
u8 cl_id; /* eth client id */
u8 cl_qzone_id;
u8 fw_sb_id; /* status block number in FW */
@@ -881,6 +887,8 @@ struct bnx2x_common {
#define CHIP_PORT_MODE_NONE 0x2
#define CHIP_MODE(bp) (bp->common.chip_port_mode)
#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+ u32 boot_mode;
};
/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1042,6 +1050,8 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
+
+ union drv_info_to_mcp drv_info_to_mcp;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -1122,18 +1132,21 @@ enum {
enum {
BNX2X_PORT_QUERY_IDX,
BNX2X_PF_QUERY_IDX,
+ BNX2X_FCOE_QUERY_IDX,
BNX2X_FIRST_QUEUE_QUERY_IDX,
};
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+ struct stats_query_entry query[FP_SB_MAX_E1x+
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
};
struct bnx2x_fw_stats_data {
struct stats_counter storm_counters;
struct per_port_stats port;
struct per_pf_stats pf;
+ struct fcoe_statistics_params fcoe;
struct per_queue_stats queue_stats[1];
};
@@ -1141,6 +1154,7 @@ struct bnx2x_fw_stats_data {
enum {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
+ BNX2X_SP_RTNL_FAN_FAILURE,
};
@@ -1186,10 +1200,20 @@ struct bnx2x {
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
- L1_CACHE_SHIFT : 8)
- /* FW use 2 Cache lines Alignment for start packet and size */
-#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
+
+ /* FW uses 2 Cache lines Alignment for start packet and size
+ *
+ * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END \
+ max(1UL << BNX2X_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
struct host_sp_status_block *def_status_blk;
@@ -1249,6 +1273,7 @@ struct bnx2x {
#define NO_ISCSI_OOO_FLAG (1 << 13)
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
+#define BC_SUPPORTS_PFC_STATS (1 << 17)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1984,13 +2009,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
-#define RSS_FLAGS(bp) \
- (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
- (bp->multi_mode << \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK 0x7f
@@ -2055,6 +2073,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+int bnx2x_close(struct net_device *dev);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2072,4 +2092,16 @@ static const u32 dmae_reg_go_c[] = {
void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+
+#define BNX2X_MF_PROTOCOL(bp) \
+ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#ifdef BCM_CNIC
+#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#endif
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 580b44edb06..2b731b25359 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
* @to: destination FP index
*
* Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpy()ing the entire
+ * source onto the target.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
- struct napi_struct orig_napi = to_fp->napi;
+
+ /* Copy the NAPI object as it has been already initialized */
+ from_fp->napi = to_fp->napi;
+
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
-
- /* Restore the NAPI object as it has been already initialized */
- to_fp->napi = orig_napi;
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -100,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
* return idx of last bd freed
*/
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
- u16 idx)
+ u16 idx, unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
struct eth_tx_start_bd *tx_start_bd;
@@ -157,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */
WARN_ON(!skb);
+ if (skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ }
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -168,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
struct netdev_queue *txq;
u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -187,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
" pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
- bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+ &pkts_compl, &bytes_compl);
+
sw_cons++;
}
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
txdata->tx_pkt_cons = sw_cons;
txdata->tx_bd_cons = bd_cons;
@@ -292,8 +304,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+ const struct eth_fast_path_rx_cqe *cqe)
+{
+ /* Set Toeplitz hash from CQE */
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ return le32_to_cpu(cqe->rss_hash_result);
+ return 0;
+}
+
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
- struct sk_buff *skb, u16 cons, u16 prod,
+ u16 cons, u16 prod,
struct eth_fast_path_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
@@ -308,9 +333,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (tpa_info->tpa_state != BNX2X_TPA_STOP)
BNX2X_ERR("start of bin not in stop [%d]\n", queue);
- /* Try to map an empty skb from the aggregation info */
+ /* Try to map an empty data buffer from the aggregation info */
mapping = dma_map_single(&bp->pdev->dev,
- first_buf->skb->data,
+ first_buf->data + NET_SKB_PAD,
fp->rx_buf_size, DMA_FROM_DEVICE);
/*
* ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +345,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
/* Move the BD from the consumer to the producer */
- bnx2x_reuse_rx_skb(fp, cons, prod);
+ bnx2x_reuse_rx_data(fp, cons, prod);
tpa_info->tpa_state = BNX2X_TPA_ERROR;
return;
}
- /* move empty skb from pool to prod */
- prod_rx_buf->skb = first_buf->skb;
+ /* move empty data from pool to prod */
+ prod_rx_buf->data = first_buf->data;
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
- /* point prod_bd to new skb */
+ /* point prod_bd to new data */
prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -342,6 +367,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +495,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
{
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u8 pad = tpa_info->placement_offset;
+ u32 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
- struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *skb = NULL;
+ u8 *data = rx_buf->data;
/* alloc new skb */
- struct sk_buff *new_skb;
+ u8 *new_data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,18 +511,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (old_tpa_state == BNX2X_TPA_ERROR)
goto drop;
- /* Try to allocate the new skb */
- new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+ /* Try to allocate the new data */
+ new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
+ if (likely(new_data))
+ skb = build_skb(data);
- if (likely(new_skb)) {
- prefetch(skb);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+ if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
@@ -507,8 +534,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
#endif
- skb_reserve(skb, pad);
+ skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
+ skb->rxhash = tpa_info->rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,8 +552,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
- /* put new skb in bin */
- rx_buf->skb = new_skb;
+ /* put new data in bin */
+ rx_buf->data = new_data;
return;
}
@@ -537,19 +565,6 @@ drop:
fp->eth_q_stats.rx_skb_alloc_failed++;
}
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
- struct sk_buff *skb)
-{
- /* Set Toeplitz hash from CQE */
- if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->fast_path_cqe.status_flags &
- ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
- skb->rxhash =
- le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
@@ -592,6 +607,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad;
+ u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -602,13 +618,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
- /* Prefetch the page containing the BD descriptor
- at producer's index. It will be needed when new skb is
- allocated */
- prefetch((void *)(PAGE_ALIGN((unsigned long)
- (&fp->rx_desc_ring[bd_prod])) -
- PAGE_SIZE + 1));
-
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp = &cqe->fast_path_cqe;
cqe_fp_flags = cqe_fp->type_error_flags;
@@ -624,138 +633,123 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
+ }
+ rx_buf = &fp->rx_buf_ring[bd_cons];
+ data = rx_buf->data;
- /* this is an rx packet */
- } else {
- rx_buf = &fp->rx_buf_ring[bd_cons];
- skb = rx_buf->skb;
- prefetch(skb);
-
- if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
- /* sanity check */
- if (fp->disable_tpa &&
- (CQE_TYPE_START(cqe_fp_type) ||
- CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while "
- "disable_tpa type %x\n",
- CQE_TYPE(cqe_fp_type));
+ /* sanity check */
+ if (fp->disable_tpa &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while "
+ "disable_tpa type %x\n",
+ CQE_TYPE(cqe_fp_type));
#endif
- if (CQE_TYPE_START(cqe_fp_type)) {
- u16 queue = cqe_fp->queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_start on queue %d\n",
- queue);
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_start on queue %d\n",
+ queue);
- bnx2x_tpa_start(fp, queue, skb,
- bd_cons, bd_prod,
- cqe_fp);
-
- /* Set Toeplitz hash for LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
-
- goto next_rx;
-
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
-
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ bnx2x_tpa_start(fp, queue,
+ bd_cons, bd_prod,
+ cqe_fp);
+ goto next_rx;
+ } else {
+ u16 queue =
+ cqe->end_agg_cqe.queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ bnx2x_tpa_stop(bp, fp, queue,
+ &cqe->end_agg_cqe,
+ comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, cqe_fp);
+ goto next_cqe;
}
- /* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
- pad = cqe_fp->placement_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev,
+ }
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- DMA_FROM_DEVICE);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
+ pad += NET_SKB_PAD;
+ prefetch(data + pad); /* speedup eth_type_trans() */
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR flags %x rx packet %u\n",
+ cqe_fp_flags, sw_comp_cons);
+ fp->eth_q_stats.rx_err_discard_pkt++;
+ goto reuse_rx;
+ }
- /* is this an error packet? */
- if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ /* Since we don't have a jumbo ring
+ * copy small packets if mtu > 1500
+ */
+ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+ (len <= RX_COPY_THRESH)) {
+ skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ if (skb == NULL) {
DP(NETIF_MSG_RX_ERR,
- "ERROR flags %x rx packet %u\n",
- cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ "ERROR packet dropped because of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
goto reuse_rx;
}
-
- /* Since we don't have a jumbo ring
- * copy small packets if mtu > 1500
- */
- if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
- (len <= RX_COPY_THRESH)) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + pad);
- if (new_skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped "
- "because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
- goto reuse_rx;
- }
-
- /* aligned copy */
- skb_copy_from_linear_data_offset(skb, pad,
- new_skb->data + pad, len);
- skb_reserve(new_skb, pad);
- skb_put(new_skb, len);
-
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
- skb = new_skb;
-
- } else
- if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+ memcpy(skb->data, data + pad, len);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+ } else {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
+ skb = build_skb(data);
+ if (unlikely(!skb)) {
+ kfree(data);
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ goto next_rx;
+ }
skb_reserve(skb, pad);
- skb_put(skb, len);
-
} else {
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped because "
"of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
}
+ }
- skb->protocol = eth_type_trans(skb, bp->dev);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, bp->dev);
- /* Set Toeplitz hash for a none-LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
+ /* Set Toeplitz hash for a none-LRO skb */
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
- skb_checksum_none_assert(skb);
+ skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ fp->eth_q_stats.hw_csum_err++;
}
- skb_record_rx_queue(skb, fp->index);
+ skb_record_rx_queue(skb, fp->rx_queue);
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
@@ -765,7 +759,7 @@ reuse_rx:
next_rx:
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
bd_cons = NEXT_RX_IDX(bd_cons);
bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +1005,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->skb = netdev_alloc_skb(bp->dev,
- fp->rx_buf_size);
- if (!first_buf->skb) {
+ first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+ GFP_ATOMIC);
+ if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
"disabling TPA on this "
@@ -1093,16 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ unsigned pkts_compl = 0, bytes_compl = 0;
- u16 bd_cons = txdata->tx_bd_cons;
u16 sw_prod = txdata->tx_pkt_prod;
u16 sw_cons = txdata->tx_pkt_cons;
while (sw_cons != sw_prod) {
- bd_cons = bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(sw_cons));
+ bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
+ netdev_tx_reset_queue(
+ netdev_get_tx_queue(bp->dev, txdata->txq_index));
}
}
}
@@ -1118,16 +1114,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
for (i = 0; i < NUM_RX_BD; i++) {
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- rx_buf->skb = NULL;
- dev_kfree_skb(skb);
+ rx_buf->data = NULL;
+ kfree(data);
}
}
@@ -1445,6 +1441,11 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
break;
}
+#ifdef BCM_CNIC
+ /* override in ISCSI SD mod */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->num_queues = 1;
+#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
}
@@ -1509,6 +1510,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ u32 mtu;
/* Always use a mini-jumbo MTU for the FCoE L2 ring */
if (IS_FCOE_IDX(i))
@@ -1518,13 +1520,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
* IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
* overrun attack.
*/
- fp->rx_buf_size =
- BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
else
- fp->rx_buf_size =
- bp->dev->mtu + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = bp->dev->mtu;
+ fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+ IP_HEADER_ALIGNMENT_PADDING +
+ ETH_OVREHEAD +
+ mtu +
+ BNX2X_FW_RX_ALIGN_END;
+ /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
}
}
@@ -1541,7 +1545,8 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
for (i = 0; i < sizeof(ind_table); i++)
ind_table[i] =
- bp->fp->cl_id + (i % num_eth_queues);
+ bp->fp->cl_id +
+ ethtool_rxfh_indir_default(i, num_eth_queues);
}
/*
@@ -1929,13 +1934,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
}
- if (!bp->port.pmf)
+ if (bp->port.pmf)
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+ else
bnx2x__link_status_update(bp);
/* start the timer */
mod_timer(&bp->timer, jiffies + bp->current_interval);
#ifdef BCM_CNIC
+ /* re-read iscsi info */
+ bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -2799,6 +2808,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
"dropping packet...\n");
@@ -2810,7 +2820,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
first_bd->nbd = cpu_to_le16(nbd);
bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(txdata->tx_pkt_prod));
+ TX_BD(txdata->tx_pkt_prod),
+ &pkts_compl, &bytes_compl);
return NETDEV_TX_OK;
}
@@ -2871,6 +2882,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+ netdev_tx_sent_queue(txq, skb->len);
+
txdata->tx_pkt_prod++;
/*
* Make sure that the BD data is updated before updating the producer
@@ -2981,9 +2994,14 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+ if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
return -EINVAL;
+#ifdef BCM_CNIC
+ if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+ return -EINVAL;
+#endif
+
if (netif_running(dev)) {
rc = bnx2x_set_eth_mac(bp, false);
if (rc)
@@ -3098,7 +3116,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
- /* if rx_ring_size specified - use it */
+#ifdef BCM_CNIC
+ if (IS_MF_ISCSI_SD(bp)) {
+ rx_ring_size = MIN_RX_SIZE_NONTPA;
+ bp->rx_ring_size = rx_ring_size;
+ } else
+#endif
if (!bp->rx_ring_size) {
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
@@ -3108,7 +3131,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
MIN_RX_SIZE_TPA, rx_ring_size);
bp->rx_ring_size = rx_ring_size;
- } else
+ } else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
/* Common */
@@ -3278,14 +3301,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+ fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
bp->fp = fp;
/* msix table */
- tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
goto alloc_err;
bp->msix_table = tbl;
@@ -3409,7 +3432,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
return bnx2x_reload_if_running(dev);
}
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -3420,7 +3444,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features)
return features;
}
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
u32 flags = bp->flags;
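Switching the fp array and MSI-X table allocations from kzalloc(n * size, ...) to kcalloc(n, size, ...) keeps the zeroing behaviour but moves the multiplication into the allocator, which checks it for overflow. A short sketch of the difference, with a hypothetical element type:

```c
#include <linux/slab.h>

struct example_entry { int val; };	/* hypothetical element type */

static struct example_entry *example_alloc_table(size_t n)
{
	/* kcalloc() zeroes like kzalloc(), but also fails cleanly
	 * (returns NULL) if n * sizeof(struct example_entry) would
	 * overflow, instead of allocating a too-small buffer.
	 */
	return kcalloc(n, sizeof(struct example_entry), GFP_KERNEL);
}
```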
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 283d663da18..bf27c54ff2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include "bnx2x.h"
@@ -533,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
*/
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
/**
* bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +876,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
- memset(fp->sge_mask, 0xff,
- (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
@@ -911,26 +912,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
return 0;
}
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
- if (unlikely(skb == NULL))
+ data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ if (unlikely(data == NULL))
return -ENOMEM;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+ mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+ fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
+ kfree(data);
return -ENOMEM;
}
- rx_buf->skb = skb;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +941,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
return 0;
}
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
* we are just moving one from cons to prod
* we are not creating a new mapping,
* so there is no need to check for dma_mapping_error().
*/
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
u16 cons, u16 prod)
{
struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +956,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
- prod_rx_buf->skb = cons_rx_buf->skb;
+ prod_rx_buf->data = cons_rx_buf->data;
*prod_bd = *cons_bd;
}
@@ -1030,9 +1032,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
for (i = 0; i < last; i++) {
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
struct sw_rx_bd *first_buf = &tpa_info->first_buf;
- struct sk_buff *skb = first_buf->skb;
+ u8 *data = first_buf->data;
- if (skb == NULL) {
+ if (data == NULL) {
DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
continue;
}
@@ -1040,8 +1042,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(first_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- first_buf->skb = NULL;
+ kfree(data);
+ first_buf->data = NULL;
}
}
@@ -1149,7 +1151,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
fp->eth_q_stats.rx_skb_alloc_failed++;
continue;
}
@@ -1318,6 +1320,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
unsigned long q_type = 0;
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
/** Current BNX2X_FCOE_ETH_CID definition implies not more than
@@ -1488,4 +1491,77 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
return max_cfg;
}
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp: driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp: driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
+ continue;
+
+ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp: driver handle
+ * @flags: flags to update
+ * @set: set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ }
+}
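bnx2x_update_drv_flags(), now exposed from the header so the DCB code and the PMF-migration path can share it, is a read-modify-write of the shared-memory drv_flags word under a HW lock. A minimal user-space sketch of the same set/clear logic, with SET_FLAGS/RESET_FLAGS spelled out as plain bit operations and the lock and SHMEM accessors assumed away:

#include <stdint.h>
#include <stdio.h>

/* assumed equivalents of the driver's SET_FLAGS()/RESET_FLAGS() macros */
#define SET_FLAGS(word, bits)	((word) |= (bits))
#define RESET_FLAGS(word, bits)	((word) &= ~(bits))

#define DRV_FLAGS_DCB_CONFIGURED 0x2	/* hypothetical bit value */

static uint32_t shmem_drv_flags;	/* stands in for the shmem drv_flags word */

static void update_drv_flags(uint32_t flags, uint32_t set)
{
	uint32_t drv_flags = shmem_drv_flags;	/* SHMEM2_RD() */

	if (set)
		SET_FLAGS(drv_flags, flags);
	else
		RESET_FLAGS(drv_flags, flags);

	shmem_drv_flags = drv_flags;		/* SHMEM2_WR() */
}

int main(void)
{
	update_drv_flags(DRV_FLAGS_DCB_CONFIGURED, 1);
	printf("after set:   0x%08x\n", (unsigned)shmem_drv_flags);
	update_drv_flags(DRV_FLAGS_DCB_CONFIGURED, 0);
	printf("after clear: 0x%08x\n", (unsigned)shmem_drv_flags);
	return 0;
}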
+
+static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
+{
+ if (is_valid_ether_addr(addr))
+ return true;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+ return true;
+#endif
+ return false;
+}
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 51bd7485ab1..5051cf3deb2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
}
#endif
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
- if (SHMEM2_HAS(bp, drv_flags)) {
- u32 drv_flags;
- bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- drv_flags = SHMEM2_RD(bp, drv_flags);
-
- if (set)
- SET_FLAGS(drv_flags, flags);
- else
- RESET_FLAGS(drv_flags, flags);
-
- SHMEM2_WR(bp, drv_flags, drv_flags);
- DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
- bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- }
-}
-
static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
{
u8 prio, cos;
@@ -755,18 +737,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
/* mark DCBX result for PMF migration */
bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
#ifdef BCM_DCBNL
- /**
+ /*
* Add new app tlvs to dcbnl
*/
bnx2x_dcbnl_update_applist(bp, false);
#endif
- bnx2x_dcbx_stop_hw_tx(bp);
-
- /* reconfigure the netdevice with the results of the new
+ /*
+ * reconfigure the netdevice with the results of the new
* dcbx negotiation.
*/
bnx2x_dcbx_update_tc_mapping(bp);
+ /*
+ * allow other functions to update their netdevices
+ * accordingly
+ */
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+
+ bnx2x_dcbx_stop_hw_tx(bp);
+
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +765,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_dcbx_update_ets_params(bp);
bnx2x_dcbx_resume_hw_tx(bp);
+
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +874,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
/*For IEEE admin_recommendation_bw_precentage
*For IEEE admin_recommendation_ets_pg */
af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
if (dp->admin_priority_app_table[i].valid) {
struct bnx2x_admin_priority_app_table *table =
dp->admin_priority_app_table;
@@ -923,7 +914,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
- if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {
@@ -1863,7 +1854,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
{
/* if we need to syncronize DCBX result from prev PMF
- * read it from shmem and update bp accordingly
+ * read it from shmem and update bp and netdev accordingly
*/
if (SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
@@ -1875,6 +1866,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
bp->dcbx_error);
bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
bp->dcbx_error);
+#ifdef BCM_DCBNL
+ /*
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ /*
+ * reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
}
}
@@ -2242,7 +2249,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
int i, ff;
/* iterate over the app entries looking for idtype and idval */
- for (i = 0, ff = -1; i < 4; i++) {
+ for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
struct bnx2x_admin_priority_app_table *app_ent =
&bp->dcbx_config_params.admin_priority_app_table[i];
if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2258,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
if (ff < 0 && !app_ent->valid)
ff = i;
}
- if (i < 4)
+ if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
/* if found overwrite up */
bp->dcbx_config_params.
admin_priority_app_table[i].priority = up;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2c6a3bca6f2..2ab9254e2d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table {
u32 app_id;
};
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
struct bnx2x_config_dcbx_params {
u32 overwrite_settings;
u32 admin_dcbx_version;
@@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params {
u32 admin_recommendation_bw_precentage[8];
u32 admin_recommendation_ets_pg[8];
u32 admin_pfc_bitmap;
- struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+ struct bnx2x_admin_priority_app_table
+ admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
u32 admin_default_priority;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index f0ca8b27a55..a688b9d975a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -107,6 +107,10 @@ static const struct {
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(mf_tag_discard),
4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+ { STATS_OFFSET32(pfc_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_received" },
+ { STATS_OFFSET32(pfc_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
@@ -352,7 +356,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(NETIF_MSG_LINK, "Unsupported port type\n");
return -EINVAL;
}
- /* Save new config in case command complete successuly */
+ /* Save new config in case the command completes successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -761,8 +765,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 phy_fw_ver[PHY_FW_VER_LEN];
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
phy_fw_ver[0] = '\0';
if (bp->port.pmf) {
@@ -773,14 +777,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- strncpy(info->fw_version, bp->fw_ver, 32);
+ strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
- strcpy(info->bus_info, pci_name(bp->pdev));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
info->eedump_len = bp->common.flash_size;
@@ -1740,6 +1744,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
struct sw_rx_bd *rx_buf;
u16 len;
int rc = -ENODEV;
+ u8 *data;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
@@ -1748,8 +1754,18 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
- bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
- LOOPBACK_XMAC : LOOPBACK_BMAC;
+ if (CHIP_IS_E3(bp)) {
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (bp->port.supported[cfg_idx] &
+ (SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
+ bp->link_params.loopback_mode = LOOPBACK_XMAC;
+ else
+ bp->link_params.loopback_mode = LOOPBACK_UMAC;
+ } else
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
@@ -1784,6 +1800,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+ netdev_tx_sent_queue(txq, skb->len);
+
pkt_prod = txdata->tx_pkt_prod++;
tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -1865,10 +1883,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp_rx->rx_buf_size, DMA_FROM_DEVICE);
- skb = rx_buf->skb;
- skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+ data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
for (i = ETH_HLEN; i < pkt_size; i++)
- if (*(skb->data + i) != (unsigned char) (i & 0xff))
+ if (*(data + i) != (unsigned char) (i & 0xff))
goto test_loopback_rx_exit;
rc = 0;
@@ -2285,18 +2302,20 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
}
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev,
- struct ethtool_rxfh_indir *indir)
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
+ 0 : T_ETH_INDIRECTION_TABLE_SIZE);
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
- size_t copy_size =
- min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
/* Get the current configuration of the RSS indirection table */
bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
@@ -2309,33 +2328,19 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
* align the returned table to the Client ID of the leading RSS
* queue.
*/
- for (i = 0; i < copy_size; i++)
- indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
-
- indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+ indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev,
- const struct ethtool_rxfh_indir *indir)
+static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
- u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
- /* validate the size */
- if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
- return -EINVAL;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
- /* validate the indices */
- if (indir->ring_index[i] >= num_eth_queues)
- return -EINVAL;
/*
* The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
@@ -2345,7 +2350,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+ ind_table[i] = indir[i] + bp->fp->cl_id;
}
return bnx2x_config_rss_pf(bp, ind_table, false);
@@ -2378,6 +2383,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
};
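The ethtool conversion above replaces the old ethtool_rxfh_indir structure with a bare u32 table plus a get_rxfh_indir_size() callback, so the core now handles sizing and index validation and the driver only translates between ring indices and client IDs by adding or subtracting the leading RSS queue's cl_id. A stand-alone sketch of that translation (table size and cl_id are made-up values):

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 8	/* stand-in for T_ETH_INDIRECTION_TABLE_SIZE */

int main(void)
{
	uint8_t  cl_id = 17;			/* hypothetical leading RSS queue client ID */
	uint8_t  ind_table[TABLE_SIZE];		/* internal table: client IDs */
	uint32_t indir[TABLE_SIZE];		/* ethtool view: ring indices */
	int i;

	/* internal table as configured in HW: cl_id + ring index */
	for (i = 0; i < TABLE_SIZE; i++)
		ind_table[i] = cl_id + (i % 4);

	/* get_rxfh_indir direction: client ID -> ring index */
	for (i = 0; i < TABLE_SIZE; i++)
		indir[i] = ind_table[i] - cl_id;

	/* set_rxfh_indir direction: ring index -> client ID */
	for (i = 0; i < TABLE_SIZE; i++)
		ind_table[i] = indir[i] + cl_id;

	for (i = 0; i < TABLE_SIZE; i++)
		printf("entry %d: ring %u -> client id %u\n",
		       i, (unsigned)indir[i], (unsigned)ind_table[i]);
	return 0;
}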
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index fc754cb6cc0..3e30c8642c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1247,11 +1247,14 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+ #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
+ #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
@@ -1304,6 +1307,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+ #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
+ #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1360,6 +1365,7 @@ struct drv_func_mb {
#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_DRV_INFO_REQ 0x04000000
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -1964,9 +1970,38 @@ struct shmem2_region {
u32 extended_dev_info_shared_addr;
u32 ncsi_oem_data_addr;
- u32 ocsd_host_addr;
- u32 ocbb_host_addr;
- u32 ocsd_req_update_interval;
+ u32 ocsd_host_addr; /* initialized by option ROM */
+ u32 ocbb_host_addr; /* initialized by option ROM */
+ u32 ocsd_req_update_interval; /* initialized by option ROM */
+ u32 temperature_in_half_celsius;
+ u32 glob_struct_in_host;
+
+ u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000
+
+ u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+
+ u32 extended_dev_info_shared_cfg_size;
+
+ u32 dcbx_en[PORT_MAX];
+
+ /* The offset points to the multi threaded meta structure */
+ u32 multi_thread_data_offset;
+
+ /* address of DMAable host address holding values from the drivers */
+ u32 drv_info_host_addr_lo;
+ u32 drv_info_host_addr_hi;
+
+ /* general values written by the MFW (such as current version) */
+ u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK 0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT 0
+#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
};
@@ -2501,14 +2536,18 @@ struct mac_stx {
#define MAC_STX_IDX_MAX 2
struct host_port_stats {
- u32 host_port_stats_start;
+ u32 host_port_stats_counter;
struct mac_stx mac_stx[MAC_STX_IDX_MAX];
u32 brb_drop_hi;
u32 brb_drop_lo;
- u32 host_port_stats_end;
+ u32 not_used; /* obsolete */
+ u32 pfc_frames_tx_hi;
+ u32 pfc_frames_tx_lo;
+ u32 pfc_frames_rx_hi;
+ u32 pfc_frames_rx_lo;
};
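The pfc_frames_tx/rx counters added to host_port_stats follow the existing convention for 8-byte port statistics: a *_hi/*_lo pair of u32 words merged into one 64-bit value when reported (the new ethtool entries above are declared with size 8 for the same reason). A small sketch of the merge, with made-up counter values:

#include <stdint.h>
#include <stdio.h>

/* merge a *_hi/*_lo pair of u32 counters into one 64-bit statistic */
static uint64_t stat64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* hypothetical snapshot of pfc_frames_rx_hi/lo from host_port_stats */
	uint32_t pfc_frames_rx_hi = 0x1;
	uint32_t pfc_frames_rx_lo = 0xbeef;

	printf("pfc_frames_received = %llu\n",
	       (unsigned long long)stat64(pfc_frames_rx_hi, pfc_frames_rx_lo));
	return 0;
}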
@@ -2548,6 +2587,118 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 0
#define BCM_5710_FW_REVISION_VERSION 29
@@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers {
/*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
*/
+struct fcoe_rx_stat_params_section0 {
+ __le32 fcoe_rx_pkt_cnt;
+ __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ __le32 fcoe_ver_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+ __le32 fc_crc_cnt;
+ __le32 eofa_del_cnt;
+ __le32 miss_frame_cnt;
+ __le32 seq_timeout_cnt;
+ __le32 drop_seq_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcp_rx_pkt_cnt;
+ __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ __le32 fcoe_tx_pkt_cnt;
+ __le32 fcoe_tx_byte_cnt;
+ __le32 fcp_tx_pkt_cnt;
+ __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+ struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * cfc delete event data
+ */
struct cfc_del_event_data {
u32 cid;
u32 reserved0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index bce203fa4b9..4df9505b67b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,7 +27,6 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
-
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -163,6 +162,11 @@
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+/* BRB default for class 0 E2 */
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
+#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
/* BRB thresholds for E2*/
#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
@@ -177,6 +181,12 @@
#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
+/* BRB default for class 0 E3A0 */
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
+
/* BRB thresholds for E3A0 */
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
@@ -190,6 +200,11 @@
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
+/* BRB default for E3B0 */
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
/* BRB thresholds for E3B0 2 port mode*/
#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
@@ -239,18 +254,29 @@
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
-
/* only for E3B0*/
#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
-#define PFC_E3B0_4P_LB_GUART 120
+#define PFC_E3B0_4P_LB_GUART 120
#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+
+/* Pause defines*/
+#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
+#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
+#define DEFAULT_E3B0_LB_GUART 40
+
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
+
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
+/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -440,7 +466,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
u32 min_w_val = 0;
/* Calculate min_w_val.*/
if (vars->link_up) {
- if (SPEED_20000 == vars->line_speed)
+ if (vars->line_speed == SPEED_20000)
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
else
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
@@ -490,7 +516,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
credit_upper_bound);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
@@ -584,7 +610,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
@@ -612,7 +638,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
* port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -674,7 +700,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
* In 2 port mode port0 has COS0-5 that can be used for WFQ.
* In 4 port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_weight = PBF_REG_COS0_WEIGHT_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -846,34 +872,47 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
******************************************************************************/
static int bnx2x_ets_e3b0_get_total_bw(
const struct link_params *params,
- const struct bnx2x_ets_params *ets_params,
+ struct bnx2x_ets_params *ets_params,
u16 *total_bw)
{
struct bnx2x *bp = params->bp;
u8 cos_idx = 0;
+ u8 is_bw_cos_exist = 0;
*total_bw = 0 ;
+
/* Calculate total BW requested */
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
- if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+ if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+ is_bw_cos_exist = 1;
+ if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+ "was set to 0\n");
+ /*
+ * This is to prevent a state when ramrods
+ * can't be sent
+ */
+ ets_params->cos[cos_idx].params.bw_params.bw
+ = 1;
+ }
*total_bw +=
ets_params->cos[cos_idx].params.bw_params.bw;
}
}
/* Check total BW is valid */
- if ((100 != *total_bw) || (0 == *total_bw)) {
- if (0 == *total_bw) {
+ if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+ if (*total_bw == 0) {
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
+ "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
return -EINVAL;
}
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW should be 100\n");
- /**
- * We can handle a case whre the BW isn't 100 this can happen
- * if the TC are joined.
- */
+ "bnx2x_ets_E3B0_config total BW should be 100\n");
+ /*
+ * We can handle a case where the BW isn't 100; this can happen
+ * if the TCs are joined.
+ */
}
return 0;
}
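The reworked bnx2x_ets_e3b0_get_total_bw() only complains when at least one COS is actually in BW mode and the sum differs from 100, and it bumps a zero per-COS bandwidth to 1 so later ramrods can still be sent. The same decision logic as a stand-alone sketch (COS layout and values are made up):

#include <stdio.h>

enum cos_state { COS_STATE_STRICT, COS_STATE_BW };

struct cos {
	enum cos_state state;
	unsigned int bw;
};

/* mirrors the checks above: 0 on success, -1 when the total BW is invalid */
static int get_total_bw(struct cos *cos, int num, unsigned int *total_bw)
{
	int i, bw_cos_exists = 0;

	*total_bw = 0;
	for (i = 0; i < num; i++) {
		if (cos[i].state != COS_STATE_BW)
			continue;
		bw_cos_exists = 1;
		if (!cos[i].bw)
			cos[i].bw = 1;	/* avoid a state where ramrods can't be sent */
		*total_bw += cos[i].bw;
	}

	if (bw_cos_exists && *total_bw != 100) {
		if (!*total_bw)
			return -1;	/* a zero total is rejected outright */
		/* a non-100 total is only logged: TCs may have been joined */
	}
	return 0;
}

int main(void)
{
	struct cos cfg[] = {
		{ COS_STATE_BW, 60 },
		{ COS_STATE_BW, 0 },	/* gets bumped to 1 */
		{ COS_STATE_STRICT, 0 },
	};
	unsigned int total;
	int rc = get_total_bw(cfg, 3, &total);

	printf("rc=%d total_bw=%u cos1 bw=%u\n", rc, total, cfg[1].bw);
	return 0;
}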
@@ -904,7 +943,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
- if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter There can't be two COS's with "
"the same strict pri\n");
@@ -913,7 +952,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (pri > max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
- "parameter Illegal strict priority\n");
+ "parameter Illegal strict priority\n");
return -EINVAL;
}
@@ -995,8 +1034,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
/* Set all the strict priority first */
for (i = 0; i < max_num_of_cos; i++) {
- if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
- if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+ if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid cos entry\n");
@@ -1010,7 +1049,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
sp_pri_to_cos[i], pri_set);
pri_bitmask = 1 << sp_pri_to_cos[i];
/* COS is used remove it from bitmap.*/
- if (0 == (pri_bitmask & cos_bit_to_set)) {
+ if (!(pri_bitmask & cos_bit_to_set)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid There can't be two COS's with"
@@ -1072,7 +1111,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
******************************************************************************/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params)
+ struct bnx2x_ets_params *ets_params)
{
struct bnx2x *bp = params->bp;
int bnx2x_status = 0;
@@ -1105,15 +1144,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
/* Prepare BW parameters*/
bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
&total_bw);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config get_total_bw failed\n");
return -EINVAL;
}
- /**
- * Upper bound is set according to current link speed (min_w_val
- * should be the same for upper bound and COS credit val).
+ /*
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
*/
bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
@@ -1122,7 +1161,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
cos_bw_bitmap |= (1 << cos_entry);
- /**
+ /*
* The function also sets the BW in HW(not the mappin
* yet)
*/
@@ -1146,7 +1185,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
"bnx2x_ets_e3b0_config cos state not valid\n");
return -EINVAL;
}
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_config set cos bw failed\n");
return bnx2x_status;
@@ -1157,7 +1196,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
sp_pri_to_cos);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
return bnx2x_status;
@@ -1168,7 +1207,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
cos_sp_bitmap,
cos_bw_bitmap);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
return bnx2x_status;
}
@@ -1232,9 +1271,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
- if ((0 == total_bw) ||
- (0 == cos0_bw) ||
- (0 == cos1_bw)) {
+ if ((!total_bw) ||
+ (!cos0_bw) ||
+ (!cos1_bw)) {
DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
@@ -1290,7 +1329,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
* dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
* dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
*/
- val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ val = (!strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
return 0;
@@ -1298,7 +1337,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/******************************************************************/
/* PFC section */
/******************************************************************/
-
static void bnx2x_update_pfc_xmac(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
@@ -1401,7 +1439,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
if (!vars->link_up)
return;
- if (MAC_TYPE_EMAC == vars->mac_type) {
+ if (vars->mac_type == MAC_TYPE_EMAC) {
DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
pfc_frames_received);
@@ -1435,6 +1473,18 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
udelay(40);
}
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
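bnx2x_is_4_port_mode(), moved up in the file so the earlier code can reach it, decodes an override register: bit 0 says whether the override is active and bit 1 carries the overridden value; otherwise a second register reflects the input pin. A sketch of that decode with hypothetical register contents:

#include <stdint.h>
#include <stdio.h>

/* decode the override word: bit 0 = override enable, bit 1 = overridden value */
static uint8_t is_4_port_mode(uint32_t port4mode_ovwr_val, uint32_t port4mode_pin)
{
	if (port4mode_ovwr_val & (1 << 0))
		return (port4mode_ovwr_val & (1 << 1)) == (1 << 1);
	/* no override: fall back to the input pin */
	return (uint8_t)port4mode_pin;
}

int main(void)
{
	/* hypothetical register contents */
	printf("override on, forced 4-port: %u\n", is_4_port_mode(0x3, 0));
	printf("override on, forced 2-port: %u\n", is_4_port_mode(0x1, 1));
	printf("no override, pin says 4-port: %u\n", is_4_port_mode(0x0, 1));
	return 0;
}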
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
@@ -1601,31 +1651,18 @@ static void bnx2x_umac_enable(struct link_params *params,
}
-static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
-{
- u32 port4mode_ovwr_val;
- /* Check 4-port override enabled */
- port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
- if (port4mode_ovwr_val & (1<<0)) {
- /* Return 4-port mode override value */
- return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
- }
- /* Return 4-port mode from input pin */
- return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
-}
-
/* Define the XMAC mode */
static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
{
struct bnx2x *bp = params->bp;
u32 is_port4mode = bnx2x_is_4_port_mode(bp);
- /**
- * In 4-port mode, need to set the mode only once, so if XMAC is
- * already out of reset, it means the mode has already been set,
- * and it must not* reset the XMAC again, since it controls both
- * ports of the path
- **/
+ /*
+ * In 4-port mode, need to set the mode only once, so if XMAC is
+ * already out of reset, it means the mode has already been set,
+ * and it must not reset the XMAC again, since it controls both
+ * ports of the path
+ */
if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -1743,6 +1780,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
return 0;
}
+
static int bnx2x_emac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
@@ -1999,7 +2037,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
-
/* PFC BRB internal port configuration params */
struct bnx2x_pfc_brb_threshold_val {
u32 pause_xoff;
@@ -2009,6 +2046,8 @@ struct bnx2x_pfc_brb_threshold_val {
};
struct bnx2x_pfc_brb_e3b0_val {
+ u32 per_class_guaranty_mode;
+ u32 lb_guarantied_hyst;
u32 full_lb_xoff_th;
u32 full_lb_xon_threshold;
u32 lb_guarantied;
@@ -2021,6 +2060,9 @@ struct bnx2x_pfc_brb_e3b0_val {
struct bnx2x_pfc_brb_th_val {
struct bnx2x_pfc_brb_threshold_val pauseable_th;
struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+ struct bnx2x_pfc_brb_threshold_val default_class0;
+ struct bnx2x_pfc_brb_threshold_val default_class1;
+
};
static int bnx2x_pfc_brb_get_config_params(
struct link_params *params,
@@ -2028,140 +2070,200 @@ static int bnx2x_pfc_brb_get_config_params(
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+
+ config_val->default_class1.pause_xoff = 0;
+ config_val->default_class1.pause_xon = 0;
+ config_val->default_class1.full_xoff = 0;
+ config_val->default_class1.full_xon = 0;
+
if (CHIP_IS_E2(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
+ /* pause able*/
config_val->pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3A0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
+ /* pause able */
config_val->pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3B0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
+
if (params->phy[INT_PHY].flags &
FLAGS_4_PORT_MODE) {
config_val->pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- }
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non pause able*/
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ }
} else
return -EINVAL;
return 0;
}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
- struct bnx2x_pfc_brb_e3b0_val
- *e3b0_val,
- u32 cos0_pauseable,
- u32 cos1_pauseable)
+static void bnx2x_pfc_brb_get_e3b0_config_params(
+ struct link_params *params,
+ struct bnx2x_pfc_brb_e3b0_val
+ *e3b0_val,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params,
+ const u8 pfc_enabled)
{
- if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+ if (pfc_enabled && pfc_params) {
+ e3b0_val->per_class_guaranty_mode = 1;
+ e3b0_val->lb_guarantied_hyst = 80;
+
+ if (params->phy[INT_PHY].flags &
+ FLAGS_4_PORT_MODE) {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_4P_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+ } else {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+ if (pfc_params->cos0_pauseable !=
+ pfc_params->cos1_pauseable) {
+ /* non-pauseable = Lossy, pauseable = Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+ } else if (pfc_params->cos0_pauseable) {
+ /* Lossless + Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+ } else {
+ /* Lossy + Lossy */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+ }
+ }
+ } else {
+ e3b0_val->per_class_guaranty_mode = 0;
+ e3b0_val->lb_guarantied_hyst = 0;
e3b0_val->full_lb_xoff_th =
- PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
e3b0_val->lb_guarantied =
- PFC_E3B0_4P_LB_GUART;
+ DEFAULT_E3B0_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
- } else {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
- if (cos0_pauseable != cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
- } else if (cos0_pauseable) {
- /* Lossless +Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
- } else {
- /* Lossy +Lossy*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_NON_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
- }
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
}
}
static int bnx2x_update_pfc_brb(struct link_params *params,
@@ -2172,23 +2274,28 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
struct bnx2x *bp = params->bp;
struct bnx2x_pfc_brb_th_val config_val = { {0} };
struct bnx2x_pfc_brb_threshold_val *reg_th_config =
- &config_val.pauseable_th;
+ &config_val.pauseable_th;
struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
- int set_pfc = params->feature_config_flags &
+ const int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
+ const u8 pfc_enabled = (set_pfc && pfc_params);
int bnx2x_status = 0;
u8 port = params->port;
/* default - pause configuration */
reg_th_config = &config_val.pauseable_th;
bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
- if (set_pfc && pfc_params)
+ if (pfc_enabled) {
/* First COS */
- if (!pfc_params->cos0_pauseable)
+ if (pfc_params->cos0_pauseable)
+ reg_th_config = &config_val.pauseable_th;
+ else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class0;
/*
* The number of free blocks below which the pause signal to class 0
* of MAC #n is asserted. n=0,1
@@ -2215,122 +2322,119 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
- if (set_pfc && pfc_params) {
+ if (pfc_enabled) {
/* Second COS */
if (pfc_params->cos1_pauseable)
reg_th_config = &config_val.pauseable_th;
else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class1;
+ /*
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+
+ /*
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+ reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+ reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XON_THRESHOLD_0,
+ reg_th_config->full_xon);
+
+ if (CHIP_IS_E3B0(bp)) {
+ bnx2x_pfc_brb_get_e3b0_config_params(
+ params,
+ &e3b0_val,
+ pfc_params,
+ pfc_enabled);
+
+ REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
+ e3b0_val.per_class_guaranty_mode);
+
/*
- * The number of free blocks below which the pause signal to
- * class 1 of MAC #n is asserted. n=0,1
- **/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
- reg_th_config->pause_xoff);
+ * The hysteresis on the guarantied buffer space for the Lb
+ * port before signaling XON.
+ */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
+ e3b0_val.lb_guarantied_hyst);
+
/*
- * The number of free blocks above which the pause signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of free blocks below which the full signal to the
+ * LB port is asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
- reg_th_config->pause_xon);
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+ e3b0_val.full_lb_xoff_th);
/*
- * The number of free blocks below which the full signal to
- * class 1 of MAC #n is asserted. n=0,1
+ * The number of free blocks above which the full signal to the
+ * LB port is de-asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
- reg_th_config->full_xoff);
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+ e3b0_val.full_lb_xon_threshold);
/*
- * The number of free blocks above which the full signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of blocks guarantied for the MAC #n port. n=0,1
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
- BRB1_REG_FULL_1_XON_THRESHOLD_0,
- reg_th_config->full_xon);
+ /* The number of blocks guarantied for the LB port.*/
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+ e3b0_val.lb_guarantied);
- if (CHIP_IS_E3B0(bp)) {
- /*Should be done by init tool */
- /*
- * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
- * reset value
- * 944
- */
-
- /**
- * The hysteresis on the guarantied buffer space for the Lb port
- * before signaling XON.
- **/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
-
- bnx2x_pfc_brb_get_e3b0_config_params(
- params,
- &e3b0_val,
- pfc_params->cos0_pauseable,
- pfc_params->cos1_pauseable);
- /**
- * The number of free blocks below which the full signal to the
- * LB port is asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
- e3b0_val.full_lb_xoff_th);
- /**
- * The number of free blocks above which the full signal to the
- * LB port is de-asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
- e3b0_val.full_lb_xon_threshold);
- /**
- * The number of blocks guarantied for the MAC #n port. n=0,1
- */
-
- /*The number of blocks guarantied for the LB port.*/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED,
- e3b0_val.lb_guarantied);
-
- /**
- * The number of blocks guarantied for the MAC #n port.
- */
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
- 2 * e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
- 2 * e3b0_val.mac_1_class_t_guarantied);
- /**
- * The number of blocks guarantied for class #t in MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class in
- * MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
-
- /**
- * The number of blocks guarantied for class #t in MAC1.t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class #t
- * in MAC1. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
-
- }
+ /*
+ * The number of blocks guarantied for the MAC #n port.
+ */
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+ 2 * e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+ 2 * e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The number of blocks guarantied for class #t in MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class in
+ * MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ /*
+ * The number of blocks guarantied for class #t in MAC1.t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class #t
+ * in MAC1. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
}
return bnx2x_status;
@@ -2515,7 +2619,7 @@ int bnx2x_update_pfc(struct link_params *params,
/* update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
if (!vars->link_up)
@@ -2533,7 +2637,6 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_emac_enable(params, vars, 0);
return bnx2x_status;
}
-
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
else
@@ -3053,7 +3156,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "write phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
-
} else {
/* data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
@@ -3090,8 +3192,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
EMAC_MDIO_STATUS_10MB);
return rc;
}
-
-
/******************************************************************/
/* BSC access functions from E3 */
/******************************************************************/
@@ -3339,7 +3439,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, aer_val);
@@ -3942,13 +4042,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
struct link_params *params,
- u8 fiber_mode)
+ u8 fiber_mode,
+ u8 always_autoneg)
{
struct bnx2x *bp = params->bp;
u16 val16, digctrl_kx1, digctrl_kx2;
- u8 lane;
-
- lane = bnx2x_get_warpcore_lane(phy, params);
/* Clear XFI clock comp in non-10G single lane mode. */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3956,7 +4054,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
- if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
/* SGMII Autoneg */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3967,7 +4065,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- val16 &= 0xcfbf;
+ val16 &= 0xcebf;
switch (phy->req_line_speed) {
case SPEED_10:
break;
@@ -4043,9 +4141,7 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, &val);
}
-
-
- /* Clear SFI/XFI link settings registers */
+/* Clear SFI/XFI link settings registers */
static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
struct link_params *params,
u16 lane)
@@ -4250,7 +4346,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
vars->phy_flags |= PHY_SGMII_FLAG;
DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
bnx2x_warpcore_clear_regs(phy, params, lane);
- bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
} else {
switch (serdes_net_if) {
case PORT_HW_CFG_NET_SERDES_IF_KR:
@@ -4278,7 +4374,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
}
bnx2x_warpcore_set_sgmii_speed(phy,
params,
- fiber_mode);
+ fiber_mode,
+ 0);
}
break;
@@ -4291,7 +4388,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
bnx2x_warpcore_set_10G_XFI(phy, params, 0);
} else if (vars->line_speed == SPEED_1000) {
DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+ bnx2x_warpcore_set_sgmii_speed(
+ phy, params, 1, 0);
}
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
@@ -4428,12 +4526,6 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
/* Switch back to 4-copy registers */
bnx2x_set_aer_mmd(params, phy);
- /* Global loopback, not recommended. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
} else {
/* 10G & 20G */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4450,25 +4542,14 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
}
-void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
- u8 port = params->port;
- u32 sync_offset, media_types;
- /* Update PHY configuration */
- set_phy_vars(params, vars);
-
- vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
-
- vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
- vars->phy_flags = PHY_XGXS_FLAG;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
-
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
@@ -4563,7 +4644,23 @@ void bnx2x_link_status_update(struct link_params *params,
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
}
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_sync_link(params, vars);
/* Sync media type */
sync_offset = params->shmem_base +
offsetof(struct shmem_region,
@@ -4602,7 +4699,6 @@ void bnx2x_link_status_update(struct link_params *params,
vars->line_speed, vars->duplex, vars->flow_ctrl);
}
-
static void bnx2x_set_master_ln(struct link_params *params,
struct bnx2x_phy *phy)
{
@@ -4676,11 +4772,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
* Each two bits represents a lane number:
* No swap is 0123 => 0x1b no need to enable the swap
*/
- u16 ser_lane, rx_lane_swap, tx_lane_swap;
+ u16 rx_lane_swap, tx_lane_swap;
- ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
@@ -5356,7 +5449,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
@@ -5403,9 +5495,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
-
u8 lane;
u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
int rc = 0;
@@ -6678,7 +6768,6 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
return rc;
}
-
/*****************************************************************************/
/* External Phy section */
/*****************************************************************************/
@@ -8103,7 +8192,15 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
+ struct bnx2x *bp = params->bp;
bnx2x_warpcore_power_module(params, phy, 0);
+ /* Put Warpcore in low power mode */
+ REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+ /* Put LCPLL in low power mode */
+ REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
}
static void bnx2x_power_sfp_module(struct link_params *params,
@@ -9040,13 +9137,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"8727 Power fault has been detected on port %d\n",
oc_port);
- netdev_err(bp->dev, "Error: Power fault on Port %d has"
- " been detected and the power to "
- "that SFP+ module has been removed"
- " to prevent failure of the card."
- " Please remove the SFP+ module and"
- " restart the system to clear this"
- " error.\n",
+ netdev_err(bp->dev, "Error: Power fault on Port %d has "
+ "been detected and the power to "
+ "that SFP+ module has been removed "
+ "to prevent failure of the card. "
+ "Please remove the SFP+ module and "
+ "restart the system to clear this "
+ "error.\n",
oc_port);
/* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
@@ -9228,7 +9325,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, offset;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
@@ -9263,14 +9360,22 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_REG_8481_LED3_BLINK,
0);
- bnx2x_cl45_read(bp, phy,
+ /* Configure the blink rate to ~15.9 Hz */
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
- val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+ MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+ else
+ offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, offset, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+ MDIO_PMA_DEVAD, offset, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -9283,7 +9388,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 autoneg_val, an_1000_val, an_10_100_val;
+ u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
u16 tmp_req_line_speed;
tmp_req_line_speed = phy->req_line_speed;
@@ -9378,6 +9483,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
(1<<15 | 1<<9 | 7<<0));
+ /* The PHY needs this set even for forced link. */
+ an_10_100_val |= (1<<8) | (1<<7);
DP(NETIF_MSG_LINK, "Setting 100M force\n");
}
if ((phy->req_line_speed == SPEED_10) &&
@@ -9415,9 +9522,17 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 10G\n");
/* Restart autoneg for 10G*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ &an_10g_val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ an_10g_val | 0x1000);
bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
- 0x3200);
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
} else
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
@@ -9449,74 +9564,95 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-
-#define PHY84833_HDSHK_WAIT 300
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ u16 fw_cmd,
+ u16 cmd_args[])
{
u32 idx;
- u32 pair_swap;
u16 val;
- u16 data;
struct bnx2x *bp = params->bp;
- /* Do pair swap */
-
- /* Check for configuration. */
- pair_swap = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
- PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
-
- if (pair_swap == 0)
- return 0;
-
- data = (u16)pair_swap;
-
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
msleep(1);
}
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+ if (idx >= PHY84833_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
return -EINVAL;
}
+ /* Prepare argument(s) and issue command */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- data);
- /* Issue pair swap command */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
msleep(1);
}
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+ if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
return -EINVAL;
}
+	/* Gather the returned data */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
- DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
return 0;
}
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 pair_swap;
+ u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+ int status;
+ struct bnx2x *bp = params->bp;
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ /* Only the second argument is used for this command */
+ data[1] = (u16)pair_swap;
+
+ status = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_PAIR_SWAP, data);
+ if (status == 0)
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+ return status;
+}
+
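The two hunks above replace the pair-swap-specific handshake with a generic firmware mailbox handler plus a thin pair-swap wrapper. Below is a minimal userspace sketch of that handshake (open the handler, load up to five 16-bit arguments, issue the command, wait for completion, read the arguments back, clear the status). The symbolic register indices, the fake-firmware behaviour in mdio_write() and the immediate completion are assumptions made so the example runs standalone; they are not driver code.

#include <stdint.h>
#include <stdio.h>

/* Symbolic mailbox registers standing in for the CMD_HDLR scratch regs. */
enum { MB_STATUS, MB_COMMAND, MB_DATA1, MB_NREGS = MB_DATA1 + 5 };

#define ST_OPEN_OVERRIDE	0xa5a5
#define ST_OPEN_FOR_CMDS	0x0010
#define ST_COMPLETE_PASS	0x0004
#define ST_CLEAR_COMPLETE	0x0080
#define CMD_SET_PAIR_SWAP	0x8001

static uint16_t regs[MB_NREGS];

/* Fake "firmware": reacts to writes so the handshake can be exercised. */
static void mdio_write(int reg, uint16_t val)
{
	regs[reg] = val;
	if (reg == MB_STATUS && val == ST_OPEN_OVERRIDE)
		regs[MB_STATUS] = ST_OPEN_FOR_CMDS;	/* handler opened */
	if (reg == MB_COMMAND) {
		/* a real FW would act on regs[MB_DATA1..MB_DATA1+4] here */
		regs[MB_STATUS] = ST_COMPLETE_PASS;
	}
}

static uint16_t mdio_read(int reg)
{
	return regs[reg];
}

/* The flow of bnx2x_84833_cmd_hdlr(): open, load args, issue, wait,
 * read results back, clear. Polling loops and timeouts are elided
 * because the fake firmware answers immediately. */
static int cmd_hdlr(uint16_t cmd, uint16_t args[5])
{
	int i;

	mdio_write(MB_STATUS, ST_OPEN_OVERRIDE);
	if (mdio_read(MB_STATUS) != ST_OPEN_FOR_CMDS)
		return -1;
	for (i = 0; i < 5; i++)
		mdio_write(MB_DATA1 + i, args[i]);
	mdio_write(MB_COMMAND, cmd);
	if (mdio_read(MB_STATUS) != ST_COMPLETE_PASS)
		return -1;
	for (i = 0; i < 5; i++)
		args[i] = mdio_read(MB_DATA1 + i);
	mdio_write(MB_STATUS, ST_CLEAR_COMPLETE);
	return 0;
}

int main(void)
{
	uint16_t args[5] = { 0, 0x0002, 0, 0, 0 };	/* pair-swap value in data[1] */

	if (cmd_hdlr(CMD_SET_PAIR_SWAP, args) == 0)
		printf("pair swap applied, data[1]=0x%x\n", args[1]);
	return 0;
}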
static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
u32 shmem_base_path[],
u32 chip_id)
@@ -9579,24 +9715,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
- u32 shmem_base_path[],
- u32 chip_id)
-{
- u8 reset_gpios;
-
- reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
-
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
- udelay(10);
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
- msleep(800);
- DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
- reset_gpios);
-
- return 0;
-}
-
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9605,8 +9723,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
u16 val;
- u16 temp;
- u32 actual_phy_selection, cms_enable, idx;
+ u32 actual_phy_selection, cms_enable;
+ u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
msleep(1);
@@ -9625,6 +9743,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL, 0x8000);
+ }
+
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Bring PHY out of super isolate mode */
bnx2x_cl45_read(bp, phy,
MDIO_CTL_DEVAD,
@@ -9633,26 +9758,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_CTL_DEVAD,
MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
- }
-
- bnx2x_wait_reset_complete(bp, phy, params);
-
- /* Wait for GPHY to come out of reset */
- msleep(50);
-
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
bnx2x_84833_pair_swap_cfg(phy, params, vars);
-
- /*
- * BCM84823 requires that XGXS links up first @ 10G for normal behavior
- */
- temp = vars->line_speed;
- vars->line_speed = SPEED_10000;
- bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
- bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
- vars->line_speed = temp;
-
- /* Set dual-media configuration according to configuration */
+ } else {
+ /*
+ * BCM84823 requires that XGXS links up first @ 10G for normal
+ * behavior.
+ */
+ u16 temp;
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+ }
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
@@ -9700,64 +9818,18 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* AutogrEEEn */
if (params->feature_config_flags &
- FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
- /* Ensure that f/w is ready */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
- break;
- usleep_range(1000, 1000);
- }
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
- return -EINVAL;
- }
-
- /* Select EEE mode */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG3,
- 0x2);
-
- /* Set Idle and Latency */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA3_REG,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA4_REG,
- PHY84833_CONSTANT_LATENCY);
-
- /* Send EEE instruction to command register */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_SET_EEE_MODE);
-
- /* Ensure that the command has completed */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
- break;
- usleep_range(1000, 1000);
- }
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
- return -EINVAL;
- }
-
- /* Reset command handler */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
- }
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED)
+ cmd_args[0] = 0x2;
+ else
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, cmd_args);
+ if (rc != 0)
+ DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
if (initialize)
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
@@ -10144,8 +10216,10 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
usleep_range(1000, 1000);
- /* This works with E3 only, no need to check the chip
- before determining the port. */
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
port = params->port;
cfg_pin = (REG_RD(bp, params->shmem_base +
@@ -10327,6 +10401,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
return 0;
}
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= 0xff00;
+
+ DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ return;
+}
+
+
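The new bnx2x_5461x_set_link_led() follows the GPHY shadow-register convention: write the selector, read the current value, clear the LED byte, OR in the new mode, then write back with the write-enable bit set. A minimal userspace sketch of that select/read-modify-write/commit sequence is below; shadow_bank[], cl22_read() and cl22_write() only emulate the PHY so the example runs standalone and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define GPHY_SHADOW_REG		0x1c		/* MDIO_REG_GPHY_SHADOW */
#define GPHY_SHADOW_WR_ENA	(0x1 << 15)
#define GPHY_SHADOW_LED_SEL1	(0x0d << 10)

/* Emulated shadow bank: bits 14:10 of a write select the entry,
 * bit 15 commits bits 9:0 as the new shadow contents. */
static uint16_t shadow_bank[32];
static uint16_t shadow_sel;

static void cl22_write(uint16_t reg, uint16_t val)
{
	if (reg != GPHY_SHADOW_REG)
		return;
	shadow_sel = (val >> 10) & 0x1f;
	if (val & GPHY_SHADOW_WR_ENA)
		shadow_bank[shadow_sel] = val & 0x03ff;
}

static void cl22_read(uint16_t reg, uint16_t *val)
{
	if (reg == GPHY_SHADOW_REG)
		*val = (uint16_t)((shadow_sel << 10) | shadow_bank[shadow_sel]);
}

/* Same flow as the driver: select LED_SEL1, read, keep the upper byte,
 * set the new LED bits, commit with the write-enable bit. */
static void set_led_mode(uint16_t mode_bits)
{
	uint16_t temp;

	cl22_write(GPHY_SHADOW_REG, GPHY_SHADOW_LED_SEL1);
	cl22_read(GPHY_SHADOW_REG, &temp);
	temp &= 0xff00;
	temp |= mode_bits;
	cl22_write(GPHY_SHADOW_REG, GPHY_SHADOW_WR_ENA | temp);
}

int main(void)
{
	set_led_mode(0x00ee);	/* the "LEDs off" value from the patch */
	printf("LED_SEL1 shadow is now 0x%04x\n", (unsigned)shadow_bank[0x0d]);
	return 0;
}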
static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -11103,7 +11214,7 @@ static struct bnx2x_phy phy_54618se = {
.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
/*****************************************************************/
@@ -11181,7 +11292,9 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
offsetof(struct shmem_region,
dev_info.port_feature_config[port].link_config)) &
PORT_FEATURE_CONNECTED_SWITCH_MASK);
- chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
if (USES_WARPCORE(bp)) {
u32 serdes_net_if;
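The widened chip_id now also carries the chip revision: the chip number goes in bits 31:16 and the 4-bit revision in bits 15:12. A tiny sketch of that packing, with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Pack the chip number into bits 31:16 and the 4-bit revision into
 * bits 15:12, as the updated chip_id assignment does. */
static uint32_t make_chip_id(uint32_t chip_num, uint32_t chip_rev)
{
	return (chip_num << 16) | ((chip_rev & 0xf) << 12);
}

int main(void)
{
	/* e.g. chip number 0x168e, revision 1 -> 0x168e1000 */
	printf("chip_id = 0x%08x\n", (unsigned)make_chip_id(0x168e, 0x1));
	return 0;
}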
@@ -11360,6 +11473,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
return -EINVAL;
default:
*phy = phy_null;
+ /* In case external PHY wasn't found */
+ if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ return -EINVAL;
return 0;
}
@@ -11533,7 +11650,7 @@ u32 bnx2x_phy_selection(struct link_params *params)
int bnx2x_phy_probe(struct link_params *params)
{
- u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u8 phy_index, actual_phy_idx;
u32 phy_config_swapped, sync_offset, media_types;
struct bnx2x *bp = params->bp;
struct bnx2x_phy *phy;
@@ -11544,7 +11661,6 @@ int bnx2x_phy_probe(struct link_params *params)
for (phy_index = INT_PHY; phy_index < MAX_PHYS;
phy_index++) {
- link_cfg_idx = LINK_CONFIG_IDX(phy_index);
actual_phy_idx = phy_index;
if (phy_config_swapped) {
if (phy_index == EXT_PHY1)
@@ -12210,6 +12326,63 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
return 0;
}
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[],
+ u8 phy_index,
+ u32 chip_id)
+{
+ u8 reset_gpios;
+ struct bnx2x_phy phy;
+ u32 shmem_base, shmem2_base, cnt;
+ s8 port = 0;
+ u16 val;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+ for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+ /* This PHY is for E2 and E3. */
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ /* Extract the ext phy address for the port */
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+ 0, &phy) !=
+ 0) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+
+		/* Wait for the FW to complete its initialization. */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ bnx2x_cl45_read(bp, &phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &val);
+ if (!(val & (1<<15)))
+ break;
+ msleep(1);
+ }
+ if (cnt >= 1000)
+ DP(NETIF_MSG_LINK,
+ "84833 Cmn reset timeout (%d)\n", port);
+
+ /* Put the port in super isolate mode. */
+ bnx2x_cl45_read(bp, &phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val |= MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, &phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
+
+ return 0;
+}
+
+
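The per-port loop above waits for the PHY firmware by polling the PMA control register until the self-clearing reset bit (bit 15) drops, giving up after 1000 attempts. A compact model of that poll-with-timeout pattern follows; read_ctrl() is a stand-in for the MDIO read and simply clears the bit after a few polls.

#include <stdint.h>
#include <stdio.h>

static int polls;

/* Stand-in for the MDIO read of MDIO_PMA_REG_CTRL: pretends the reset
 * bit stays set for the first couple of polls, then clears. */
static uint16_t read_ctrl(void)
{
	return (++polls < 3) ? (uint16_t)(1u << 15) : 0;
}

static int wait_reset_clear(int max_polls)
{
	int cnt;

	for (cnt = 0; cnt < max_polls; cnt++) {
		if (!(read_ctrl() & (1u << 15)))
			return 0;	/* reset completed */
		/* the driver sleeps 1 ms here (msleep(1)) */
	}
	return -1;			/* timed out, as reported by the DP() above */
}

int main(void)
{
	int rc = wait_reset_clear(1000);

	printf("reset %s after %d polls\n", rc ? "timed out" : "cleared", polls);
	return 0;
}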
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
@@ -12244,7 +12417,9 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
- rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
rc = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 2a46e633abe..e02a68a7fb8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
/* Configure the COS to ETS according to BW and SP settings.*/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params);
+ struct bnx2x_ets_params *ets_params);
/* Read pfc statistic*/
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6486ab8c8fc..ffeaaa95ed9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2318,12 +2318,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
- return 2 * vn + BP_PORT(bp);
-}
-
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2469,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
"rate shaping and fairness are disabled\n");
}
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
- int func;
- int vn;
-
- /* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
- if (vn == BP_VN(bp))
- continue;
-
- func = func_by_vn(bp, vn);
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
- }
-}
-
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2549,6 +2527,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
if (bp->state != BNX2X_STATE_OPEN)
return;
+ /* read updated dcb configuration */
+ bnx2x_dcbx_pmf_update(bp);
+
bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
if (bp->link_vars.link_up)
@@ -2643,15 +2624,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
- /* Statistics are not supported for CNIC Clients at the moment */
- if (IS_FCOE_FP(fp))
- return false;
-#endif
- return true;
-}
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
@@ -2695,11 +2667,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
* parent connection). The statistics are zeroed when the parent
* connection is initialized.
*/
- if (stat_counter_valid(bp, fp)) {
- __set_bit(BNX2X_Q_FLG_STATS, &flags);
- if (zero_stats)
- __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
- }
+
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
return flags;
}
@@ -2808,8 +2780,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* This should be a maximum number of data bytes that may be
* placed on the BD (not including paddings).
*/
- rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
- IP_HEADER_ALIGNMENT_PADDING;
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+ BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2912,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+ struct eth_stats_info *ether_stat =
+ &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+ /* leave last char as NULL */
+ memcpy(ether_stat->version, DRV_MODULE_VERSION,
+ ETH_STAT_INFO_VERSION_LEN - 1);
+
+ bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
+
+ ether_stat->mtu_size = bp->dev->mtu;
+
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+ if (bp->dev->features & NETIF_F_TSO)
+ ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+ ether_stat->feature_flags |= bp->common.boot_mode;
+
+ ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+ ether_stat->txq_size = bp->tx_ring_size;
+ ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct fcoe_stats_info *fcoe_stat =
+ &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+ memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+ fcoe_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+ /* insert FCoE stats from ramrod response */
+ if (!NO_FCOE(bp)) {
+ struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ tstorm_queue_statistics;
+
+ struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ xstorm_queue_statistics;
+
+ struct fcoe_statistics_params *fw_fcoe_stat =
+ &bp->fw_stats_data->fcoe;
+
+ ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+ ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
+ }
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct iscsi_stats_info *iscsi_stat =
+ &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+ memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+ iscsi_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
/* called due to MCP event (on pmf):
* reread new bandwidth configuration
* configure FW
@@ -2960,6 +3069,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+ enum drv_info_opcode op_code;
+ u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+ /* if drv_info version supported by MFW doesn't match - send NACK */
+ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+ DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+
+ switch (op_code) {
+ case ETH_STATS_OPCODE:
+ bnx2x_drv_info_ether_stat(bp);
+ break;
+ case FCOE_STATS_OPCODE:
+ bnx2x_drv_info_fcoe_stat(bp);
+ break;
+ case ISCSI_STATS_OPCODE:
+ bnx2x_drv_info_iscsi_stat(bp);
+ break;
+ default:
+ /* if op code isn't supported - send NACK */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ /* if we got drv_info attn from MFW then these fields are defined in
+ * shmem2 for sure
+ */
+ SHMEM2_WR(bp, drv_info_host_addr_lo,
+ U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+ SHMEM2_WR(bp, drv_info_host_addr_hi,
+ U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
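The handler above hands the MCP a DMA address by splitting it into two 32-bit shmem fields (drv_info_host_addr_lo/hi). A small sketch of that split; the macros are defined locally here with the usual meaning and the sample address is made up.

#include <stdint.h>
#include <stdio.h>

#define U64_LO(x)	((uint32_t)((uint64_t)(x) & 0xffffffff))
#define U64_HI(x)	((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
	uint64_t dma = 0x00000001234b7000ULL;	/* hypothetical mapping */

	printf("addr_lo=0x%08x addr_hi=0x%08x\n",
	       (unsigned)U64_LO(dma), (unsigned)U64_HI(dma));
	return 0;
}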
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3471,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
" the driver to shutdown the card to prevent permanent"
" damage. Please contact OEM Support for assistance\n");
+
+ /*
+	 * Schedule device reset (unload).
+	 * Some boards consume enough power while the driver is up to
+	 * overheat if the fan fails.
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3620,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
+ if (val & DRV_STATUS_DRV_INFO_REQ)
+ bnx2x_handle_drv_info_req(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -5247,7 +5413,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
u8 cos;
unsigned long q_type = 0;
u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+ fp->rx_queue = fp_idx;
fp->cid = fp_idx;
fp->cl_id = bnx2x_fp_cl_id(fp);
fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6856,13 +7022,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
int num_groups;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
- /* number of eth_queues */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
/* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + num_eth_queues */
- bp->fw_stats_num = 2 + num_queue_stats;
+ * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
+ * num of queues
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
/* Request is built from stats_query_header and an array of
@@ -6870,8 +7039,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
* STATS_QUERY_CMD_COUNT rules. The real number or requests is
* configured in the stats_query_header.
*/
- num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
- (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+ num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7049,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
*
* stats_counter holds per-STORM counters that are incremented
* when STORM has finished with the current request.
+ *
+	 * memory for FCoE offloaded statistics is counted anyway,
+	 * even if it will not be sent.
*/
bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
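Worked example of the sizing above: the request count is one port-stats query plus one PF query, plus one optional FCoE query, plus one per statistics queue, and the requests are packed into command groups of STATS_QUERY_CMD_COUNT rules with a ceiling division. The group size of 16 and the queue count below are assumptions for the demo, not values taken from the headers.

#include <stdio.h>

#define STATS_QUERY_CMD_COUNT	16	/* assumed group size */

int main(void)
{
	int num_eth_queues = 8, is_fcoe_stats = 1;
	int num_queue_stats = num_eth_queues + is_fcoe_stats;
	int fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
	int num_groups = fw_stats_num / STATS_QUERY_CMD_COUNT +
			 ((fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	printf("fw_stats_num=%d -> num_groups=%d\n", fw_stats_num, num_groups);
	return 0;
}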
@@ -7025,6 +7198,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
unsigned long ramrod_flags = 0;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+ DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+ return 0;
+ }
+#endif
+
DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8522,6 +8702,17 @@ sp_rtnl_not_reset:
if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+ /*
+	 * In case of fan failure we need to unload the driver, even if the
+	 * "stop on error" debug flag is set, since we are trying to prevent
+	 * permanent overheating damage.
+ */
+ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+ netif_device_detach(bp->dev);
+ bnx2x_close(bp->dev);
+ }
+
sp_rtnl_exit:
rtnl_unlock();
}
@@ -8708,7 +8899,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
- u32 val, val2, val3, val4, id;
+ u32 val, val2, val3, val4, id, boot_mode;
u16 pmc;
/* Get the chip revision id and number. */
@@ -8817,6 +9008,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+ BC_SUPPORTS_PFC_STATS : 0;
+
+ boot_mode = SHMEM_RD(bp,
+ dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+ PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+ switch (boot_mode) {
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+ break;
+ }
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9478,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->common.shmem2_base);
}
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
+#ifdef BCM_CNIC
int port = BP_PORT(bp);
- int func = BP_ABS_FUNC(bp);
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_iscsi_conn);
- u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
- drv_lic_key[port].max_fcoe_conn);
- /* Get the number of maximum allowed iSCSI and FCoE connections */
+ /* Get the number of maximum allowed iSCSI connections */
bp->cnic_eth_dev.max_iscsi_conn =
(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn);
+
+ /*
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_FLAG;
+#else
+ bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+
+ /* Get the number of maximum allowed FCoE connections */
bp->cnic_eth_dev.max_fcoe_conn =
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9566,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
}
}
- BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
- bp->cnic_eth_dev.max_iscsi_conn,
- bp->cnic_eth_dev.max_fcoe_conn);
+ BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
/*
* If maximum allowed number of connections is zero -
* disable the feature.
*/
- if (!bp->cnic_eth_dev.max_iscsi_conn)
- bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
if (!bp->cnic_eth_dev.max_fcoe_conn)
bp->flags |= NO_FCOE_FLAG;
-}
+#else
+ bp->flags |= NO_FCOE_FLAG;
#endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ /*
+	 * iSCSI may be dynamically disabled, but reading the
+	 * info here lets the driver decrease its memory usage
+	 * if the feature is disabled for good.
+ */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_get_fcoe_info(bp);
+}
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
@@ -9374,7 +9614,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
- /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+ /*
+	 * iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
* FCoE MAC then the appropriate feature should be disabled.
*/
if (IS_MF_SI(bp)) {
@@ -9396,11 +9637,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
- BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
fip_mac);
} else
bp->flags |= NO_FCOE_FLAG;
+ } else { /* SD mode */
+ if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ }
}
#endif
} else {
@@ -9449,7 +9701,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
#endif
- if (!is_valid_ether_addr(bp->dev->dev_addr))
+ if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: "
"%pM, change it manually before bringing up "
@@ -9661,9 +9913,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
-#ifdef BCM_CNIC
bnx2x_get_cnic_info(bp);
-#endif
/* Get current FW pulse sequence */
if (!BP_NOMCP(bp)) {
@@ -9681,30 +9931,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
int cnt, i, block_end, rodi;
- char vpd_data[BNX2X_VPD_LEN+1];
+ char vpd_start[BNX2X_VPD_LEN+1];
char str_id_reg[VENDOR_ID_LEN+1];
char str_id_cap[VENDOR_ID_LEN+1];
+ char *vpd_data;
+ char *vpd_extended_data = NULL;
u8 len;
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
if (cnt < BNX2X_VPD_LEN)
goto out_not_found;
- i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+	/* VPD RO tag should be the first tag after the identifier string, hence
+	 * we should be able to find it within the first BNX2X_VPD_LEN chars
+ */
+ i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
-
block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_data[i]);
+ pci_vpd_lrdt_size(&vpd_start[i]);
i += PCI_VPD_LRDT_TAG_SIZE;
- if (block_end > BNX2X_VPD_LEN)
- goto out_not_found;
+ if (block_end > BNX2X_VPD_LEN) {
+ vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+ if (vpd_extended_data == NULL)
+ goto out_not_found;
+
+ /* read rest of vpd image into vpd_extended_data */
+ memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+ cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+ block_end - BNX2X_VPD_LEN,
+ vpd_extended_data + BNX2X_VPD_LEN);
+ if (cnt < (block_end - BNX2X_VPD_LEN))
+ goto out_not_found;
+ vpd_data = vpd_extended_data;
+ } else
+ vpd_data = vpd_start;
+
+ /* now vpd_data holds full vpd content in both cases */
rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +10005,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
bp->fw_ver[len] = ' ';
}
}
+ kfree(vpd_extended_data);
return;
}
out_not_found:
+ kfree(vpd_extended_data);
return;
}
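The VPD change above switches to a "grow and re-read" flow: if the RO block extends past the first BNX2X_VPD_LEN bytes, a buffer of block_end bytes is allocated, the part already read is kept and only the tail is fetched. The sketch below mirrors that flow in userspace; read_vpd(), the fake image and the block_end value are made up for illustration.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define VPD_LEN 128

static const char fake_vpd[300] = "fake VPD image ...";

/* Stand-in for pci_read_vpd(): copies from the fake image. */
static int read_vpd(int off, int count, char *buf)
{
	if (off + count > (int)sizeof(fake_vpd))
		count = (int)sizeof(fake_vpd) - off;
	memcpy(buf, fake_vpd + off, count);
	return count;
}

int main(void)
{
	char vpd_start[VPD_LEN + 1];
	char *vpd_data = vpd_start, *vpd_extended_data = NULL;
	int block_end = 200;	/* pretend the RO block ends past VPD_LEN */

	read_vpd(0, VPD_LEN, vpd_start);

	if (block_end > VPD_LEN) {
		vpd_extended_data = malloc(block_end);
		if (!vpd_extended_data)
			return 1;
		/* keep what was already read, fetch only the tail */
		memcpy(vpd_extended_data, vpd_start, VPD_LEN);
		if (read_vpd(VPD_LEN, block_end - VPD_LEN,
			     vpd_extended_data + VPD_LEN) <
		    block_end - VPD_LEN) {
			free(vpd_extended_data);
			return 1;
		}
		vpd_data = vpd_extended_data;
	}

	/* vpd_data now holds the full image in both cases */
	printf("parsed %d bytes starting with \"%.8s\"\n", block_end, vpd_data);
	free(vpd_extended_data);
	return 0;
}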
@@ -9840,15 +10111,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->multi_mode = multi_mode;
+ bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+ bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
/* Set TPA flags */
- if (disable_tpa) {
+ if (bp->disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
bp->dev->features &= ~NETIF_F_LRO;
} else {
bp->flags |= TPA_ENABLE_FLAG;
bp->dev->features |= NETIF_F_LRO;
}
- bp->disable_tpa = disable_tpa;
if (CHIP_IS_E1(bp))
bp->dropless_fc = 0;
@@ -9965,7 +10241,7 @@ static int bnx2x_open(struct net_device *dev)
}
/* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10119,6 +10395,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
}
bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+ /* handle ISCSI SD mode */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10479,15 @@ static void poll_bnx2x(struct net_device *dev)
}
#endif
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+ return -EADDRNOTAVAIL;
+ return 0;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -10205,7 +10495,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_select_queue = bnx2x_select_queue,
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_validate_addr = bnx2x_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
.ndo_fix_features = bnx2x_fix_features,
@@ -10548,33 +10838,38 @@ do { \
int bnx2x_init_firmware(struct bnx2x *bp)
{
- const char *fw_file_name;
struct bnx2x_fw_file_hdr *fw_hdr;
int rc;
- if (CHIP_IS_E1(bp))
- fw_file_name = FW_FILE_NAME_E1;
- else if (CHIP_IS_E1H(bp))
- fw_file_name = FW_FILE_NAME_E1H;
- else if (!CHIP_IS_E1x(bp))
- fw_file_name = FW_FILE_NAME_E2;
- else {
- BNX2X_ERR("Unsupported chip revision\n");
- return -EINVAL;
- }
- BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+ if (!bp->firmware) {
+ const char *fw_file_name;
- rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
- if (rc) {
- BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
- goto request_firmware_exit;
- }
+ if (CHIP_IS_E1(bp))
+ fw_file_name = FW_FILE_NAME_E1;
+ else if (CHIP_IS_E1H(bp))
+ fw_file_name = FW_FILE_NAME_E1H;
+ else if (!CHIP_IS_E1x(bp))
+ fw_file_name = FW_FILE_NAME_E2;
+ else {
+ BNX2X_ERR("Unsupported chip revision\n");
+ return -EINVAL;
+ }
+ BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
- rc = bnx2x_check_firmware(bp);
- if (rc) {
- BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
- goto request_firmware_exit;
+ rc = request_firmware(&bp->firmware, fw_file_name,
+ &bp->pdev->dev);
+ if (rc) {
+ BNX2X_ERR("Can't load firmware file %s\n",
+ fw_file_name);
+ goto request_firmware_exit;
+ }
+
+ rc = bnx2x_check_firmware(bp);
+ if (rc) {
+ BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+ goto request_firmware_exit;
+ }
}
fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
@@ -10630,6 +10925,7 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
kfree(bp->init_ops);
kfree(bp->init_data);
release_firmware(bp->firmware);
+ bp->firmware = NULL;
}
@@ -10817,8 +11113,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
#ifdef BCM_CNIC
- /* disable FCOE L2 queue for E1x and E3*/
- if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+ /* disable FCOE L2 queue for E1x */
+ if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
#endif
@@ -10925,6 +11221,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
if (bp->doorbells)
iounmap(bp->doorbells);
+ bnx2x_release_firmware(bp);
+
bnx2x_free_mem_bp(bp);
free_netdev(dev);
@@ -11478,6 +11776,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
smp_mb__after_atomic_inc();
break;
}
+ case DRV_CTL_ULP_REGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
+ case DRV_CTL_ULP_UNREGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
default:
BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11553,7 +11883,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_mutex);
cp->drv_state = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
kfree(bp->cnic_kwq);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0..44609de4e5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -160,8 +160,11 @@
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
/* [RW 10] Write client 0: Assert pause threshold. */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
-#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
-/* [R 24] The number of full blocks occupied by port. */
+/* [RW 1] Indicates whether to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [RW 1] Reset the design by software. */
#define BRB1_REG_SOFT_RESET 0x600dc
@@ -1619,6 +1622,14 @@
register bits. */
#define MISC_REG_LCPLL_CTRL_1 0xa2a4
#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c
/* [RW 4] Interrupt mask register #0 read/write */
#define MISC_REG_MISC_INT_MASK 0xa388
/* [RW 1] Parity mask register #0 read/write */
@@ -1754,6 +1765,7 @@
* is compared to the value on ctrl_md_devad. Drives output
* misc_xgxs0_phy_addr. Global register. */
#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+#define MISC_REG_WC0_RESET 0xac30
/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
side. This should be less than or equal to phy_port_mode; if some of the
ports are not used. This enables reduction of frequency on the core side.
@@ -6823,11 +6835,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
-#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
-
-#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
@@ -6838,26 +6852,35 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
-#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
/* Mailbox command set used by 84833. */
-#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
+#define PHY84833_CMD_GET_EEE_MODE 0x8008
+#define PHY84833_CMD_SET_EEE_MODE 0x8009
/* Mailbox status set used by 84833. */
-#define PHY84833_CMD_RECEIVED 0x0001
-#define PHY84833_CMD_IN_PROGRESS 0x0002
-#define PHY84833_CMD_COMPLETE_PASS 0x0004
-#define PHY84833_CMD_COMPLETE_ERROR 0x0008
-#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
-#define PHY84833_CMD_SYSTEM_BOOT 0x0020
-#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
-#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
-#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
-
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
-/* 84833 F/W Feature Commands */
-#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
-#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
@@ -6990,6 +7013,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0440425c83d..5ac616093f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,6 +30,8 @@
#define BNX2X_MAX_EMUL_MULTI 16
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/**** Exe Queue interfaces ****/
/**
@@ -441,6 +443,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true;
}
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ u8 *next = buf;
+ int counter = 0;
+
+ /* traverse list */
+ list_for_each_entry(pos, &o->head, link) {
+ if (counter < n) {
+ /* place leading zeroes in buffer */
+ memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+			/* place mac after leading zeroes */
+ memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+ ETH_ALEN);
+
+ /* calculate address of next element and
+ * advance counter
+ */
+ counter++;
+ next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+ counter, next, pos->u.mac.mac);
+ }
+ }
+ return counter * ETH_ALEN;
+}
+
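bnx2x_get_n_elements() above lays each MAC out in a 4-byte-aligned slot (8 bytes for a 6-byte MAC) with the leading pad bytes zeroed, and returns the number of MAC bytes copied. The standalone sketch below reproduces that layout so the caller-side buffer sizing is visible; the MAC values are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN		6
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define MAC_SLOT		ALIGN_UP(ETH_ALEN, sizeof(uint32_t))
#define MAC_LEADING_ZERO_CNT	(MAC_SLOT - ETH_ALEN)

/* Copy up to n MACs into 4-byte-aligned slots, zero-padded in front,
 * and return the number of MAC bytes copied (same convention as above). */
static int copy_macs(const uint8_t macs[][ETH_ALEN], int count, int n,
		     uint8_t *buf)
{
	int i, copied = 0;

	for (i = 0; i < count && copied < n; i++, copied++) {
		uint8_t *slot = buf + copied * MAC_SLOT;

		memset(slot, 0, MAC_LEADING_ZERO_CNT);
		memcpy(slot + MAC_LEADING_ZERO_CNT, macs[i], ETH_ALEN);
	}
	return copied * ETH_ALEN;
}

int main(void)
{
	const uint8_t macs[2][ETH_ALEN] = {
		{ 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc },
		{ 0x00, 0x10, 0x18, 0xdd, 0xee, 0xff },
	};
	uint8_t buf[3 * MAC_SLOT];	/* caller must size for aligned slots */
	int bytes = copy_macs(macs, 2, 3, buf);
	int i;

	printf("returned %d MAC bytes; slot 0:", bytes);
	for (i = 0; i < (int)MAC_SLOT; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return 0;
}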
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
@@ -1886,6 +1918,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
mac_obj->check_move = bnx2x_check_move;
mac_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ mac_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -3342,7 +3375,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
if (!list_empty(&o->registry.exact_match.macs))
return 0;
- elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+ elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
if (!elem) {
BNX2X_ERR("Failed to allocate registry memory\n");
return -ENOMEM;
@@ -5380,7 +5413,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
rc = drv->init_fw(bp);
if (rc) {
BNX2X_ERR("Error loading firmware\n");
- goto fw_init_err;
+ goto init_err;
}
/* Handle the beginning of COMMON_XXX pases separatelly... */
@@ -5388,25 +5421,25 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
rc = bnx2x_func_init_cmn_chip(bp, drv);
if (rc)
- goto init_hw_err;
+ goto init_err;
break;
case FW_MSG_CODE_DRV_LOAD_COMMON:
rc = bnx2x_func_init_cmn(bp, drv);
if (rc)
- goto init_hw_err;
+ goto init_err;
break;
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = bnx2x_func_init_port(bp, drv);
if (rc)
- goto init_hw_err;
+ goto init_err;
break;
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = bnx2x_func_init_func(bp, drv);
if (rc)
- goto init_hw_err;
+ goto init_err;
break;
default:
@@ -5414,10 +5447,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp,
rc = -EINVAL;
}
-init_hw_err:
- drv->release_fw(bp);
-
-fw_init_err:
+init_err:
drv->gunzip_end(bp);
/* In case of success, complete the comand immediatelly: no ramrods
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 9a517c2e9f1..992308ff82e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,19 @@ struct bnx2x_vlan_mac_obj {
/* RAMROD command to be used */
int ramrod_cmd;
+ /* copy the first n elements into a preallocated buffer
+ *
+ * @param n number of elements to get
+ * @param buf buffer preallocated by caller into which elements
+ * will be copied. Note elements are 4-byte aligned
+ * so buffer size must be able to accommodate the
+ * aligned elements.
+ *
+ * @return number of copied bytes
+ */
+ int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf);
+
/**
* Checks if ADD-ramrod with the given params may be performed.
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 02ac6a771bf..bc0121ac291 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref)
#endif
}
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+ u16 res = sizeof(struct host_port_stats) >> 2;
+
+ /* if PFC stats are not supported by the MFW, don't DMA them */
+ if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
+ res -= (sizeof(u32)*4) >> 2;
+
+ return res;
+}
+
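The helper above expresses the port-stats DMA length in 32-bit words (hence the >> 2) and drops the last four u32 PFC counters when the management firmware does not fill them. The same arithmetic, modelled stand-alone with an assumed structure size purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PORT_STATS_BYTES	768	/* stand-in for sizeof(struct host_port_stats) */
#define PFC_U32_COUNT		4	/* the trailing PFC tx/rx hi/lo counters */

static unsigned int port_stats_dma_len(int pfc_supported)
{
	unsigned int res = PORT_STATS_BYTES >> 2;	/* length in 32-bit words */

	if (!pfc_supported)
		res -= (sizeof(uint32_t) * PFC_U32_COUNT) >> 2;
	return res;
}

int main(void)
{
	printf("DMA length: %u words with PFC, %u without\n",
	       port_stats_dma_len(1), port_stats_dma_len(0));
	return 0;
}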
/*
* Init service functions
*/
@@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
DMAE_LEN32_RD_MAX * 4);
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
DMAE_LEN32_RD_MAX * 4);
- dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@@ -540,6 +552,25 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ /* collect PFC stats */
+ DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
+ pstats->pfc_frames_tx_hi,
+ diff.lo, new->tx_stat_gtpp_lo,
+ pstats->pfc_frames_tx_lo);
+ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+ pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+ ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
+ pstats->pfc_frames_tx_lo, diff.lo);
+
+ DIFF_64(diff.hi, new->rx_stat_grpp_hi,
+ pstats->pfc_frames_rx_hi,
+ diff.lo, new->rx_stat_grpp_lo,
+ pstats->pfc_frames_rx_lo);
+ pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+ pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+ ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
+ pstats->pfc_frames_rx_lo, diff.lo);
}
estats->pause_frames_received_hi =
@@ -551,6 +582,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +611,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+ /* collect pfc stats */
+ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+ pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+ ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+ pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +673,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +794,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
- pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+ pstats->host_port_stats_counter++;
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
@@ -1265,7 +1319,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
if (bp->func_stx) {
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
@@ -1349,12 +1403,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
enum bnx2x_stats_state state;
if (unlikely(bp->panic))
return;
- bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
+ bnx2x_stats_stm[state][event].action(bp);
+
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
@@ -1380,7 +1436,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -1457,6 +1513,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
int i;
+ int first_queue_query_index;
struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
dma_addr_t cur_data_offset;
@@ -1512,14 +1569,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+ /**** FCoE FW statistics data ****/
+ if (!NO_FCOE(bp)) {
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+ cur_query_entry =
+ &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_FCOE;
+ /* For FCoE the query index is a don't care */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
+
/**** Clients' queries ****/
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+ /* the first queue query index depends on whether an FCoE offload request
+ * will be included in the ramrod
+ */
+ if (!NO_FCOE(bp))
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+ else
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
for_each_eth_queue(bp, i) {
cur_query_entry =
&bp->fw_stats_req->
- query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+ query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1531,6 +1614,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_data_offset += sizeof(struct per_queue_stats);
}
+
+ /* add FCoE queue query if needed */
+ if (!NO_FCOE(bp)) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[first_queue_query_index + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
}
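Since the FCoE query, when present, is placed ahead of the per-queue entries, the first ethernet queue slot shifts by one depending on NO_FCOE(). A toy sketch of that indexing (the two constants below are illustrative, not the driver's BNX2X_*_QUERY_IDX values):

#include <stdio.h>

#define FCOE_QUERY_IDX		3	/* illustrative */
#define FIRST_QUEUE_QUERY_IDX	4	/* illustrative */

static int queue_query_slot(int queue, int fcoe_present)
{
	int first = fcoe_present ? FIRST_QUEUE_QUERY_IDX
				 : FIRST_QUEUE_QUERY_IDX - 1;

	return first + queue;
}

int main(void)
{
	printf("eth queue 0 goes to slot %d with FCoE, slot %d without\n",
	       queue_query_slot(0, 1), queue_query_slot(0, 0));
	return 0;
}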
void bnx2x_stats_init(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 5d8ce2f6afe..683deb05310 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -193,6 +193,12 @@ struct bnx2x_eth_stats {
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
+
+ /* PFC */
+ u32 pfc_frames_received_hi;
+ u32 pfc_frames_received_lo;
+ u32 pfc_frames_sent_hi;
+ u32 pfc_frames_sent_lo;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 6f10c693983..4bcb67eedf1 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
return io->data;
}
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+
+ if (reg)
+ info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+ else
+ info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+ info.data.ulp_type = ulp_type;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
static int cnic_in_use(struct cnic_sock *csk)
{
return test_bit(SK_F_INUSE, &csk->flags);
@@ -506,7 +521,7 @@ int cnic_unregister_driver(int ulp_type)
}
read_unlock(&cnic_dev_lock);
- rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+ RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
synchronize_rcu();
@@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
+ cnic_ulp_ctl(dev, ulp_type, true);
+
return 0;
}
@@ -579,7 +596,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
pr_err("%s: device not registered to this ulp type %d\n",
@@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
+ cnic_ulp_ctl(dev, ulp_type, false);
+
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
@@ -3052,9 +3071,26 @@ static void cnic_ulp_start(struct cnic_dev *dev)
}
}
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_ulp_ops *ulp_ops;
+ int rc;
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ if (ulp_ops && ulp_ops->cnic_get_stats)
+ rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+ else
+ rc = -ENODEV;
+ mutex_unlock(&cnic_lock);
+ return rc;
+}
+
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
struct cnic_dev *dev = data;
+ int ulp_type = CNIC_ULP_ISCSI;
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3136,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
}
break;
}
+ case CNIC_CTL_FCOE_STATS_GET_CMD:
+ ulp_type = CNIC_ULP_FCOE;
+ /* fall through */
+ case CNIC_CTL_ISCSI_STATS_GET_CMD:
+ cnic_hold(dev);
+ cnic_copy_ulp_stats(dev, ulp_type);
+ cnic_put(dev);
+ break;
+
default:
return -EINVAL;
}
@@ -3475,7 +3520,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+ fl6.daddr = dst_addr->sin6_addr;
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = dst_addr->sin6_scope_id;
@@ -5134,7 +5179,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
}
cnic_shutdown_rings(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
- rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
cnic_cm_shutdown(dev);
cp->stop_hw(dev);
@@ -5288,6 +5333,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 79443e0dbf9..d1f6456d22b 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -86,6 +86,8 @@ struct kcqe {
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define CNIC_CTL_STOP_ISCSI_CMD 4
+#define CNIC_CTL_FCOE_STATS_GET_CMD 5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD 6
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
@@ -96,6 +98,8 @@ struct kcqe {
#define DRV_CTL_STOP_L2_CMD 0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
+#define DRV_CTL_ULP_REGISTER_CMD 0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f
struct cnic_ctl_completion {
u32 cid;
@@ -133,6 +137,7 @@ struct drv_ctl_info {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
+ int ulp_type;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -201,6 +206,7 @@ struct cnic_eth_dev {
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
+ union drv_info_to_mcp *addr_drv_info_to_mcp;
};
struct cnic_sockaddr {
@@ -297,6 +303,8 @@ struct cnic_dev {
int max_fcoe_conn;
int max_rdma_conn;
+ union drv_info_to_mcp *stats_addr;
+
void *cnic_priv;
};
@@ -326,6 +334,7 @@ struct cnic_ulp_ops {
void (*cm_remote_abort)(struct cnic_sock *);
int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
char *data, u16 data_size);
+ int (*cnic_get_stats)(void *ulp_ctx);
struct module *owner;
atomic_t ref_count;
};
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 0a1d7f279fc..8fa7abc53ec 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -163,7 +163,6 @@ enum sbmac_state {
#define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256
-#define ETHER_ADDR_LEN 6
#define ENET_PACKET_SIZE 1518
/*#define ENET_PACKET_SIZE 9216 */
@@ -266,7 +265,7 @@ struct sbmac_softc {
int sbm_pause; /* current pause setting */
int sbm_link; /* current link state */
- unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
+ unsigned char sbm_hwaddr[ETH_ALEN];
struct sbmacdma sbm_txdma; /* only channel 0 for now */
struct sbmacdma sbm_rxdma;
@@ -2676,15 +2675,4 @@ static struct platform_driver sbmac_driver = {
},
};
-static int __init sbmac_init_module(void)
-{
- return platform_driver_register(&sbmac_driver);
-}
-
-static void __exit sbmac_cleanup_module(void)
-{
- platform_driver_unregister(&sbmac_driver);
-}
-
-module_init(sbmac_init_module);
-module_exit(sbmac_cleanup_module);
+module_platform_driver(sbmac_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf4074167d6..076e02a415a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 121
+#define TG3_MIN_NUM 122
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "November 2, 2011"
+#define DRV_MODULE_RELDATE "December 7, 2011"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
#else
-#define TG3_RX_OFFSET(tp) 0
+#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX 4096
+#define TG3_TX_BD_DMA_MAX_2K 2048
+#define TG3_TX_BD_DMA_MAX_4K 4096
#define TG3_RAW_IP_ALIGN 2
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
}
}
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
- u16 miireg;
-
- if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
- miireg = ADVERTISE_PAUSE_CAP;
- else if (flow_ctrl & FLOW_CTRL_TX)
- miireg = ADVERTISE_PAUSE_ASYM;
- else if (flow_ctrl & FLOW_CTRL_RX)
- miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- else
- miireg = 0;
-
- return miireg;
-}
-
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
u16 miireg;
@@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
- if (lcladv & ADVERTISE_1000XPAUSE) {
- if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- else if (rmtadv & LPA_1000XPAUSE_ASYM)
- cap = FLOW_CTRL_RX;
- } else {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- }
- } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+ if (lcladv & ADVERTISE_1000XPAUSE)
+ cap = FLOW_CTRL_RX;
+ if (rmtadv & ADVERTISE_1000XPAUSE)
cap = FLOW_CTRL_TX;
}
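The rewrite collapses the nested advertisement checks into the usual resolution shape: symmetric pause when both ends advertise PAUSE, otherwise asymmetric pause decided by which end also advertises PAUSE next to ASYM. A user-space sketch of the same decision; the bit values are assumed mirrors of the MII 1000BASE-X definitions:

#include <stdio.h>
#include <stdint.h>

#define ADV_1000XPAUSE		0x0080	/* assumed ADVERTISE_1000XPAUSE */
#define ADV_1000XPSE_ASYM	0x0100	/* assumed ADVERTISE_1000XPSE_ASYM */
#define FC_TX			0x1
#define FC_RX			0x2

static uint8_t resolve_1000x_pause(uint16_t lcladv, uint16_t rmtadv)
{
	uint8_t cap = 0;

	if (lcladv & rmtadv & ADV_1000XPAUSE) {
		cap = FC_TX | FC_RX;
	} else if (lcladv & rmtadv & ADV_1000XPSE_ASYM) {
		if (lcladv & ADV_1000XPAUSE)
			cap = FC_RX;
		if (rmtadv & ADV_1000XPAUSE)
			cap = FC_TX;
	}
	return cap;
}

int main(void)
{
	/* local advertises symmetric + asymmetric pause, remote asymmetric only */
	printf("resolved cap: 0x%x\n",
	       resolve_1000x_pause(ADV_1000XPAUSE | ADV_1000XPSE_ASYM,
				   ADV_1000XPSE_ASYM));
	return 0;
}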
@@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->duplex == DUPLEX_HALF)
mac_mode |= MAC_MODE_HALF_DUPLEX;
else {
- lcl_adv = tg3_advert_flowctrl_1000T(
+ lcl_adv = mii_advertise_flowctrl(
tp->link_config.flowctrl);
if (phydev->pause)
@@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
if (tp->link_config.active_speed == SPEED_1000 &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+ tg3_flag(tp, 57765_CLASS)) &&
!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
- if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
u32 val, new_adv;
new_adv = ADVERTISE_CSMA;
- if (advertise & ADVERTISED_10baseT_Half)
- new_adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- new_adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- new_adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- new_adv |= ADVERTISE_100FULL;
-
- new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+ new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+ new_adv |= mii_advertise_flowctrl(flowctrl);
err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
if (err)
goto done;
- if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
- goto done;
-
- new_adv = 0;
- if (advertise & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000HALF;
- if (advertise & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000FULL;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
- new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
- err = tg3_writephy(tp, MII_CTRL1000, new_adv);
- if (err)
- goto done;
+ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+ if (err)
+ goto done;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
goto done;
@@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
+ case ASIC_REV_57766:
case ASIC_REV_5719:
/* If we advertised any eee advertisements above... */
if (val)
@@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
return err;
}
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
- u32 adv_reg, all_mask = 0;
+ u32 advmsk, tgtadv, advertising;
- if (mask & ADVERTISED_10baseT_Half)
- all_mask |= ADVERTISE_10HALF;
- if (mask & ADVERTISED_10baseT_Full)
- all_mask |= ADVERTISE_10FULL;
- if (mask & ADVERTISED_100baseT_Half)
- all_mask |= ADVERTISE_100HALF;
- if (mask & ADVERTISED_100baseT_Full)
- all_mask |= ADVERTISE_100FULL;
+ advertising = tp->link_config.advertising;
+ tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
- if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
- return 0;
+ advmsk = ADVERTISE_ALL;
+ if (tp->link_config.active_duplex == DUPLEX_FULL) {
+ tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+ advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ }
- if ((adv_reg & ADVERTISE_ALL) != all_mask)
- return 0;
+ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+ return false;
+
+ if ((*lcladv & advmsk) != tgtadv)
+ return false;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
u32 tg3_ctrl;
- all_mask = 0;
- if (mask & ADVERTISED_1000baseT_Half)
- all_mask |= ADVERTISE_1000HALF;
- if (mask & ADVERTISED_1000baseT_Full)
- all_mask |= ADVERTISE_1000FULL;
+ tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
- return 0;
+ return false;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
- if (tg3_ctrl != all_mask)
- return 0;
+ if (tg3_ctrl != tgtadv)
+ return false;
}
- return 1;
+ return true;
}
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
- u32 curadv, reqadv;
+ u32 lpeth = 0;
- if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
- return 1;
-
- curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 val;
- if (tp->link_config.active_duplex == DUPLEX_FULL) {
- if (curadv != reqadv)
- return 0;
+ if (tg3_readphy(tp, MII_STAT1000, &val))
+ return false;
- if (tg3_flag(tp, PAUSE_AUTONEG))
- tg3_readphy(tp, MII_LPA, rmtadv);
- } else {
- /* Reprogram the advertisement register, even if it
- * does not affect the current link. If the link
- * gets renegotiated in the future, we can save an
- * additional renegotiation cycle by advertising
- * it correctly in the first place.
- */
- if (curadv != reqadv) {
- *lcladv &= ~(ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
- tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
- }
+ lpeth = mii_stat1000_to_ethtool_lpa_t(val);
}
- return 1;
+ if (tg3_readphy(tp, MII_LPA, rmtadv))
+ return false;
+
+ lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+ tp->link_config.rmt_adv = lpeth;
+
+ return true;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+ tp->link_config.rmt_adv = 0;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
if ((bmcr & BMCR_ANENABLE) &&
- tg3_copper_is_advertising_all(tp,
- tp->link_config.advertising)) {
- if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
- &rmt_adv))
- current_link_up = 1;
- }
+ tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+ tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+ current_link_up = 1;
} else {
if (!(bmcr & BMCR_ANENABLE) &&
tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
if (current_link_up == 1 &&
- tp->link_config.active_duplex == DUPLEX_FULL)
+ tp->link_config.active_duplex == DUPLEX_FULL) {
+ u32 reg, bit;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ reg = MII_TG3_FET_GEN_STAT;
+ bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+ } else {
+ reg = MII_TG3_EXT_STAT;
+ bit = MII_TG3_EXT_STAT_MDIX;
+ }
+
+ if (!tg3_readphy(tp, reg, &val) && (val & bit))
+ tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ }
}
relink:
@@ -4643,6 +4606,9 @@ restart_autoneg:
if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
if (rxflags & MR_LP_ADV_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
@@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
udelay(40);
current_link_up = 0;
+ tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->link_config.rmt_adv = 0;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
(tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
/* do nothing, just check for link up at the end */
} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
- u32 adv, new_adv;
+ u32 adv, newadv;
err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
- new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
- ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM |
- ADVERTISE_SLCT);
+ newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM |
+ ADVERTISE_SLCT);
- new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000XHALF;
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000XFULL;
-
- if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
- tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_ADVERTISE, newadv);
bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
tg3_writephy(tp, MII_BMCR, bmcr);
@@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
+
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
} else if (!tg3_flag(tp, 5780_CLASS)) {
/* Link is up via parallel detect */
} else {
@@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
u32 sw_idx = tnapi->tx_cons;
struct netdev_queue *txq;
int index = tnapi - tp->napi;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
if (tg3_flag(tp, ENABLE_TSS))
index--;
@@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
sw_idx = NEXT_TX(sw_idx);
}
+ pkts_compl++;
+ bytes_compl += skb->len;
+
dev_kfree_skb(skb);
if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
+ netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
- if (!ri->skb)
+ if (!ri->data)
return;
pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(ri->skb);
- ri->skb = NULL;
+ kfree(ri->data);
+ ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* buffers the cpu only reads the last cacheline of the RX descriptor
* (to fetch the error flags, vlan tag, checksum, and opaque cookie).
*/
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
u32 opaque_key, u32 dest_idx_unmasked)
{
struct tg3_rx_buffer_desc *desc;
struct ring_info *map;
- struct sk_buff *skb;
+ u8 *data;
dma_addr_t mapping;
- int skb_size, dest_idx;
+ int skb_size, data_size, dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
desc = &tpr->rx_std[dest_idx];
map = &tpr->rx_std_buffers[dest_idx];
- skb_size = tp->rx_pkt_map_sz;
+ data_size = tp->rx_pkt_map_sz;
break;
case RXD_OPAQUE_RING_JUMBO:
dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
desc = &tpr->rx_jmb[dest_idx].std;
map = &tpr->rx_jmb_buffers[dest_idx];
- skb_size = TG3_RX_JMB_MAP_SZ;
+ data_size = TG3_RX_JMB_MAP_SZ;
break;
default:
@@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
- if (skb == NULL)
+ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc(skb_size, GFP_ATOMIC);
+ if (!data)
return -ENOMEM;
- skb_reserve(skb, TG3_RX_OFFSET(tp));
-
- mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+ mapping = pci_map_single(tp->pdev,
+ data + TG3_RX_OFFSET(tp),
+ data_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(tp->pdev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- map->skb = skb;
+ map->data = data;
dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
- return skb_size;
+ return data_size;
}
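With the switch from netdev_alloc_skb() to a raw kmalloc() wrapped later by build_skb(), the allocation has to reserve both the headroom (TG3_RX_OFFSET, which now falls back to NET_SKB_PAD when NET_IP_ALIGN is zero) and tail room for struct skb_shared_info. A user-space model of that sizing; the cache-line size, shared-info size, and packet size below are assumptions, not values from any particular build:

#include <stdio.h>
#include <stddef.h>

#define CACHE_BYTES		64UL	/* assumed SMP_CACHE_BYTES */
#define DATA_ALIGN(x)		(((x) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))
#define SHARED_INFO_BYTES	320UL	/* assumed sizeof(struct skb_shared_info) */
#define HEADROOM		32UL	/* assumed NET_SKB_PAD */

int main(void)
{
	size_t data_size = 1536;	/* e.g. a standard-ring map size */
	size_t alloc_size = DATA_ALIGN(data_size + HEADROOM) +
			    DATA_ALIGN(SHARED_INFO_BYTES);

	printf("kmalloc size for one RX buffer: %zu bytes\n", alloc_size);
	return 0;
}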
/* We only need to move over in the address because the other
* members of the RX descriptor are invariant. See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
*/
static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
return;
}
- dest_map->skb = src_map->skb;
+ dest_map->data = src_map->data;
dma_unmap_addr_set(dest_map, mapping,
dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
*/
smp_wmb();
- src_map->skb = NULL;
+ src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ u8 *data;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &jmb_prod_idx;
} else
goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ prefetch(data + TG3_RX_OFFSET(tp));
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
- skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+ skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
*post_ptr);
if (skb_size < 0)
goto drop_it;
@@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
- /* Ensure that the update to the skb happens
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto drop_it_no_recycle;
+ }
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
+ /* Ensure that the update to the data happens
* after the usage of the old DMA mapping.
*/
smp_wmb();
- ri->skb = NULL;
+ ri->data = NULL;
- skb_put(skb, len);
} else {
- struct sk_buff *copy_skb;
-
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len +
- TG3_RAW_IP_ALIGN);
- if (copy_skb == NULL)
+ skb = netdev_alloc_skb(tp->dev,
+ len + TG3_RAW_IP_ALIGN);
+ if (skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
- skb_put(copy_skb, len);
+ skb_reserve(skb, TG3_RAW_IP_ALIGN);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
- skb_copy_from_linear_data(skb, copy_skb->data, len);
+ memcpy(skb->data,
+ data + TG3_RX_OFFSET(tp),
+ len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
- /* We'll reuse the original ring buffer. */
- skb = copy_skb;
}
+ skb_put(skb, len);
if ((tp->dev->features & NETIF_F_RXCSUM) &&
(desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
(((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_std_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_std_buffers[i].skb) {
+ if (dpr->rx_std_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_jmb_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_jmb_buffers[i].skb) {
+ if (dpr->rx_jmb_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -6443,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
bool hwbug = false;
if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
- hwbug = 1;
+ hwbug = true;
if (tg3_4g_overflow_test(map, len))
- hwbug = 1;
+ hwbug = true;
if (tg3_40bit_overflow_test(tp, map, len))
- hwbug = 1;
+ hwbug = true;
- if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ if (tp->dma_limit) {
u32 prvidx = *entry;
u32 tmp_flag = flags & ~TXD_FLAG_END;
- while (len > TG3_TX_BD_DMA_MAX && *budget) {
- u32 frag_len = TG3_TX_BD_DMA_MAX;
- len -= TG3_TX_BD_DMA_MAX;
+ while (len > tp->dma_limit && *budget) {
+ u32 frag_len = tp->dma_limit;
+ len -= tp->dma_limit;
/* Avoid the 8byte DMA problem */
if (len <= 8) {
- len += TG3_TX_BD_DMA_MAX / 2;
- frag_len = TG3_TX_BD_DMA_MAX / 2;
+ len += tp->dma_limit / 2;
+ frag_len = tp->dma_limit / 2;
}
tnapi->tx_buffers[*entry].fragmented = true;
@@ -6482,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
*budget -= 1;
*entry = NEXT_TX(*entry);
} else {
- hwbug = 1;
+ hwbug = true;
tnapi->tx_buffers[prvidx].fragmented = false;
}
}
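The per-device dma_limit replaces the single 4K_FIFO_LIMIT flag so 57766-class chips can cap a TX buffer descriptor at 2K instead of 4K, and the half-sized fragment keeps the residual from dropping to 8 bytes or less (the hardware erratum the comment refers to). A stand-alone sketch of the splitting arithmetic; the 4100-byte length is just an example:

#include <stdio.h>
#include <stdint.h>

/* Split a TX buffer into chunks no larger than 'limit', mirroring the loop
 * above: if the residual would shrink to 8 bytes or less, emit a half-sized
 * chunk instead so the final descriptor stays above 8 bytes.
 */
static void split_tx_len(uint32_t len, uint32_t limit)
{
	while (len > limit) {
		uint32_t frag_len = limit;

		len -= limit;
		if (len <= 8) {
			len += limit / 2;
			frag_len = limit / 2;
		}
		printf("fragment: %u bytes (remaining %u)\n", frag_len, len);
	}
	printf("final descriptor: %u bytes\n", len);
}

int main(void)
{
	split_tx_len(4100, 2048);	/* e.g. TG3_TX_BD_DMA_MAX_2K */
	return 0;
}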
@@ -6816,6 +6798,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ netdev_sent_queue(tp->dev, skb->len);
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
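netdev_sent_queue() here pairs with the netdev_completed_queue() call added to tg3_tx() and the netdev_reset_queue() in tg3_free_rings(); together they report per-byte queue occupancy for byte queue limits. A toy model of the accounting those hooks drive, not the kernel API itself:

#include <stdio.h>
#include <stdint.h>

static uint64_t sent_bytes, completed_bytes;

/* submit path: count every byte handed to the hardware */
static void sent_queue(uint32_t len)
{
	sent_bytes += len;
}

/* completion path: count what the hardware has finished sending */
static void completed_queue(uint32_t pkts, uint32_t bytes)
{
	(void)pkts;
	completed_bytes += bytes;
}

int main(void)
{
	sent_queue(1514);
	sent_queue(60);
	completed_queue(2, 1514 + 60);
	printf("bytes still in flight: %llu\n",
	       (unsigned long long)(sent_bytes - completed_bytes));
	return 0;
}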
@@ -6968,7 +6951,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
return 0;
}
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6994,7 +6977,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
}
}
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7004,9 +6988,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
tg3_set_loopback(dev, features);
@@ -7082,14 +7066,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
if (tpr != &tp->napi[0].prodring) {
for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
i = (i + 1) & tp->rx_std_ring_mask)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE)) {
for (i = tpr->rx_jmb_cons_idx;
i != tpr->rx_jmb_prod_idx;
i = (i + 1) & tp->rx_jmb_ring_mask) {
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7098,12 +7082,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
for (i = 0; i <= tp->rx_std_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7159,7 +7143,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX standard ring. Only "
"%d out of %d buffers were allocated "
@@ -7191,7 +7175,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
}
for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX jumbo ring. Only %d "
"out of %d buffers were allocated "
@@ -7297,6 +7281,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
}
+ netdev_reset_queue(tp->dev);
}
/* Initialize tx/rx rings for packet processing.
@@ -7591,8 +7576,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
if (tnapi->hw_status)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
}
- if (tp->hw_stats)
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
return err;
}
@@ -7626,15 +7609,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
- if (tg3_flag(tp, PCI_EXPRESS))
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
- else {
- pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
- tp->pci_cacheline_sz);
- pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
- tp->pci_lat_timer);
- }
+ if (!tg3_flag(tp, PCI_EXPRESS)) {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
}
/* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7798,6 @@ static int tg3_chip_reset(struct tg3 *tp)
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
val16);
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
/* Clear error status */
pci_write_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7891,11 @@ static int tg3_chip_reset(struct tg3 *tp)
return 0;
}
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+ struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+ struct tg3_ethtool_stats *);
+
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
@@ -7931,6 +7913,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
tg3_write_sig_legacy(tp, kind);
tg3_write_sig_post_reset(tp, kind);
+ if (tp->hw_stats) {
+ /* Save the stats across chip resets... */
+ tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+ tg3_get_estats(tp, &tp->estats_prev);
+
+ /* And make sure the next sample is new data */
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+ }
+
if (err)
return err;
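Because tg3_halt() now folds the running totals into net_stats_prev/estats_prev before wiping the hardware statistics block, later reads only need to add the fresh hardware deltas on top of the saved copies, and tg3_close() further down can simply clear them. A minimal model of that bookkeeping; the single field is illustrative:

#include <stdio.h>
#include <stdint.h>

struct stats {
	uint64_t rx_packets;
};

/* snapshot the totals, then let the hardware block start from zero */
static void halt_and_preserve(struct stats *prev, struct stats *hw)
{
	prev->rx_packets += hw->rx_packets;
	hw->rx_packets = 0;
}

int main(void)
{
	struct stats prev = { 0 }, hw = { 1234 };

	halt_and_preserve(&prev, &hw);
	hw.rx_packets = 10;		/* traffic seen after the reset */
	printf("cumulative rx: %llu\n",
	       (unsigned long long)(prev.rx_packets + hw.rx_packets));
	return 0;
}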
@@ -8074,7 +8065,7 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8082,7 @@ static void tg3_rings_reset(struct tg3 *tp)
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8188,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8209,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
return;
- if (!tg3_flag(tp, 5705_PLUS))
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
- else
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
@@ -8231,6 +8220,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] =
+ ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return;
+
+ if (tp->irq_cnt <= 2) {
+ memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+ return;
+ }
+
+ /* Validate table against current IRQ count */
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+ if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+ break;
+ }
+
+ if (i != TG3_RSS_INDIR_TBL_SIZE)
+ tg3_rss_init_dflt_indir_tbl(tp);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+ int i = 0;
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+
+ while (i < TG3_RSS_INDIR_TBL_SIZE) {
+ u32 val = tp->rss_ind_tbl[i];
+ i++;
+ for (; i % 8; i++) {
+ val <<= 4;
+ val |= tp->rss_ind_tbl[i];
+ }
+ tw32(reg, val);
+ reg += 4;
+ }
+}
+
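tg3_rss_write_indir_tbl() packs eight 4-bit ring indices into each 32-bit register image starting at MAC_RSS_INDIR_TBL_0. A user-space sketch of the nibble packing; the table size of 128 matches the TG3_RSS_INDIR_TBL_SIZE seen elsewhere in this patch, and the i % 3 fill just mimics a setup with three RSS rings:

#include <stdio.h>
#include <stdint.h>

#define TBL_SIZE 128	/* TG3_RSS_INDIR_TBL_SIZE */

/* Pack 4-bit indirection entries, eight per 32-bit register image. */
static void pack_indir_tbl(const uint8_t *tbl, uint32_t *regs)
{
	int i = 0, r = 0;

	while (i < TBL_SIZE) {
		uint32_t val = tbl[i];

		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tbl[i];
		}
		regs[r++] = val;
	}
}

int main(void)
{
	uint8_t tbl[TBL_SIZE];
	uint32_t regs[TBL_SIZE / 8];
	int i;

	for (i = 0; i < TBL_SIZE; i++)
		tbl[i] = i % 3;		/* three RSS rings, round-robin */

	pack_indir_tbl(tbl, regs);
	printf("first register image: 0x%08x\n", regs[0]);
	return 0;
}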
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
@@ -8337,7 +8374,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tg3_flag(tp, 57765_CLASS)) {
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
@@ -8425,7 +8462,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ if (!tg3_flag(tp, 57765_CLASS) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8609,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -8581,10 +8618,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
- val = TG3_RX_STD_MAX_SIZE_5700;
- else
- val = TG3_RX_STD_MAX_SIZE_5717;
+ val = TG3_RX_STD_RING_SIZE(tp);
val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
val |= (TG3_RX_STD_DMA_SZ << 2);
} else
@@ -8661,6 +8695,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
@@ -8924,28 +8961,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(100);
if (tg3_flag(tp, ENABLE_RSS)) {
- int i = 0;
- u32 reg = MAC_RSS_INDIR_TBL_0;
-
- if (tp->irq_cnt == 2) {
- for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
- tw32(reg, 0x0);
- reg += 4;
- }
- } else {
- u32 val;
-
- while (i < TG3_RSS_INDIR_TBL_SIZE) {
- val = i % (tp->irq_cnt - 1);
- i++;
- for (; i % 8; i++) {
- val <<= 4;
- val |= (i % (tp->irq_cnt - 1));
- }
- tw32(reg, val);
- reg += 4;
- }
- }
+ tg3_rss_write_indir_tbl(tp);
/* Setup the "secret" hash key. */
tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -9002,7 +9018,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (tg3_flag(tp, 57765_CLASS))
val = 1;
else
val = 2;
@@ -9217,7 +9233,7 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9669,6 +9685,8 @@ static int tg3_open(struct net_device *dev)
*/
tg3_ints_init(tp);
+ tg3_rss_check_indir_tbl(tp);
+
/* The placement of this call is tied
* to the setup and use of Host TX descriptors.
*/
@@ -9700,8 +9718,8 @@ static int tg3_open(struct net_device *dev)
tg3_free_rings(tp);
} else {
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ !tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
tp->timer_offset = HZ / 10;
@@ -9782,10 +9800,6 @@ err_out1:
return err;
}
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
- struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
static int tg3_close(struct net_device *dev)
{
int i;
@@ -9817,10 +9831,9 @@ static int tg3_close(struct net_device *dev)
tg3_ints_fini(tp);
- tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
- memcpy(&tp->estats_prev, tg3_get_estats(tp),
- sizeof(tp->estats_prev));
+ /* Clear stats across close / open calls */
+ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
tg3_napi_fini(tp);
@@ -9868,9 +9881,9 @@ static u64 calc_crc_errors(struct tg3 *tp)
estats->member = old_estats->member + \
get_stat64(&hw_stats->member)
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
+ struct tg3_ethtool_stats *estats)
{
- struct tg3_ethtool_stats *estats = &tp->estats;
struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
@@ -10318,12 +10331,20 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising |= ADVERTISED_Asym_Pause;
}
}
- if (netif_running(dev)) {
+ if (netif_running(dev) && netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
+ cmd->lp_advertising = tp->link_config.rmt_adv;
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
} else {
ethtool_cmd_speed_set(cmd, SPEED_INVALID);
cmd->duplex = DUPLEX_INVALID;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
}
cmd->phy_address = tp->phy_addr;
cmd->transceiver = XCVR_INTERNAL;
@@ -10428,10 +10449,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->fw_version, tp->fw_ver);
- strcpy(info->bus_info, pci_name(tp->pdev));
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -10590,12 +10611,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
- if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_RX)
epause->rx_pause = 1;
else
epause->rx_pause = 0;
- if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_TX)
epause->tx_pause = 1;
else
epause->tx_pause = 0;
@@ -10715,6 +10736,78 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
}
}
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (netif_running(tp->dev))
+ info->data = tp->irq_cnt;
+ else {
+ info->data = num_online_cpus();
+ if (info->data > TG3_IRQ_MAX_VECS_RSS)
+ info->data = TG3_IRQ_MAX_VECS_RSS;
+ }
+
+ /* The first interrupt vector only
+ * handles link interrupts.
+ */
+ info->data -= 1;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+ u32 size = 0;
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, SUPPORT_MSIX))
+ size = TG3_RSS_INDIR_TBL_SIZE;
+
+ return size;
+}
+
+static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ indir[i] = tp->rss_ind_tbl[i];
+
+ return 0;
+}
+
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ size_t i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] = indir[i];
+
+ if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+ return 0;
+
+ /* It is legal to write the indirection
+ * table while the device is running.
+ */
+ tg3_full_lock(tp, 0);
+ tg3_rss_write_indir_tbl(tp);
+ tg3_full_unlock(tp);
+
+ return 0;
+}
+
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
@@ -10769,7 +10862,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
- memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+ tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11352,7 +11446,7 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
@@ -11400,8 +11494,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
u32 budget;
- struct sk_buff *skb, *rx_skb;
- u8 *tx_data;
+ struct sk_buff *skb;
+ u8 *tx_data, *rx_data;
dma_addr_t map;
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11663,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
}
if (opaque_key == RXD_OPAQUE_RING_STD) {
- rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ rx_data = tpr->rx_std_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
mapping);
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ rx_data = tpr->rx_jmb_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
mapping);
} else
@@ -11582,15 +11676,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
PCI_DMA_FROMDEVICE);
+ rx_data += TG3_RX_OFFSET(tp);
for (i = data_off; i < rx_len; i++, val++) {
- if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ if (*(rx_data + i) != (u8) (val & 0xff))
goto out;
}
}
err = 0;
- /* tg3_free_rings will unmap and free the rx_skb */
+ /* tg3_free_rings will unmap and free the rx_data */
out:
return err;
}
@@ -11943,6 +12038,10 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
.get_sset_count = tg3_get_sset_count,
+ .get_rxnfc = tg3_get_rxnfc,
+ .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
+ .get_rxfh_indir = tg3_get_rxfh_indir,
+ .set_rxfh_indir = tg3_set_rxfh_indir,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -12612,7 +12711,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13218,8 +13317,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
- u32 adv = ADVERTISED_Autoneg |
- ADVERTISED_Pause;
+ u32 adv = ADVERTISED_Autoneg;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
adv |= ADVERTISED_1000baseT_Half |
@@ -13322,7 +13420,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
!tg3_flag(tp, ENABLE_APE) &&
!tg3_flag(tp, ENABLE_ASF)) {
- u32 bmsr, mask;
+ u32 bmsr, dummy;
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13335,10 +13433,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
tg3_phy_set_wirespeed(tp);
- mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
- if (!tg3_copper_is_advertising_all(tp, mask)) {
+ if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
tp->link_config.flowctrl);
@@ -13460,6 +13555,17 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+ strcpy(tp->board_part_number, "BCM57762");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+ strcpy(tp->board_part_number, "BCM57766");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+ strcpy(tp->board_part_number, "BCM57782");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+ strcpy(tp->board_part_number, "BCM57786");
+ else
+ goto nomatch;
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
@@ -13798,7 +13904,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
pci_read_config_dword(tp->pdev,
TG3PCI_GEN15_PRODID_ASICREV,
&prod_id_asic_rev);
@@ -13945,7 +14055,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, 5717_PLUS);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- tg3_flag(tp, 5717_PLUS))
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tg3_flag_set(tp, 57765_CLASS);
+
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
@@ -13997,9 +14110,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tp->fw_needed) {
+ /* For firmware TSO, assume ASF is disabled.
+ * We'll disable TSO later if we discover ASF
+ * is enabled in tg3_get_eeprom_hw_cfg().
+ */
tg3_flag_set(tp, TSO_CAPABLE);
- else {
+ } else {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -14027,6 +14144,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 57765_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSIX);
tp->irq_max = TG3_IRQ_MAX_VECS;
+ tg3_rss_init_dflt_indir_tbl(tp);
}
}
@@ -14034,9 +14152,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, SHORT_DMA_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
- tg3_flag_set(tp, 4K_FIFO_LIMIT);
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
- if (tg3_flag(tp, 5717_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14178,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, PCI_EXPRESS);
- tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
- tp->pcie_readrq = 2048;
-
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ int readrq = pcie_get_readrq(tp->pdev);
+ if (readrq > 2048)
+ pcie_set_readrq(tp->pdev, 2048);
+ }
pci_read_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14394,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
*/
tg3_get_eeprom_hw_cfg(tp);
+ if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+ }
+
if (tg3_flag(tp, ENABLE_APE)) {
/* Allow reads and writes to the
* APE register and memory space.
@@ -14311,7 +14438,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14548,11 +14675,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tg3_flag_clear(tp, POLL_SERDES);
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
- tp->rx_offset = 0;
+ tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -15313,7 +15440,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
u32 sndmbx, rcvmbx, intmbx;
char str[40];
u64 dma_mask, persist_dma_mask;
- u32 features = 0;
+ netdev_features_t features = 0;
printk_once(KERN_INFO "%s\n", version);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 94b4bd049a3..aea8f72c24f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -31,6 +31,8 @@
#define TG3_RX_RET_MAX_SIZE_5705 512
#define TG3_RX_RET_MAX_SIZE_5717 4096
+#define TG3_RSS_INDIR_TBL_SIZE 128
+
/* First 256 bytes are a mirror of PCI config space. */
#define TG3PCI_VENDOR 0x00000000
#define TG3PCI_VENDOR_BROADCOM 0x14e4
@@ -57,6 +59,10 @@
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
#define TG3PCI_DEVICE_TIGON3_5719 0x1657
#define TG3PCI_DEVICE_TIGON3_5720 0x165f
+#define TG3PCI_DEVICE_TIGON3_57762 0x1682
+#define TG3PCI_DEVICE_TIGON3_57766 0x1686
+#define TG3PCI_DEVICE_TIGON3_57786 0x16b3
+#define TG3PCI_DEVICE_TIGON3_57782 0x16b7
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -168,6 +174,7 @@
#define ASIC_REV_57765 0x57785
#define ASIC_REV_5719 0x5719
#define ASIC_REV_5720 0x5720
+#define ASIC_REV_57766 0x57766
#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
@@ -1340,6 +1347,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_JMB_2K_MMRR 0x00800000
#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
@@ -2174,6 +2182,7 @@
#define MII_TG3_EXT_CTRL_TBI 0x8000
#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
+#define MII_TG3_EXT_STAT_MDIX 0x2000
#define MII_TG3_EXT_STAT_LPASS 0x0100
#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */
@@ -2277,6 +2286,9 @@
#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+#define MII_TG3_FET_GEN_STAT 0x1c
+#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2662,9 +2674,13 @@ struct tg3_hw_stats {
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better the way we are doing it now.
+ *
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer holds only a pointer to kmalloc()'d data,
+ * and skbs are built only after the hardware has filled the frame.
*/
struct ring_info {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
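With this change the RX rings no longer hold pre-built sk_buffs but raw kmalloc() buffers; an skb is wrapped around a buffer only once the hardware has written a frame into it. A minimal sketch of that step, assuming the single-argument build_skb() of this kernel generation and the TG3_RX_OFFSET() headroom used elsewhere in the patch (the helper name and the len parameter are illustrative, not part of the patch):

/* Sketch only: turn a hardware-filled ring buffer into an sk_buff.
 * Error handling (dropping the frame and recycling the buffer) is omitted.
 */
static struct sk_buff *tg3_build_rx_skb(struct tg3 *tp, u8 *data, int len)
{
	struct sk_buff *skb = build_skb(data);	/* wrap the buffer, no copy */

	if (!skb)
		return NULL;			/* caller recycles the buffer */

	skb_reserve(skb, TG3_RX_OFFSET(tp));	/* skip NET_SKB_PAD + IP align */
	skb_put(skb, len);			/* frame length from the RX BD */
	return skb;
}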
@@ -2690,6 +2706,7 @@ struct tg3_link_config {
#define DUPLEX_INVALID 0xff
#define AUTONEG_INVALID 0xff
u16 active_speed;
+ u32 rmt_adv;
/* When we go in and out of low power mode we need
* to swap with this state.
@@ -2865,6 +2882,8 @@ enum TG3_FLAGS {
TG3_FLAG_NVRAM_BUFFERED,
TG3_FLAG_SUPPORT_MSI,
TG3_FLAG_SUPPORT_MSIX,
+ TG3_FLAG_USING_MSI,
+ TG3_FLAG_USING_MSIX,
TG3_FLAG_PCIX_MODE,
TG3_FLAG_PCI_HIGH_SPEED,
TG3_FLAG_PCI_32BIT,
@@ -2880,7 +2899,6 @@ enum TG3_FLAGS {
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
TG3_FLAG_TSO_BUG,
- TG3_FLAG_IS_5788,
TG3_FLAG_MAX_RXPEND_64,
TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2889,14 +2907,9 @@ enum TG3_FLAGS {
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
TG3_FLAG_HW_TSO_1,
- TG3_FLAG_5705_PLUS,
- TG3_FLAG_5750_PLUS,
+ TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
- TG3_FLAG_USING_MSI,
- TG3_FLAG_USING_MSIX,
TG3_FLAG_ICH_WORKAROUND,
- TG3_FLAG_5780_CLASS,
- TG3_FLAG_HW_TSO_2,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2910,18 +2923,23 @@ enum TG3_FLAGS {
TG3_FLAG_RGMII_EXT_IBND_RX_EN,
TG3_FLAG_RGMII_EXT_IBND_TX_EN,
TG3_FLAG_CLKREQ_BUG,
- TG3_FLAG_5755_PLUS,
TG3_FLAG_NO_NVRAM,
TG3_FLAG_ENABLE_RSS,
TG3_FLAG_ENABLE_TSS,
TG3_FLAG_SHORT_DMA_BUG,
TG3_FLAG_USE_JUMBO_BDFLAG,
TG3_FLAG_L1PLLPD_EN,
- TG3_FLAG_57765_PLUS,
TG3_FLAG_APE_HAS_NCSI,
- TG3_FLAG_5717_PLUS,
TG3_FLAG_4K_FIFO_LIMIT,
TG3_FLAG_RESET_TASK_PENDING,
+ TG3_FLAG_5705_PLUS,
+ TG3_FLAG_IS_5788,
+ TG3_FLAG_5750_PLUS,
+ TG3_FLAG_5780_CLASS,
+ TG3_FLAG_5755_PLUS,
+ TG3_FLAG_57765_PLUS,
+ TG3_FLAG_57765_CLASS,
+ TG3_FLAG_5717_PLUS,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -2985,6 +3003,7 @@ struct tg3 {
/* begin "tx thread" cacheline section */
void (*write32_tx_mbox) (struct tg3 *, u32,
u32);
+ u32 dma_limit;
/* begin "rx thread" cacheline section */
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3005,7 +3024,6 @@ struct tg3 {
unsigned long rx_dropped;
unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
- struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3131,10 +3149,12 @@ struct tg3 {
#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
#define TG3_PHYFLG_EEE_CAP 0x00040000
+#define TG3_PHYFLG_MDIX_STATE 0x00200000
u32 led_ctrl;
u32 phy_otp;
u32 setlpicnt;
+ u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
#define TG3_BPN_SIZE 24
char board_part_number[TG3_BPN_SIZE];
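The ethtool hooks registered above (tg3_get_rxfh_indir_size and friends) and tg3_rss_init_dflt_indir_tbl() are not part of the hunks quoted here. A minimal sketch of what the size/get pair could look like, assuming the u8 rss_ind_tbl[] just added; the bodies are illustrative, not the patch's actual implementation:

/* Sketch of the RSS indirection-table ethtool hooks (illustrative). */
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* The table is only meaningful on multi-vector (RSS-capable) chips. */
	return tg3_flag(tp, SUPPORT_MSIX) ? TG3_RSS_INDIR_TBL_SIZE : 0;
}

static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];	/* widen u8 entries to u32 */

	return 0;
}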
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 74d3abca196..6027302ae73 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BNA) += bna.o
-bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 8e627186507..29f284f79e0 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -185,6 +185,41 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
+ * bfa_cee_get_attr()
+ *
+ * @brief Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] cee Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+enum bfa_status
+bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req *cmd;
+
+ BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+ if (!bfa_nw_ioc_is_operational(cee->ioc))
+ return BFA_STATUS_IOC_FAILURE;
+
+ if (cee->get_attr_pending == true)
+ return BFA_STATUS_DEVBUSY;
+
+ cee->get_attr_pending = true;
+ cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/**
* bfa_cee_isrs()
*
* @brief Handles Mail-box interrupts for CEE module.
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
index 58d54e98d59..93fde633d6f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -59,5 +59,7 @@ u32 bfa_nw_cee_meminfo(void);
void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
u64 dma_pa);
void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-
+enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
+ struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 2f12d68021d..871c6309334 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -219,41 +219,39 @@ enum {
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block {
- u8 version; /*!< manufacturing block version */
- u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
- u16 mfgsize; /*!< mfg block size */
- u16 u16_chksum; /*!< old u16 checksum */
- char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
- char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
- u8 mfg_day; /*!< manufacturing day */
- u8 mfg_month; /*!< manufacturing month */
- u16 mfg_year; /*!< manufacturing year */
- u64 mfg_wwn; /*!< wwn base for this adapter */
- u8 num_wwn; /*!< number of wwns assigned */
- u8 mfg_speeds; /*!< speeds allowed for this adapter */
- u8 rsv[2];
- char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
- char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
- char
- supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
- char
- supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /*!< mac address */
- u8 num_mac; /*!< number of mac addresses */
- u8 rsv2;
- u32 card_type; /*!< card type */
- char cap_nic; /*!< capability nic */
- char cap_cna; /*!< capability cna */
- char cap_hba; /*!< capability hba */
- char cap_fc16g; /*!< capability fc 16g */
- char cap_sriov; /*!< capability sriov */
- char cap_mezz; /*!< capability mezz */
- u8 rsv3;
- u8 mfg_nports; /*!< number of ports */
- char media[8]; /*!< xfi/xaui */
- char initial_mode[8];/*!< initial mode: hba/cna/nic */
- u8 rsv4[84];
- u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+ u8 version; /* manufacturing block version */
+ u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
+ u16 mfgsize; /* mfg block size */
+ u16 u16_chksum; /* old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /* manufacturing day */
+ u8 mfg_month; /* manufacturing month */
+ u16 mfg_year; /* manufacturing year */
+ u64 mfg_wwn; /* wwn base for this adapter */
+ u8 num_wwn; /* number of wwns assigned */
+ u8 mfg_speeds; /* speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /* base mac address */
+ u8 num_mac; /* number of mac addresses */
+ u8 rsv2;
+ u32 card_type; /* card type */
+ char cap_nic; /* capability nic */
+ char cap_cna; /* capability cna */
+ char cap_hba; /* capability hba */
+ char cap_fc16g; /* capability fc 16g */
+ char cap_sriov; /* capability sriov */
+ char cap_mezz; /* capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /* number of ports */
+ char media[8]; /* xfi/xaui */
+ char initial_mode[8]; /* initial mode: hba/cna/nic */
+ u8 rsv4[84];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};
#pragma pack()
@@ -293,4 +291,34 @@ enum bfa_mode {
BFA_MODE_NIC = 3
};
+/*
+ * Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
+#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
+#define BFA_TOTAL_FLASH_SIZE 0x400000
+#define BFA_FLASH_PART_MFG 7
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr {
+ u32 part_type; /* partition type */
+ u32 part_instance; /* partition instance */
+ u32 part_off; /* partition offset */
+ u32 part_size; /* partition size */
+ u32 part_len; /* partition content length */
+ u32 part_status; /* partition status */
+ char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr {
+ u32 status; /* flash overall status */
+ u32 npart; /* num of partitions */
+ struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
+};
+
#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b0307a00a10..abfad275b5f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -74,6 +74,7 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
@@ -997,6 +998,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
+ bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1743,6 +1745,114 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
bfa_q_deq(&mod->cmd_q, &cmd);
}
+/**
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] tbuf app memory to store data from smem
+ * @param[in] soff smem offset
+ * @param[in] sz size of smem in bytes
+ */
+static int
+bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
+{
+ u32 pgnum, loff, r32;
+ int i, len;
+ u32 *buf = tbuf;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+ loff = PSS_SMEM_PGOFF(soff);
+
+ /*
+ * Hold semaphore to serialize pll init and fwtrc.
+ */
+ if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+ return 1;
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ len = sz/sizeof(u32);
+ for (i = 0; i < len; i++) {
+ r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ buf[i] = be32_to_cpu(r32);
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * release semaphore
+ */
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+ return 0;
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
+ int tlen, status = 0;
+
+ tlen = *trclen;
+ if (tlen > BNA_DBG_FWTRC_LEN)
+ tlen = BNA_DBG_FWTRC_LEN;
+
+ status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
+ *trclen = tlen;
+ return status;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_once) {
+ ioc->dbg_fwsave_once = 0;
+ if (ioc->dbg_fwsave_len) {
+ tlen = ioc->dbg_fwsave_len;
+ bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+ }
+ }
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_len == 0)
+ return BFA_STATUS_ENOFSAVE;
+
+ tlen = *trclen;
+ if (tlen > ioc->dbg_fwsave_len)
+ tlen = ioc->dbg_fwsave_len;
+
+ memcpy(trcdata, ioc->dbg_fwsave, tlen);
+ *trclen = tlen;
+ return BFA_STATUS_OK;
+}
+
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
@@ -1751,6 +1861,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+ bfa_nw_ioc_debug_save_ftrc(ioc);
}
/**
@@ -2058,6 +2169,16 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
+/**
+ * Initialize memory for saving firmware trace.
+ */
+void
+bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
+{
+ ioc->dbg_fwsave = dbg_fwsave;
+ ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
+}
+
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
@@ -2172,6 +2293,15 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
}
/**
+ * return true if IOC is operational
+ */
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+/**
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
@@ -2471,3 +2601,366 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
msecs_to_jiffies(BFA_IOC_POLL_TOV));
}
}
+
+/*
+ * Flash module specific
+ */
+
+/*
+ * The FLASH DMA buffer should be big enough to hold both the MFG block and
+ * the asic block (64k) at the same time, and should also be 2k aligned to
+ * avoid a write segment crossing a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ 2048
+#define BFA_FLASH_DMA_BUF_SZ \
+ roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_cb(struct bfa_flash *flash)
+{
+ flash->op_busy = 0;
+ if (flash->cbfn)
+ flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
+{
+ struct bfa_flash *flash = cbarg;
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (flash->op_busy) {
+ flash->status = BFA_STATUS_IOC_FAILURE;
+ flash->cbfn(flash->cbarg, flash->status);
+ flash->op_busy = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash *flash)
+{
+ struct bfi_flash_write_req *msg =
+ (struct bfi_flash_write_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == flash->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ flash->residue -= len;
+ flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+ struct bfa_flash *flash = cbarg;
+ struct bfi_flash_read_req *msg =
+ (struct bfi_flash_read_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
+{
+ struct bfa_flash *flash = flasharg;
+ u32 status;
+
+ union {
+ struct bfi_flash_query_rsp *query;
+ struct bfi_flash_write_rsp *write;
+ struct bfi_flash_read_rsp *read;
+ struct bfi_mbmsg *msg;
+ } m;
+
+ m.msg = msg;
+
+ /* receiving response after ioc failure */
+ if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
+ return;
+
+ switch (msg->mh.msg_id) {
+ case BFI_FLASH_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ if (status == BFA_STATUS_OK) {
+ u32 i;
+ struct bfa_flash_attr *attr, *f;
+
+ attr = (struct bfa_flash_attr *) flash->ubuf;
+ f = (struct bfa_flash_attr *) flash->dbuf_kva;
+ attr->status = be32_to_cpu(f->status);
+ attr->npart = be32_to_cpu(f->npart);
+ for (i = 0; i < attr->npart; i++) {
+ attr->part[i].part_type =
+ be32_to_cpu(f->part[i].part_type);
+ attr->part[i].part_instance =
+ be32_to_cpu(f->part[i].part_instance);
+ attr->part[i].part_off =
+ be32_to_cpu(f->part[i].part_off);
+ attr->part[i].part_size =
+ be32_to_cpu(f->part[i].part_size);
+ attr->part[i].part_len =
+ be32_to_cpu(f->part[i].part_len);
+ attr->part[i].part_status =
+ be32_to_cpu(f->part[i].part_status);
+ }
+ }
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ if (status != BFA_STATUS_OK || flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_write_send(flash);
+ break;
+ case BFI_FLASH_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ if (status != BFA_STATUS_OK) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ memcpy(flash->ubuf + flash->offset,
+ flash->dbuf_kva, len);
+ flash->residue -= len;
+ flash->offset += len;
+ if (flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_read_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_BOOT_VER_RSP:
+ case BFI_FLASH_I2H_EVENT:
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Flash memory info API.
+ */
+u32
+bfa_nw_flash_meminfo(void)
+{
+ return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ */
+void
+bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
+{
+ flash->ioc = ioc;
+ flash->cbfn = NULL;
+ flash->cbarg = NULL;
+ flash->op_busy = 0;
+
+ bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+ bfa_q_qe_init(&flash->ioc_notify);
+ bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+ list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ */
+void
+bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
+{
+ flash->dbuf_kva = dm_kva;
+ flash->dbuf_pa = dm_pa;
+ memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+ dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ struct bfi_flash_query_req *msg =
+ (struct bfi_flash_query_req *) flash->mb.msg;
+
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->ubuf = (u8 *) attr;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (type == BFA_FLASH_PART_MFG)
+ return BFA_STATUS_EINVAL;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_write_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_read_send(flash);
+
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index ca158d1eaef..3b4460fdc14 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -27,6 +27,8 @@
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_POLL_TOV 200 /* msecs */
+#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
+ BFI_IOC_TRC_HDR_SZ)
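With the trace constants added to bfi.h further down (256 entries of 16 bytes each plus a 32-byte header), BNA_DBG_FWTRC_LEN works out to 256 * 16 + 32 = 4128 bytes per IOC, which is the size the FWTRC resource in bna_enet.c now reserves.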
/**
* PCI device information required by IOC
@@ -68,6 +70,16 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
}
+#define bfa_alen_set(__alen, __len, __pa) \
+ __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
+{
+ alen->al_len = cpu_to_be32(len);
+ bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
struct bfa_ioc_regs {
void __iomem *hfn_mbox_cmd;
void __iomem *hfn_mbox;
@@ -296,6 +308,7 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
@@ -307,6 +320,9 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
+int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
+int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
@@ -322,4 +338,42 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
+/*
+ * Flash module specific
+ */
+typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status);
+
+struct bfa_flash {
+ struct bfa_ioc *ioc; /* back pointer to ioc */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 op_busy; /* operation busy flag */
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ enum bfa_status status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ bfa_cb_flash cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ u32 addr_off; /* partition address offset */
+ struct bfa_mbox_cmd mb; /* mailbox */
+ struct bfa_ioc_notify ioc_notify; /* ioc event notify */
+};
+
+enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
+ struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+u32 bfa_nw_flash_meminfo(void);
+void bfa_nw_flash_attach(struct bfa_flash *flash,
+ struct bfa_ioc *ioc, void *dev);
+void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
+
#endif /* __BFA_IOC_H__ */
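The flash calls declared above are asynchronous: they return BFA_STATUS_OK once the mailbox request is queued and report the final status through the bfa_cb_flash callback. Callers needing synchronous behaviour pair them with a struct completion, as the bnad drvinfo path below does (issuing the call under bna_lock and waiting after dropping it). A minimal sketch of that pattern; the wrapper and context names are illustrative:

/* Sketch: synchronous wrapper around the async flash read API.
 * The callback records the status and signals the waiter.
 */
struct flash_sync_ctx {
	struct completion comp;
	enum bfa_status status;
};

static void flash_sync_cb(void *arg, enum bfa_status status)
{
	struct flash_sync_ctx *ctx = arg;

	ctx->status = status;
	complete(&ctx->comp);
}

static enum bfa_status
flash_read_sync(struct bfa_flash *flash, u32 type, u8 instance,
		void *buf, u32 len, u32 offset)
{
	struct flash_sync_ctx ctx;
	enum bfa_status ret;

	init_completion(&ctx.comp);
	ret = bfa_nw_flash_read_part(flash, type, instance, buf, len, offset,
				     flash_sync_cb, &ctx);
	if (ret != BFA_STATUS_OK)
		return ret;		/* IOC down, busy, or bad length */

	wait_for_completion(&ctx.comp);
	return ctx.status;
}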
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 7a1393aabd4..0d9df695397 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -83,6 +83,14 @@ union bfi_addr_u {
} a32;
};
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen {
+ union bfi_addr_u al_addr; /* DMA addr of buffer */
+ u32 al_len; /* length of buffer */
+};
+
/*
* Large Message structure - 128 Bytes size Msgs
*/
@@ -249,6 +257,8 @@ struct bfi_ioc_getattr_reply {
*/
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
+#define BFI_IOC_TRC_ENT_SZ 16
+#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
#define BFI_IOC_MD5SUM_SZ 4
@@ -476,6 +486,93 @@ struct bfi_msgq_i2h_cmdq_copy_req {
u16 len;
};
+/*
+ * FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+ BFI_FLASH_H2I_QUERY_REQ = 1,
+ BFI_FLASH_H2I_ERASE_REQ = 2,
+ BFI_FLASH_H2I_WRITE_REQ = 3,
+ BFI_FLASH_H2I_READ_REQ = 4,
+ BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+ BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+ BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+ BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+ BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
#pragma pack()
#endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 26f5c5abfd1..9ccc586e376 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1727,6 +1727,7 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+ bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
/**
* Attach common modules (Diag, SFP, CEE, Port) and claim respective
@@ -1740,6 +1741,11 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
kva += bfa_nw_cee_meminfo();
dma += bfa_nw_cee_meminfo();
+ bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
+ bfa_nw_flash_memclaim(&bna->flash, kva, dma);
+ kva += bfa_nw_flash_meminfo();
+ dma += bfa_nw_flash_meminfo();
+
bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
bfa_msgq_memclaim(&bna->msgq, kva, dma);
bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
@@ -1892,7 +1898,8 @@ bna_res_req(struct bna_res_info *res_info)
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
(bfa_nw_cee_meminfo() +
- bfa_msgq_meminfo()), PAGE_SIZE);
+ bfa_nw_flash_meminfo() +
+ bfa_msgq_meminfo()), PAGE_SIZE);
/* DMA memory for retrieving IOC attributes */
res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
@@ -1904,8 +1911,8 @@ bna_res_req(struct bna_res_info *res_info)
/* Virtual memory for retrieving fw_trc */
res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
/* DMA memory for retrieving stats */
res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d090fbfb12f..8e57fc5c586 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -966,6 +966,7 @@ struct bna {
struct bna_ioceth ioceth;
struct bfa_cee cee;
+ struct bfa_flash flash;
struct bfa_msgq msgq;
struct bna_ethport ethport;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7f3091e7eb4..2eddbaa5db4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -44,11 +44,18 @@ static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+static uint bna_debugfs_enable = 1;
+module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
+ " Range[false:0|true:1]");
+
/*
* Global variables
*/
u32 bnad_rxqs_per_cq = 2;
-
+u32 bna_id;
+struct mutex bnad_list_mutex;
+LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
@@ -75,6 +82,23 @@ do { \
#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
+static void
+bnad_add_to_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_add_tail(&bnad->list_entry, &bnad_list);
+ bnad->id = bna_id++;
+ mutex_unlock(&bnad_list_mutex);
+}
+
+static void
+bnad_remove_from_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_del(&bnad->list_entry);
+ mutex_unlock(&bnad_list_mutex);
+}
+
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@@ -723,7 +747,7 @@ void
bnad_cb_ethport_link_status(struct bnad *bnad,
enum bna_link_status link_status)
{
- bool link_up = 0;
+ bool link_up = false;
link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
@@ -1084,6 +1108,16 @@ bnad_cb_enet_mtu_set(struct bnad *bnad)
complete(&bnad->bnad_completions.mtu_comp);
}
+void
+bnad_cb_completion(void *arg, enum bfa_status status)
+{
+ struct bnad_iocmd_comp *iocmd_comp =
+ (struct bnad_iocmd_comp *)arg;
+
+ iocmd_comp->comp_status = (u32) status;
+ complete(&iocmd_comp->comp);
+}
+
/* Resource allocation, free functions */
static void
@@ -2968,7 +3002,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
-static void
+static int
bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2976,7 +3010,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -2986,9 +3020,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
-static void
+static int
bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2996,7 +3032,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -3006,6 +3042,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3163,12 +3201,14 @@ bnad_lock_init(struct bnad *bnad)
{
spin_lock_init(&bnad->bna_lock);
mutex_init(&bnad->conf_mutex);
+ mutex_init(&bnad_list_mutex);
}
static void
bnad_lock_uninit(struct bnad *bnad)
{
mutex_destroy(&bnad->conf_mutex);
+ mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
@@ -3186,7 +3226,7 @@ bnad_pci_init(struct bnad *bnad,
goto disable_device;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
!dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = 1;
+ *using_dac = true;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
@@ -3195,7 +3235,7 @@ bnad_pci_init(struct bnad *bnad,
if (err)
goto release_regions;
}
- *using_dac = 0;
+ *using_dac = false;
}
pci_set_master(pdev);
return 0;
@@ -3249,8 +3289,8 @@ bnad_pci_probe(struct pci_dev *pdev,
return err;
}
bnad = netdev_priv(netdev);
-
bnad_lock_init(bnad);
+ bnad_add_to_list(bnad);
mutex_lock(&bnad->conf_mutex);
/*
@@ -3277,6 +3317,10 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Set link to down state */
netif_carrier_off(netdev);
+ /* Setup the debugfs node for this bnad */
+ if (bna_debugfs_enable)
+ bnad_debugfs_init(bnad);
+
/* Get resource requirement from bna */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_res_req(&bnad->res_info[0]);
@@ -3398,11 +3442,15 @@ disable_ioceth:
res_free:
bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
pci_uninit:
bnad_pci_uninit(pdev);
unlock_mutex:
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
free_netdev(netdev);
return err;
@@ -3441,7 +3489,11 @@ bnad_pci_remove(struct pci_dev *pdev)
bnad_disable_msix(bnad);
bnad_pci_uninit(pdev);
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 5487ca42d01..c975ce672f4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -124,6 +124,12 @@ enum bnad_link_state {
BNAD_LS_UP = 1
};
+struct bnad_iocmd_comp {
+ struct bnad *bnad;
+ struct completion comp;
+ int comp_status;
+};
+
struct bnad_completion {
struct completion ioc_comp;
struct completion ucast_comp;
@@ -251,6 +257,8 @@ struct bnad_unmap_q {
struct bnad {
struct net_device *netdev;
+ u32 id;
+ struct list_head list_entry;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TX];
@@ -320,6 +328,20 @@ struct bnad {
char adapter_name[BNAD_NAME_LEN];
char port_name[BNAD_NAME_LEN];
char mbox_irq_name[BNAD_NAME_LEN];
+
+ /* debugfs specific data */
+ char *regdata;
+ u32 reglen;
+ struct dentry *bnad_dentry_files[5];
+ struct dentry *port_debugfs_root;
+};
+
+struct bnad_drvinfo {
+ struct bfa_ioc_attr ioc_attr;
+ struct bfa_cee_attr cee_attr;
+ struct bfa_flash_attr flash_attr;
+ u32 cee_status;
+ u32 flash_status;
};
/*
@@ -340,6 +362,7 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
extern int bnad_enable_default_bcast(struct bnad *bnad);
extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
extern void bnad_set_ethtool_ops(struct net_device *netdev);
+extern void bnad_cb_completion(void *arg, enum bfa_status status);
/* Configuration & setup */
extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
@@ -359,6 +382,10 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad,
extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
struct rtnl_link_stats64 *stats);
+/* Debugfs */
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
+
/**
* MACROS
*/
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
new file mode 100644
index 00000000000..592ad3929f5
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -0,0 +1,623 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "bnad.h"
+
+/*
+ * BNA debugfs interface
+ *
+ * To access the interface, the debugfs filesystem should be mounted
+ * (if not already) using:
+ * mount -t debugfs none /sys/kernel/debug
+ *
+ * BNA Hierarchy:
+ * - bna/pci_dev:<pci_name>
+ * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna
+ *
+ * Debugging services available per pci_dev:
+ * fwtrc: To collect current firmware trace.
+ * fwsave: To collect last saved fw trace as a result of firmware crash.
+ * regwr: To write one word to a chip register.
+ * regrd: To read one or more words from a chip register.
+ */
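The regrd and regwr nodes each take a single hex pair on write, "offset:word-count" and "offset:value" respectively (matching the sscanf("%x:%x", ...) parsing in the handlers below); regrd is then read back to retrieve the captured words. A small userspace sketch, with an illustrative pci_dev path:

/* Sketch: read 4 words starting at chip offset 0x1000 via debugfs.
 * The pci_dev path is an example only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/bna/pci_dev:0000:03:00.0/regrd";
	char req[] = "1000:4";			/* hex offset : word count */
	unsigned int words[4];
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	write(fd, req, strlen(req));		/* triggers the register read */
	lseek(fd, 0, SEEK_SET);			/* rewind before reading back */
	read(fd, words, sizeof(words));		/* fetch the captured words */
	printf("0x1000: %08x\n", words[0]);
	close(fd);
	return 0;
}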
+
+struct bnad_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buffer_len;
+};
+
+static int
+bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bnad %s: Failed to collect fwtrc\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwsave buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to collect fwsave\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *reg_debug;
+
+ reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!reg_debug)
+ return -ENOMEM;
+
+ reg_debug->i_private = inode->i_private;
+
+ file->private_data = reg_debug;
+
+ return 0;
+}
+
+static int
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
+{
+ struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
+ struct bnad_iocmd_comp fcomp;
+ unsigned long flags = 0;
+ int ret = BFA_STATUS_FAILED;
+
+ /* Get IOC info */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Retrieve CEE related info */
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->cee_status = fcomp.comp_status;
+
+ /* Retrieve flash partition info */
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->flash_status = fcomp.comp_status;
+out:
+ return ret;
+}
+
+static int
+bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *drv_info;
+ int rc;
+
+ drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!drv_info)
+ return -ENOMEM;
+
+ drv_info->buffer_len = sizeof(struct bnad_drvinfo);
+
+ drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
+ if (!drv_info->debug_buffer) {
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to allocate drv info buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ mutex_lock(&bnad->conf_mutex);
+ rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
+ drv_info->buffer_len);
+ mutex_unlock(&bnad->conf_mutex);
+ if (rc != BFA_STATUS_OK) {
+ kfree(drv_info->debug_buffer);
+ drv_info->debug_buffer = NULL;
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to collect drvinfo\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = drv_info;
+
+ return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t pos = file->f_pos;
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return -EINVAL;
+
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ break;
+ case 1:
+ file->f_pos += offset;
+ break;
+ case 2:
+ file->f_pos = debug->buffer_len - offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
+ file->f_pos = pos;
+ return -EINVAL;
+ }
+
+ return file->f_pos;
+}
+
+static ssize_t
+bnad_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug || !debug->debug_buffer)
+ return 0;
+
+ return simple_read_from_buffer(buf, nbytes, pos,
+ debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ (0x40000)
+#define BFA_REG_CB_ADDRSZ (0x20000)
+#define BFA_REG_ADDRSZ(__ioc) \
+ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \
+ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
+
+/*
+ * Function to check if the register offset passed is valid.
+ */
+static int
+bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
+{
+ u8 area;
+
+ /* check [16:15] */
+ area = (offset >> 15) & 0x7;
+ if (area == 0) {
+ /* PCIe core register */
+ if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else if (area == 0x1) {
+ /* CB 32 KB memory page */
+ if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else {
+ /* CB register space 64KB */
+ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+ return BFA_STATUS_EINVAL;
+ }
+ return BFA_STATUS_OK;
+}
+
+static ssize_t
+bnad_debugfs_read_regrd(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ ssize_t rc;
+
+ if (!bnad->regdata)
+ return 0;
+
+ rc = simple_read_from_buffer(buf, nbytes, pos,
+ bnad->regdata, bnad->reglen);
+
+ if ((*pos + nbytes) >= bnad->reglen) {
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ }
+
+ return rc;
+}
+
+static ssize_t
+bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, len, rc, i;
+ u32 *regbuf;
+ void __iomem *rb, *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+
+ kfree(kern_buf);
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+
+ bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
+ if (!bnad->regdata) {
+ pr_warn("bna %s: Failed to allocate regrd buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ bnad->reglen = len << 2;
+ rb = bfa_ioc_bar0(ioc);
+ addr &= BFA_REG_ADDRMSK(ioc);
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, len);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ return -EINVAL;
+ }
+
+ reg_addr = rb + addr;
+ regbuf = (u32 *)bnad->regdata;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < len; i++) {
+ *regbuf = readl(reg_addr);
+ regbuf++;
+ reg_addr += sizeof(u32);
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static ssize_t
+bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, val, rc;
+ void __iomem *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
+ rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ kfree(kern_buf);
+
+ addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, 1);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ return -EINVAL;
+ }
+
+ reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ writel(val, reg_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static int
+bnad_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static int
+bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ kfree(debug->debug_buffer);
+
+ file->private_data = NULL;
+ kfree(debug);
+ debug = NULL;
+ return 0;
+}
+
+static const struct file_operations bnad_debugfs_op_fwtrc = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwtrc,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_fwsave = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwsave,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regrd = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read_regrd,
+ .write = bnad_debugfs_write_regrd,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regwr = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .write = bnad_debugfs_write_regwr,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_drvinfo = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_drvinfo,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+struct bnad_debugfs_entry {
+ const char *name;
+ mode_t mode;
+ const struct file_operations *fops;
+};
+
+static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
+ { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
+ { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
+ { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
+ { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
+ { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+};
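+
+/*
+ * The entries above are created by bnad_debugfs_init() below under a
+ * per-device directory, <debugfs>/bna/pci_dev:<pci-name>/ (debugfs is
+ * typically mounted at /sys/kernel/debug).
+ */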
+
+static struct dentry *bna_debugfs_root;
+static atomic_t bna_debugfs_port_count;
+
+/* Initialize debugfs interface for BNA */
+void
+bnad_debugfs_init(struct bnad *bnad)
+{
+ const struct bnad_debugfs_entry *file;
+ char name[64];
+ int i;
+
+ /* Setup the BNA debugfs root directory*/
+ if (!bna_debugfs_root) {
+ bna_debugfs_root = debugfs_create_dir("bna", NULL);
+ atomic_set(&bna_debugfs_port_count, 0);
+ if (!bna_debugfs_root) {
+ pr_warn("BNA: debugfs root dir creation failed\n");
+ return;
+ }
+ }
+
+ /* Setup the pci_dev debugfs directory for the port */
+ snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
+ if (!bnad->port_debugfs_root) {
+ bnad->port_debugfs_root =
+ debugfs_create_dir(name, bna_debugfs_root);
+ if (!bnad->port_debugfs_root) {
+ pr_warn("bna pci_dev %s: root dir creation failed\n",
+ pci_name(bnad->pcidev));
+ return;
+ }
+
+ atomic_inc(&bna_debugfs_port_count);
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ file = &bnad_debugfs_files[i];
+ bnad->bnad_dentry_files[i] =
+ debugfs_create_file(file->name,
+ file->mode,
+ bnad->port_debugfs_root,
+ bnad,
+ file->fops);
+ if (!bnad->bnad_dentry_files[i]) {
+ pr_warn(
+ "BNA pci_dev:%s: create %s entry failed\n",
+ pci_name(bnad->pcidev), file->name);
+ return;
+ }
+ }
+ }
+}
+
+/* Uninitialize debugfs interface for BNA */
+void
+bnad_debugfs_uninit(struct bnad *bnad)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ if (bnad->bnad_dentry_files[i]) {
+ debugfs_remove(bnad->bnad_dentry_files[i]);
+ bnad->bnad_dentry_files[i] = NULL;
+ }
+ }
+
+ /* Remove the pci_dev debugfs directory for the port */
+ if (bnad->port_debugfs_root) {
+ debugfs_remove(bnad->port_debugfs_root);
+ bnad->port_debugfs_root = NULL;
+ atomic_dec(&bna_debugfs_port_count);
+ }
+
+ /* Remove the BNA debugfs root directory */
+ if (atomic_read(&bna_debugfs_port_count) == 0) {
+ debugfs_remove(bna_debugfs_root);
+ bna_debugfs_root = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index fd3dcc1e914..5f7be5ac32a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strcpy(drvinfo->driver, BNAD_NAME);
- strcpy(drvinfo->version, BNAD_VERSION);
+ strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -305,12 +305,13 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
- sizeof(drvinfo->fw_version) - 1);
+ strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ sizeof(drvinfo->bus_info));
}
static void
@@ -934,6 +935,143 @@ bnad_get_sset_count(struct net_device *netdev, int sset)
}
}
+static u32
+bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
+ u32 *base_offset)
+{
+ struct bfa_flash_attr *flash_attr;
+ struct bnad_iocmd_comp fcomp;
+ u32 i, flash_part = 0, ret;
+ unsigned long flags = 0;
+
+ flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
+ if (!flash_attr)
+ return -ENOMEM;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ kfree(flash_attr);
+ goto out_err;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+
+ /* Check for the flash type & base offset value */
+ if (ret == BFA_STATUS_OK) {
+ for (i = 0; i < flash_attr->npart; i++) {
+ if (offset >= flash_attr->part[i].part_off &&
+ offset < (flash_attr->part[i].part_off +
+ flash_attr->part[i].part_size)) {
+ flash_part = flash_attr->part[i].part_type;
+ *base_offset = flash_attr->part[i].part_off;
+ break;
+ }
+ }
+ }
+ kfree(flash_attr);
+ return flash_part;
+out_err:
+ return -EINVAL;
+}
+
+static int
+bnad_get_eeprom_len(struct net_device *netdev)
+{
+ return BFA_TOTAL_FLASH_SIZE;
+}
+
+static int
+bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash read request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EFAULT;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part <= 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
+
+static int
+bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash update request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EINVAL;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part <= 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
+
static struct ethtool_ops bnad_ethtool_ops = {
.get_settings = bnad_get_settings,
.set_settings = bnad_set_settings,
@@ -948,7 +1086,10 @@ static struct ethtool_ops bnad_ethtool_ops = {
.set_pauseparam = bnad_set_pauseparam,
.get_strings = bnad_get_strings,
.get_ethtool_stats = bnad_get_ethtool_stats,
- .get_sset_count = bnad_get_sset_count
+ .get_sset_count = bnad_get_sset_count,
+ .get_eeprom_len = bnad_get_eeprom_len,
+ .get_eeprom = bnad_get_eeprom,
+ .set_eeprom = bnad_set_eeprom,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 1b3e90dfbd9..32e8f178ab7 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -43,8 +43,7 @@ extern char bfa_version[];
#pragma pack(1)
-#define MAC_ADDRLEN (6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
#pragma pack()
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 98849a1fc74..b48378a41e4 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -7,6 +7,7 @@ config HAVE_NET_MACB
config NET_ATMEL
bool "Atmel devices"
+ default y
depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
new file mode 100644
index 00000000000..aba435c3d4a
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -0,0 +1,7 @@
+config NET_CALXEDA_XGMAC
+ tristate "Calxeda 1G/10G XGMAC Ethernet driver"
+ depends on HAS_IOMEM
+ select CRC32
+ help
+ This is the driver for the XGMAC Ethernet IP block found on Calxeda
+ Highbank platforms.
diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile
new file mode 100644
index 00000000000..f0ef08067f9
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
new file mode 100644
index 00000000000..107c1b01080
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -0,0 +1,1928 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* XGMAC Register definitions */
+#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
+#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
+#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
+#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
+#define XGMAC_VERSION 0x00000020 /* Version */
+#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
+#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
+#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
+#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
+#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
+#define XGMAC_DEBUG 0x00000038 /* Debug */
+#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
+#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
+#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
+#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
+#define XGMAC_NUM_HASH 16
+#define XGMAC_OMR 0x00000400
+#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
+#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
+#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
+#define XGMAC_MMC_INTR_RX	0x00000804	/* Receive Interrupt */
+#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
+#define XGMAC_MMC_INTR_MASK_RX	0x0000080c	/* Receive Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */
+
+/* Hardware TX Statistics Counters */
+#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
+#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
+#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
+#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
+#define XGMAC_MMC_TXBCFRAME_G 0x00000824
+#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
+#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
+#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
+#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
+#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
+#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
+#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
+#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
+#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
+#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
+#define XGMAC_MMC_TXVLANFRAME 0x0000089C
+
+/* Hardware RX Statistics Counters */
+#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
+#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
+#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
+#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
+#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
+#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
+#define XGMAC_MMC_RXBCFRAME_G 0x00000918
+#define XGMAC_MMC_RXMCFRAME_G 0x00000920
+#define XGMAC_MMC_RXCRCERR 0x00000928
+#define XGMAC_MMC_RXRUNT 0x00000930
+#define XGMAC_MMC_RXJABBER 0x00000934
+#define XGMAC_MMC_RXUCFRAME_G 0x00000970
+#define XGMAC_MMC_RXLENGTHERR 0x00000978
+#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
+#define XGMAC_MMC_RXOVERFLOW 0x00000990
+#define XGMAC_MMC_RXVLANFRAME 0x00000998
+#define XGMAC_MMC_RXWATCHDOG 0x000009a0
+
+/* DMA Control and Status Registers */
+#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
+#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
+#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */
+#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
+#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
+#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
+#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
+#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
+#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
+#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
+#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
+#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
+#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
+
+#define XGMAC_ADDR_AE 0x80000000
+#define XGMAC_MAX_FILTER_ADDR 31
+
+/* PMT Control and Status */
+#define XGMAC_PMT_POINTER_RESET 0x80000000
+#define XGMAC_PMT_GLBL_UNICAST 0x00000200
+#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
+#define XGMAC_PMT_MAGIC_PKT 0x00000020
+#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
+#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
+#define XGMAC_PMT_POWERDOWN 0x00000001
+
+#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
+#define XGMAC_CONTROL_SPD_MASK 0x60000000
+#define XGMAC_CONTROL_SPD_1G 0x60000000
+#define XGMAC_CONTROL_SPD_2_5G 0x40000000
+#define XGMAC_CONTROL_SPD_10G 0x00000000
+#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
+#define XGMAC_CONTROL_SARK_MASK 0x18000000
+#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
+#define XGMAC_CONTROL_CAR_MASK 0x06000000
+#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
+#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
+#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
+#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
+#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+/* XGMAC Frame Filter defines */
+#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
+#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
+#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+
+/* XGMAC FLOW CTRL defines */
+#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define XGMAC_FLOW_CTRL_PT_SHIFT 16
+#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
+#define XGMAC_FLOW_CTRL_PLT	0x00000020	/* Pause Low Threshold */
+#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
+#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
+#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
+#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
+
+/* Programmable burst length */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_8PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TUE)
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
+ DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
+ DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
+ DMA_INTR_ENA_TSE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+/* Common MAC defines */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
+
+/* XGMAC Operation Mode Register */
+#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
+#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
+#define XGMAC_OMR_TTC		0x00020000	/* Transmit Threshold Ctrl */
+#define XGMAC_OMR_TTC_MASK 0x00030000
+#define XGMAC_OMR_RFD		0x00006000	/* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK	0x00007000	/* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA		0x00000600	/* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK	0x00000E00	/* FC Act Threshold MASK */
+#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
+#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
+#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
+#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
+#define XGMAC_OMR_RTC		0x00000010	/* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK	0x00000018	/* RX Threshold Ctrl MASK */
+
+/* XGMAC HW Features Register */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */
+
+#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* XGMAC Descriptor Defines */
+#define MAX_DESC_BUF_SZ (0x2000 - 8)
+
+#define RXDESC_EXT_STATUS 0x00000001
+#define RXDESC_CRC_ERR 0x00000002
+#define RXDESC_RX_ERR 0x00000008
+#define RXDESC_RX_WDOG 0x00000010
+#define RXDESC_FRAME_TYPE 0x00000020
+#define RXDESC_GIANT_FRAME 0x00000080
+#define RXDESC_LAST_SEG 0x00000100
+#define RXDESC_FIRST_SEG 0x00000200
+#define RXDESC_VLAN_FRAME 0x00000400
+#define RXDESC_OVERFLOW_ERR 0x00000800
+#define RXDESC_LENGTH_ERR 0x00001000
+#define RXDESC_SA_FILTER_FAIL 0x00002000
+#define RXDESC_DESCRIPTOR_ERR 0x00004000
+#define RXDESC_ERROR_SUMMARY 0x00008000
+#define RXDESC_FRAME_LEN_OFFSET 16
+#define RXDESC_FRAME_LEN_MASK 0x3fff0000
+#define RXDESC_DA_FILTER_FAIL 0x40000000
+
+#define RXDESC1_END_RING 0x00008000
+
+#define RXDESC_IP_PAYLOAD_MASK 0x00000003
+#define RXDESC_IP_PAYLOAD_UDP 0x00000001
+#define RXDESC_IP_PAYLOAD_TCP 0x00000002
+#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
+#define RXDESC_IP_HEADER_ERR 0x00000008
+#define RXDESC_IP_PAYLOAD_ERR 0x00000010
+#define RXDESC_IPV4_PACKET 0x00000040
+#define RXDESC_IPV6_PACKET 0x00000080
+#define TXDESC_UNDERFLOW_ERR 0x00000001
+#define TXDESC_JABBER_TIMEOUT 0x00000002
+#define TXDESC_LOCAL_FAULT 0x00000004
+#define TXDESC_REMOTE_FAULT 0x00000008
+#define TXDESC_VLAN_FRAME 0x00000010
+#define TXDESC_FRAME_FLUSHED 0x00000020
+#define TXDESC_IP_HEADER_ERR 0x00000040
+#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
+#define TXDESC_ERROR_SUMMARY 0x00008000
+#define TXDESC_SA_CTRL_INSERT 0x00040000
+#define TXDESC_SA_CTRL_REPLACE 0x00080000
+#define TXDESC_2ND_ADDR_CHAINED 0x00100000
+#define TXDESC_END_RING 0x00200000
+#define TXDESC_CSUM_IP 0x00400000
+#define TXDESC_CSUM_IP_PAYLD 0x00800000
+#define TXDESC_CSUM_ALL 0x00C00000
+#define TXDESC_CRC_EN_REPLACE 0x01000000
+#define TXDESC_CRC_EN_APPEND 0x02000000
+#define TXDESC_DISABLE_PAD 0x04000000
+#define TXDESC_FIRST_SEG 0x10000000
+#define TXDESC_LAST_SEG 0x20000000
+#define TXDESC_INTERRUPT 0x40000000
+
+#define DESC_OWN 0x80000000
+#define DESC_BUFFER1_SZ_MASK 0x00001fff
+#define DESC_BUFFER2_SZ_MASK 0x1fff0000
+#define DESC_BUFFER2_SZ_OFFSET 16
+
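+/*
+ * DMA descriptor layout: eight 32-bit words (32 bytes), which appears to
+ * correspond to the "alternate descriptor size" mode enabled through
+ * DMA_BUS_MODE_ATDS in xgmac_hw_init().
+ */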
+struct xgmac_dma_desc {
+ __le32 flags;
+ __le32 buf_size;
+ __le32 buf1_addr; /* Buffer 1 Address Pointer */
+ __le32 buf2_addr; /* Buffer 2 Address Pointer */
+ __le32 ext_status;
+ __le32 res[3];
+};
+
+struct xgmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ unsigned long tx_local_fault;
+ unsigned long tx_remote_fault;
+ /* Receive errors */
+ unsigned long rx_watchdog;
+ unsigned long rx_da_filter_fail;
+ unsigned long rx_sa_filter_fail;
+ unsigned long rx_payload_error;
+ unsigned long rx_ip_header_error;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow;
+ unsigned long tx_process_stopped;
+ unsigned long rx_buf_unav;
+ unsigned long rx_process_stopped;
+ unsigned long tx_early;
+ unsigned long fatal_bus_error;
+};
+
+struct xgmac_priv {
+ struct xgmac_dma_desc *dma_rx;
+ struct sk_buff **rx_skbuff;
+ unsigned int rx_tail;
+ unsigned int rx_head;
+
+ struct xgmac_dma_desc *dma_tx;
+ struct sk_buff **tx_skbuff;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ void __iomem *base;
+ struct sk_buff_head rx_recycle;
+ unsigned int dma_buf_sz;
+ dma_addr_t dma_rx_phy;
+ dma_addr_t dma_tx_phy;
+
+ struct net_device *dev;
+ struct device *device;
+ struct napi_struct napi;
+
+ struct xgmac_extra_stats xstats;
+
+ spinlock_t stats_lock;
+ int pmt_irq;
+ char rx_pause;
+ char tx_pause;
+ int wolopts;
+};
+
+/* XGMAC Configuration Settings */
+#define MAX_MTU 9000
+#define PAUSE_TIME 0x400
+
+#define DMA_RX_RING_SZ 256
+#define DMA_TX_RING_SZ 128
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TX_THRESH (DMA_TX_RING_SZ/4)
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
+#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
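+/*
+ * Note: these helpers (and the CIRC_* macros they wrap) mask with
+ * (size - 1), so DMA_RX_RING_SZ and DMA_TX_RING_SZ must remain powers
+ * of two.
+ */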
+
+/* XGMAC Descriptor Access Helpers */
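+/*
+ * Each descriptor carries two buffer pointers; desc_set_buf_len() and
+ * desc_set_buf_addr() split any buffer larger than MAX_DESC_BUF_SZ
+ * across buffer 1 and buffer 2 of the same descriptor.
+ */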
+static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
+{
+ if (buf_sz > MAX_DESC_BUF_SZ)
+ p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
+ (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
+ else
+ p->buf_size = cpu_to_le32(buf_sz);
+}
+
+static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
+{
+	u32 len = le32_to_cpu(p->buf_size);
+ return (len & DESC_BUFFER1_SZ_MASK) +
+ ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
+}
+
+static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
+ int buf_sz)
+{
+ struct xgmac_dma_desc *end = p + ring_size - 1;
+
+ memset(p, 0, sizeof(*p) * ring_size);
+
+ for (; p <= end; p++)
+ desc_set_buf_len(p, buf_sz);
+
+ end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
+}
+
+static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
+{
+ memset(p, 0, sizeof(*p) * ring_size);
+ p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
+}
+
+static inline int desc_get_owner(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & DESC_OWN;
+}
+
+static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
+{
+ /* Clear all fields and set the owner */
+ p->flags = cpu_to_le32(DESC_OWN);
+}
+
+static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
+{
+ u32 tmpflags = le32_to_cpu(p->flags);
+ tmpflags &= TXDESC_END_RING;
+ tmpflags |= flags | DESC_OWN;
+ p->flags = cpu_to_le32(tmpflags);
+}
+
+static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
+}
+
+static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->buf1_addr);
+}
+
+static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ p->buf1_addr = cpu_to_le32(paddr);
+ if (len > MAX_DESC_BUF_SZ)
+ p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
+}
+
+static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ desc_set_buf_len(p, len);
+ desc_set_buf_addr(p, paddr, len);
+}
+
+static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
+{
+ u32 data = le32_to_cpu(p->flags);
+ u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
+ if (data & RXDESC_FRAME_TYPE)
+ len -= ETH_FCS_LEN;
+
+ return len;
+}
+
+static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ int timeout = 1000;
+ u32 reg = readl(ioaddr + XGMAC_OMR);
+ writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
+
+ while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
+ udelay(1);
+}
+
+static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ u32 status = le32_to_cpu(p->flags);
+
+ if (!(status & TXDESC_ERROR_SUMMARY))
+ return 0;
+
+ netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
+ if (status & TXDESC_JABBER_TIMEOUT)
+ x->tx_jabber++;
+ if (status & TXDESC_FRAME_FLUSHED)
+ x->tx_frame_flushed++;
+ if (status & TXDESC_UNDERFLOW_ERR)
+ xgmac_dma_flush_tx_fifo(priv->base);
+ if (status & TXDESC_IP_HEADER_ERR)
+ x->tx_ip_header_error++;
+ if (status & TXDESC_LOCAL_FAULT)
+ x->tx_local_fault++;
+ if (status & TXDESC_REMOTE_FAULT)
+ x->tx_remote_fault++;
+ if (status & TXDESC_PAYLOAD_CSUM_ERR)
+ x->tx_payload_error++;
+
+ return -1;
+}
+
+static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ int ret = CHECKSUM_UNNECESSARY;
+ u32 status = le32_to_cpu(p->flags);
+ u32 ext_status = le32_to_cpu(p->ext_status);
+
+ if (status & RXDESC_DA_FILTER_FAIL) {
+ netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
+ x->rx_da_filter_fail++;
+ return -1;
+ }
+
+ /* Check if packet has checksum already */
+ if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
+ !(ext_status & RXDESC_IP_PAYLOAD_MASK))
+ ret = CHECKSUM_NONE;
+
+ netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
+ (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
+
+ if (!(status & RXDESC_ERROR_SUMMARY))
+ return ret;
+
+ /* Handle any errors */
+ if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
+ RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
+ return -1;
+
+ if (status & RXDESC_EXT_STATUS) {
+ if (ext_status & RXDESC_IP_HEADER_ERR)
+ x->rx_ip_header_error++;
+ if (ext_status & RXDESC_IP_PAYLOAD_ERR)
+ x->rx_payload_error++;
+ netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
+ ext_status);
+ return CHECKSUM_NONE;
+ }
+
+ return ret;
+}
+
+static inline void xgmac_mac_enable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_CONTROL);
+ value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+ writel(value, ioaddr + XGMAC_CONTROL);
+
+ value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+}
+
+static inline void xgmac_mac_disable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ value = readl(ioaddr + XGMAC_CONTROL);
+ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+ writel(value, ioaddr + XGMAC_CONTROL);
+}
+
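+/*
+ * Program a perfect-filter address slot. For secondary slots (num != 0)
+ * the XGMAC_ADDR_AE bit is set in the high word as well, presumably the
+ * per-slot address-enable flag.
+ */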
+static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 data;
+
+ data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+ writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+}
+
+static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
+ lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
+{
+ u32 reg;
+ unsigned int flow = 0;
+
+ priv->rx_pause = rx;
+ priv->tx_pause = tx;
+
+ if (rx || tx) {
+ if (rx)
+ flow |= XGMAC_FLOW_CTRL_RFE;
+ if (tx)
+ flow |= XGMAC_FLOW_CTRL_TFE;
+
+ flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
+ flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg |= XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ } else {
+ writel(0, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg &= ~XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ }
+
+ return 0;
+}
+
+static void xgmac_rx_refill(struct xgmac_priv *priv)
+{
+ struct xgmac_dma_desc *p;
+ dma_addr_t paddr;
+
+ while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
+ int entry = priv->rx_head;
+ struct sk_buff *skb;
+
+ p = priv->dma_rx + entry;
+
+		/* Slot still holds an skb - stop refilling */
+		if (priv->rx_skbuff[entry] != NULL)
+			break;
+
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+
+ netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
+ priv->rx_head, priv->rx_tail);
+
+ priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
+ /* Ensure descriptor is in memory before handing to h/w */
+ wmb();
+ desc_set_rx_owner(p);
+ }
+}
+
+/**
+ * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static int xgmac_dma_desc_rings_init(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int bfsize;
+
+	/* Set the buffer size according to the MTU;
+	 * for jumbo frames the buffer size needs to be bumped up.
+	 */
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
+ 64);
+
+ netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
+
+ priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ return -ENOMEM;
+
+ priv->dma_rx = dma_alloc_coherent(priv->device,
+ DMA_RX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma_rx;
+
+ priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skb;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device,
+ DMA_TX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx)
+ goto err_dma_tx;
+
+ netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ priv->dma_rx, priv->dma_tx,
+ (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+ priv->rx_tail = 0;
+ priv->rx_head = 0;
+ priv->dma_buf_sz = bfsize;
+ desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
+ xgmac_rx_refill(priv);
+
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
+ writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
+
+ return 0;
+
+err_dma_tx:
+ kfree(priv->tx_skbuff);
+err_tx_skb:
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+err_dma_rx:
+ kfree(priv->rx_skbuff);
+ return -ENOMEM;
+}
+
+static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
+{
+ int i;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->rx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_RX_RING_SZ; i++) {
+ if (priv->rx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_rx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->rx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
+{
+ int i, f;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->tx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_TX_RING_SZ; i++) {
+ if (priv->tx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_tx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
+ p = priv->dma_tx + i++;
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ }
+
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ xgmac_free_rx_skbufs(priv);
+ xgmac_free_tx_skbufs(priv);
+
+ /* Free the consistent memory allocated for descriptor rings */
+ if (priv->dma_tx) {
+ dma_free_coherent(priv->device,
+ DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ priv->dma_tx = NULL;
+ }
+ if (priv->dma_rx) {
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ priv->dma_rx = NULL;
+ }
+ kfree(priv->rx_skbuff);
+ priv->rx_skbuff = NULL;
+ kfree(priv->tx_skbuff);
+ priv->tx_skbuff = NULL;
+}
+
+/**
+ * xgmac_tx:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void xgmac_tx_complete(struct xgmac_priv *priv)
+{
+ int i;
+ void __iomem *ioaddr = priv->base;
+
+ writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
+
+ while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
+ unsigned int entry = priv->tx_tail;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct xgmac_dma_desc *p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (desc_get_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment */
+ if (desc_get_tx_ls(p))
+ desc_get_tx_status(priv, p);
+
+ netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
+ priv->tx_head, priv->tx_tail);
+
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ priv->tx_skbuff[entry] = NULL;
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+		if (!skb)
+			continue;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
+ DMA_TX_RING_SZ);
+ p = priv->dma_tx + priv->tx_tail;
+
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ }
+
+		/*
+		 * If there is room in the recycle queue and the skb is
+		 * the right size, add it back into the pool so it can be
+		 * reused for RX refill.
+		 */
+ if ((skb_queue_len(&priv->rx_recycle) <
+ DMA_RX_RING_SZ) &&
+ skb_recycle_check(skb, priv->dma_buf_sz))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+ }
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
+ TX_THRESH)
+ netif_wake_queue(priv->dev);
+}
+
+/**
+ * xgmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void xgmac_tx_err(struct xgmac_priv *priv)
+{
+ u32 reg, value, inten;
+
+ netif_stop_queue(priv->dev);
+
+ inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ reg = readl(priv->base + XGMAC_DMA_CONTROL);
+ writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
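+	/* Wait for the TX DMA state field (DMA_STATUS_TS_MASK) to report
+	 * stopped (0) or 0x600000, assumed here to be the suspended state,
+	 * before resetting the ring.
+	 */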
+ do {
+ value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
+ } while (value && (value != 0x600000));
+
+ xgmac_free_tx_skbufs(priv);
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+
+ writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
+ priv->base + XGMAC_DMA_STATUS);
+ writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+
+ netif_wake_queue(priv->dev);
+}
+
+static int xgmac_hw_init(struct net_device *dev)
+{
+ u32 value, ctrl;
+ int limit;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ /* Save the ctrl register value */
+ ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
+
+ /* SW reset */
+ value = DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+ limit = 15000;
+ while (limit-- &&
+ (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ cpu_relax();
+ if (limit < 0)
+ return -EBUSY;
+
+ value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
+ (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
+ DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ /* XGMAC requires AXI bus init. This is a 'magic number' for now */
+ writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+
+ ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
+ XGMAC_CONTROL_CAR;
+ if (dev->features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ value = DMA_CONTROL_DFF;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ /* Set the HW DMA mode and the COE */
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ ioaddr + XGMAC_OMR);
+
+ /* Reset the MMC counters */
+ writel(1, ioaddr + XGMAC_MMC_CTRL);
+ return 0;
+}
+
+/**
+ * xgmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int xgmac_open(struct net_device *dev)
+{
+ int ret;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+	/* Check that the MAC address is valid. If it is not, generate a
+	 * random one so the interface can still be brought up; a real
+	 * address can be set later, e.g.:
+	 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ netdev_dbg(priv->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ skb_queue_head_init(&priv->rx_recycle);
+ memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
+
+ /* Initialize the XGMAC and descriptors */
+ xgmac_hw_init(dev);
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+ xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
+
+ ret = xgmac_dma_desc_rings_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the MAC Rx/Tx */
+ xgmac_mac_enable(ioaddr);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/**
+ * xgmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int xgmac_stop(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ if (readl(priv->base + XGMAC_DMA_INTR_ENA))
+ napi_disable(&priv->napi);
+
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ skb_queue_purge(&priv->rx_recycle);
+
+ /* Disable the MAC core */
+ xgmac_mac_disable(priv->base);
+
+ /* Release and free the Rx/Tx resources */
+ xgmac_free_dma_desc_rings(priv);
+
+ return 0;
+}
+
+/**
+ * xgmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int entry;
+ int i;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct xgmac_dma_desc *desc, *first;
+ unsigned int desc_flags;
+ unsigned int len;
+ dma_addr_t paddr;
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+ (nfrags + 1)) {
+ writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
+ priv->base + XGMAC_DMA_INTR_ENA);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
+ TXDESC_CSUM_ALL : 0;
+ entry = priv->tx_head;
+ desc = priv->dma_tx + entry;
+ first = desc;
+
+ len = skb_headlen(skb);
+ paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+ }
+ priv->tx_skbuff[entry] = skb;
+ desc_set_buf_addr_and_size(desc, paddr, len);
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+
+ paddr = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+ }
+
+ entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ desc = priv->dma_tx + entry;
+ priv->tx_skbuff[entry] = NULL;
+
+ desc_set_buf_addr_and_size(desc, paddr, len);
+ if (i < (nfrags - 1))
+ desc_set_tx_owner(desc, desc_flags);
+ }
+
+	/* Interrupt on completion only for the last segment */
+ if (desc != first)
+ desc_set_tx_owner(desc, desc_flags |
+ TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+ else
+ desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+
+ /* Set owner on first desc last to avoid race condition */
+ wmb();
+ desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+
+ priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+ writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
+ return NETDEV_TX_OK;
+}
+
+static int xgmac_rx(struct xgmac_priv *priv, int limit)
+{
+ unsigned int entry;
+ unsigned int count = 0;
+ struct xgmac_dma_desc *p;
+
+ while (count < limit) {
+ int ip_checksum;
+ struct sk_buff *skb;
+ int frame_len;
+
+ writel(DMA_STATUS_RI | DMA_STATUS_NIS,
+ priv->base + XGMAC_DMA_STATUS);
+
+ entry = priv->rx_tail;
+ p = priv->dma_rx + entry;
+ if (desc_get_owner(p))
+ break;
+
+ count++;
+ priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
+
+ /* read the status of the incoming frame */
+ ip_checksum = desc_get_rx_status(priv, p);
+ if (ip_checksum < 0)
+ continue;
+
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
+ break;
+ }
+ priv->rx_skbuff[entry] = NULL;
+
+ frame_len = desc_get_rx_frame_len(p);
+ netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
+ frame_len, ip_checksum);
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ frame_len, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb->ip_summed = ip_checksum;
+ if (ip_checksum == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
+ }
+
+ xgmac_rx_refill(priv);
+
+ writel(1, priv->base + XGMAC_DMA_RX_POLL);
+
+ return count;
+}
+
+/**
+ * xgmac_poll - xgmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * This function implements the reception process.
+ * It also runs the TX completion handling.
+ */
+static int xgmac_poll(struct napi_struct *napi, int budget)
+{
+ struct xgmac_priv *priv = container_of(napi,
+ struct xgmac_priv, napi);
+ int work_done = 0;
+
+ xgmac_tx_complete(priv);
+ work_done = xgmac_rx(priv, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+ }
+ return work_done;
+}
+
+/**
+ * xgmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void xgmac_tx_timeout(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ xgmac_tx_err(priv);
+}
+
+/**
+ * xgmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+ int i;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ unsigned int value = 0;
+ u32 hash_filter[XGMAC_NUM_HASH];
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+ bool use_hash = false;
+
+ netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
+ netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC) {
+ writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
+ return;
+ }
+
+ memset(hash_filter, 0, sizeof(hash_filter));
+
+ if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_uc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits determine the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ value |= XGMAC_FRAME_FILTER_PM;
+ goto out;
+ }
+
+ if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_mc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits determine the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+out:
+ for (i = 0; i < XGMAC_NUM_HASH; i++)
+ writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
+
+ writel(value, ioaddr + XGMAC_FRAME_FILTER);
+}
+
+/**
+ * xgmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ int old_mtu;
+
+ if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
+ netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
+ return -EINVAL;
+ }
+
+ old_mtu = dev->mtu;
+ dev->mtu = new_mtu;
+
+ /* return early if the buffer sizes will not change */
+ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+ return 0;
+ if (old_mtu == new_mtu)
+ return 0;
+
+ /* Stop everything, get ready to change the MTU */
+ if (!netif_running(dev))
+ return 0;
+
+ /* Bring the interface down and then back up */
+ xgmac_stop(dev);
+ return xgmac_open(dev);
+}
+
+static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ intr_status = readl(ioaddr + XGMAC_INT_STAT);
+ if (intr_status & XGMAC_INT_STAT_PMT) {
+ netdev_dbg(priv->dev, "received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT */
+ readl(ioaddr + XGMAC_PMT);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ bool tx_err = false;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ struct xgmac_extra_stats *x = &priv->xstats;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(priv->base + XGMAC_DMA_STATUS);
+ intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+
+ /* It displays the DMA process states (CSR5 register) */
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ if (intr_status & DMA_STATUS_TJT) {
+ netdev_err(priv->dev, "transmit jabber\n");
+ x->tx_jabber++;
+ }
+ if (intr_status & DMA_STATUS_RU)
+ x->rx_buf_unav++;
+ if (intr_status & DMA_STATUS_RPS) {
+ netdev_err(priv->dev, "receive process stopped\n");
+ x->rx_process_stopped++;
+ }
+ if (intr_status & DMA_STATUS_ETI) {
+ netdev_err(priv->dev, "transmit early interrupt\n");
+ x->tx_early++;
+ }
+ if (intr_status & DMA_STATUS_TPS) {
+ netdev_err(priv->dev, "transmit process stopped\n");
+ x->tx_process_stopped++;
+ tx_err = true;
+ }
+ if (intr_status & DMA_STATUS_FBI) {
+ netdev_err(priv->dev, "fatal bus error\n");
+ x->fatal_bus_error++;
+ tx_err = true;
+ }
+
+ if (tx_err)
+ xgmac_tx_err(priv);
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+ writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void xgmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ xgmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct rtnl_link_stats64 *
+xgmac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *base = priv->base;
+ u32 count;
+
+ spin_lock_bh(&priv->stats_lock);
+ writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
+
+ storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
+ storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
+
+ storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
+ storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
+ storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
+ storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
+ storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
+
+ storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
+ storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
+
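+ /* tx_errors = all frames transmitted minus frames transmitted OK */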
+ count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
+ storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
+ storage->tx_packets = count;
+ storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
+
+ writel(0, base + XGMAC_MMC_CTRL);
+ spin_unlock_bh(&priv->stats_lock);
+ return storage;
+}
+
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+
+ return 0;
+}
+
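+/* Only the receive checksum offload (IPC) bit is toggled in the control
+ * register; other feature changes need no register update here. */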
+static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
+{
+ u32 ctrl;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ u32 changed = dev->features ^ features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ ctrl = readl(ioaddr + XGMAC_CONTROL);
+ if (features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ else
+ ctrl &= ~XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ return 0;
+}
+
+static const struct net_device_ops xgmac_netdev_ops = {
+ .ndo_open = xgmac_open,
+ .ndo_start_xmit = xgmac_xmit,
+ .ndo_stop = xgmac_stop,
+ .ndo_change_mtu = xgmac_change_mtu,
+ .ndo_set_rx_mode = xgmac_set_rx_mode,
+ .ndo_tx_timeout = xgmac_tx_timeout,
+ .ndo_get_stats64 = xgmac_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgmac_poll_controller,
+#endif
+ .ndo_set_mac_address = xgmac_set_mac_address,
+ .ndo_set_features = xgmac_set_features,
+};
+
+static int xgmac_ethtool_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = 0;
+ cmd->duplex = DUPLEX_FULL;
+ ethtool_cmd_speed_set(cmd, 10000);
+ cmd->supported = 0;
+ cmd->advertising = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ pause->rx_pause = priv->rx_pause;
+ pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
+struct xgmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+ bool is_reg;
+};
+
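+/* XGMAC_STAT() describes a counter kept in software in struct
+ * xgmac_extra_stats; XGMAC_HW_STAT() a counter read from an MMC register. */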
+#define XGMAC_STAT(m) \
+ { #m, offsetof(struct xgmac_priv, xstats.m), false }
+#define XGMAC_HW_STAT(m, reg_offset) \
+ { #m, reg_offset, true }
+
+static const struct xgmac_stats xgmac_gstrings_stats[] = {
+ XGMAC_STAT(tx_frame_flushed),
+ XGMAC_STAT(tx_payload_error),
+ XGMAC_STAT(tx_ip_header_error),
+ XGMAC_STAT(tx_local_fault),
+ XGMAC_STAT(tx_remote_fault),
+ XGMAC_STAT(tx_early),
+ XGMAC_STAT(tx_process_stopped),
+ XGMAC_STAT(tx_jabber),
+ XGMAC_STAT(rx_buf_unav),
+ XGMAC_STAT(rx_process_stopped),
+ XGMAC_STAT(rx_payload_error),
+ XGMAC_STAT(rx_ip_header_error),
+ XGMAC_STAT(rx_da_filter_fail),
+ XGMAC_STAT(rx_sa_filter_fail),
+ XGMAC_STAT(fatal_bus_error),
+ XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
+ XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
+ XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
+ XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
+ XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
+};
+#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
+
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy,
+ u64 *data)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void *p = priv;
+ int i;
+
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ if (xgmac_gstrings_stats[i].is_reg)
+ *data++ = readl(priv->base +
+ xgmac_gstrings_stats[i].stat_offset);
+ else
+ *data++ = *(u32 *)(p +
+ xgmac_gstrings_stats[i].stat_offset);
+ }
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return XGMAC_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ memcpy(p, xgmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void xgmac_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = priv->wolopts;
+ }
+}
+
+static int xgmac_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(priv->device))
+ return -ENOTSUPP;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ priv->wolopts = wol->wolopts;
+
+ if (wol->wolopts) {
+ device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(dev->irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(dev->irq);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops xgmac_ethtool_ops = {
+ .get_settings = xgmac_ethtool_getsettings,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = xgmac_get_pauseparam,
+ .set_pauseparam = xgmac_set_pauseparam,
+ .get_ethtool_stats = xgmac_get_ethtool_stats,
+ .get_strings = xgmac_get_strings,
+ .get_wol = xgmac_get_wol,
+ .set_wol = xgmac_set_wol,
+ .get_sset_count = xgmac_get_sset_count,
+};
+
+/**
+ * xgmac_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct net_device *ndev = NULL;
+ struct xgmac_priv *priv = NULL;
+ u32 uid;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ndev = alloc_etherdev(sizeof(struct xgmac_priv));
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ priv = netdev_priv(ndev);
+ platform_set_drvdata(pdev, ndev);
+ ether_setup(ndev);
+ ndev->netdev_ops = &xgmac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+ spin_lock_init(&priv->stats_lock);
+
+ priv->device = &pdev->dev;
+ priv->dev = ndev;
+ priv->rx_pause = 1;
+ priv->tx_pause = 1;
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ netdev_err(ndev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+
+ uid = readl(priv->base + XGMAC_VERSION);
+ netdev_info(ndev, "h/w version is 0x%x\n", uid);
+
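+ /* Mask all DMA interrupts until the handlers are installed */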
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq == -ENXIO) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = ndev->irq;
+ goto err_irq;
+ }
+
+ ret = request_irq(ndev->irq, xgmac_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+ ndev->irq, ret);
+ goto err_irq;
+ }
+
+ priv->pmt_irq = platform_get_irq(pdev, 1);
+ if (priv->pmt_irq == -ENXIO) {
+ netdev_err(ndev, "No pmt irq resource\n");
+ ret = priv->pmt_irq;
+ goto err_pmt_irq;
+ }
+
+ ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+ priv->pmt_irq, ret);
+ goto err_pmt_irq;
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ if (device_can_wakeup(priv->device))
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ ndev->features |= ndev->hw_features;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Get the MAC address */
+ xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ netdev_warn(ndev, "MAC address %pM not valid",
+ ndev->dev_addr);
+
+ netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ ret = register_netdev(ndev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ netif_napi_del(&priv->napi);
+ free_irq(priv->pmt_irq, ndev);
+err_pmt_irq:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ iounmap(priv->base);
+err_io:
+ free_netdev(ndev);
+err_alloc:
+ release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+/**
+ * xgmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function disables the MAC, frees the IRQ lines,
+ * unregisters the network device, unmaps the register region and
+ * releases the remaining allocated resources.
+ */
+static int xgmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+
+ xgmac_mac_disable(priv->base);
+
+ /* Free the IRQ lines */
+ free_irq(ndev->irq, ndev);
+ free_irq(priv->pmt_irq, ndev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi);
+
+ iounmap(priv->base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
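+/* Program the PMT register with the requested wake-up sources before the
+ * device is put into its low-power state. */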
+static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+ if (mode & WAKE_UCAST)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
+
+ writel(pmt, ioaddr + XGMAC_PMT);
+}
+
+static int xgmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ u32 value;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ napi_disable(&priv->napi);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ if (device_may_wakeup(priv->device)) {
+ /* Stop TX/RX DMA Only */
+ value = readl(priv->base + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, priv->base + XGMAC_DMA_CONTROL);
+
+ xgmac_pmt(priv->base, priv->wolopts);
+ } else
+ xgmac_mac_disable(priv->base);
+
+ return 0;
+}
+
+static int xgmac_resume(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ void __iomem *ioaddr = priv->base;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ xgmac_pmt(ioaddr, 0);
+
+ /* Enable the MAC and DMA */
+ xgmac_mac_enable(ioaddr);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ netif_device_attach(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
+#define XGMAC_PM_OPS (&xgmac_pm_ops)
+#else
+#define XGMAC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id xgmac_of_match[] = {
+ { .compatible = "calxeda,hb-xgmac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_of_match);
+
+static struct platform_driver xgmac_driver = {
+ .driver = {
+ .name = "calxedaxgmac",
+ .of_match_table = xgmac_of_match,
+ },
+ .probe = xgmac_probe,
+ .remove = xgmac_remove,
+ .driver.pm = XGMAC_PM_OPS,
+};
+
+module_platform_driver(xgmac_driver);
+
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index ca26d97171b..1d17c92f2dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
}
static int get_sset_count(struct net_device *dev, int sset)
@@ -849,7 +849,8 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static u32 t1_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t t1_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -863,9 +864,9 @@ static u32 t1_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int t1_set_features(struct net_device *dev, u32 features)
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
struct adapter *adapter = dev->ml_priv;
if (changed & NETIF_F_HW_VLAN_RX)
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index f9b60230004..47a84359d4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
/*
* Enable/disable VLAN acceleration.
*/
-void t1_vlan_mode(struct adapter *adapter, u32 features)
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
struct sge *sge = adapter->sge;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index e03980bcdd6..b9bf16b385f 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
int t1_sge_intr_error_handler(struct sge *);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 4d15c8f99c3..857cc254cab 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1576,12 +1576,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
- if (!fw_vers)
- strcpy(info->fw_version, "N/A");
- else {
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%s %u.%u.%u TP %u.%u.%u",
G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
@@ -1591,7 +1590,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
G_TP_VERSION_MAJOR(tp_vers),
G_TP_VERSION_MINOR(tp_vers),
G_TP_VERSION_MICRO(tp_vers));
- }
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -2531,7 +2529,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
}
}
-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2552,7 +2550,8 @@ static void cxgb_vlan_mode(struct net_device *dev, u32 features)
t3_synchronize_rx(adapter, pi);
}
-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2566,9 +2565,9 @@ static u32 cxgb_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
cxgb_vlan_mode(dev, features);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 90ff1318cc0..65e4b280619 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour(nr->new));
+ cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
break;
}
default:
@@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev)
static void cxgb_neigh_update(struct neighbour *neigh)
{
- struct net_device *dev = neigh->dev;
+ struct net_device *dev;
+ if (!neigh)
+ return;
+ dev = neigh->dev;
if (dev && (is_offloading(dev))) {
struct t3cdev *tdev = dev2t3cdev(dev);
@@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
struct net_device *olddev, *newdev;
+ struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = dst_get_neighbour(old)->dev;
- newdev = dst_get_neighbour(new)->dev;
+ n = dst_get_neighbour_noref(old);
+ if (!n)
+ return;
+ olddev = n->dev;
+
+ n = dst_get_neighbour_noref(new);
+ if (!n)
+ return;
+ newdev = n->dev;
+
if (!is_offloading(olddev))
return;
if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+ e = t3_l2t_get(tdev, new, newdev);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
@@ -1301,7 +1313,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
out_free_l2t:
t3_free_l2t(L2DATA(dev));
- rcu_assign_pointer(dev->l2opt, NULL);
+ RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
kfree(t);
return err;
@@ -1329,7 +1341,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
rcu_read_lock();
d = L2DATA(tdev);
rcu_read_unlock();
- rcu_assign_pointer(tdev->l2opt, NULL);
+ RCU_INIT_POINTER(tdev->l2opt, NULL);
call_rcu(&d->rcu_head, clean_l2_data);
if (t->nofail_skb)
kfree_skb(t->nofail_skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 70fec8b1140..3fa3c8833ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
spin_unlock(&e->lock);
}
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev)
{
struct l2t_entry *e = NULL;
+ struct neighbour *neigh;
+ struct port_info *p;
struct l2t_data *d;
int hash;
- u32 addr = *(u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- struct port_info *p = netdev_priv(dev);
- int smt_idx = p->port_id;
+ u32 addr;
+ int ifidx;
+ int smt_idx;
rcu_read_lock();
+ neigh = dst_get_neighbour_noref(dst);
+ if (!neigh)
+ goto done_rcu;
+
+ addr = *(u32 *) neigh->primary_key;
+ ifidx = neigh->dev->ifindex;
+
+ if (!dev)
+ dev = neigh->dev;
+ p = netdev_priv(dev);
+ smt_idx = p->port_id;
+
d = L2DATA(cdev);
if (!d)
goto done_rcu;
@@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
- goto done;
+ goto done_unlock;
}
/* Need to allocate a new entry */
@@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
}
-done:
+done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
rcu_read_unlock();
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c5f54796e2c..c4e86436975 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4c8f42afa3c..7b6b43d576d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -243,7 +243,7 @@ module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
"thresholds 1..3 for queue interrupt packet counters");
-static int vf_acls;
+static bool vf_acls;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
@@ -1002,13 +1002,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
- if (!adapter->params.fw_vers)
- strcpy(info->fw_version, "N/A");
- else
+ if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
@@ -1855,10 +1854,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return err;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
const struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
int err;
if (!(changed & NETIF_F_HW_VLAN_RX))
@@ -1872,30 +1871,30 @@ static int cxgb_set_features(struct net_device *dev, u32 features)
return err;
}
-static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+static u32 get_rss_table_size(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p)
{
const struct port_info *pi = netdev_priv(dev);
- unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
+ unsigned int n = pi->rss_size;
- p->size = pi->rss_size;
while (n--)
- p->ring_index[n] = pi->rss[n];
+ p[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev,
- const struct ethtool_rxfh_indir *p)
+static int set_rss_table(struct net_device *dev, const u32 *p)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
- if (p->size != pi->rss_size)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- if (p->ring_index[i] >= pi->nqsets)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- pi->rss[i] = p->ring_index[i];
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
if (pi->adapter->flags & FULL_INIT_DONE)
return write_rss(pi, pi->rss);
return 0;
@@ -1990,6 +1989,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
+ .get_rxfh_indir_size = get_rss_table_size,
.get_rxfh_indir = get_rss_table,
.set_rxfh_indir = set_rss_table,
.flash_device = set_flash,
@@ -3449,7 +3449,7 @@ static int __devinit init_rss(struct adapter *adap)
if (!pi->rss)
return -ENOMEM;
for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = j % pi->nqsets;
+ pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
@@ -3537,7 +3537,7 @@ static int __devinit init_one(struct pci_dev *pdev,
{
int func, i, err;
struct port_info *pi;
- unsigned int highdma = 0;
+ bool highdma = false;
struct adapter *adapter = NULL;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -3563,7 +3563,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- highdma = NETIF_F_HIGHDMA;
+ highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
@@ -3637,7 +3637,9 @@ static int __devinit init_one(struct pci_dev *pdev,
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- netdev->features |= netdev->hw_features | highdma;
+ if (highdma)
+ netdev->hw_features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 140254c7cba..2dae7959f00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
- gfp |= __GFP_NOWARN; /* failures are expected */
+ gfp |= __GFP_NOWARN | __GFP_COLD;
#if FL_PG_ORDER > 0
/*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
#endif
while (n--) {
- pg = __netdev_alloc_page(adap->port[0], gfp);
+ pg = alloc_page(gfp);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
- netdev_free_page(adap->port[0], pg);
+ put_page(pg);
goto out;
}
*d++ = cpu_to_be64(mapping);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index da9072bfca8..8155cfecae1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1092,7 +1092,8 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1106,10 +1107,11 @@ static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+static int cxgb4vf_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
@@ -1203,9 +1205,10 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, DRV_VERSION);
- strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8d5d55ad102..c381db23e71 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
alloc_small_pages:
while (n--) {
- page = __netdev_alloc_page(adapter->port[0],
- gfp | __GFP_NOWARN);
+ page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
if (unlikely(!page)) {
fl->alloc_failed++;
break;
@@ -664,7 +663,7 @@ alloc_small_pages:
dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
- netdev_free_page(adapter->port[0], page);
+ put_page(page);
break;
}
*d++ = cpu_to_be64(dma_addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index fd6247b3c0e..bf0fc56dba1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
}
/* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_add_vlan(enic, vid);
+ err = enic_add_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
/* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_del_vlan(enic, vid);
+ err = enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
int enic_dev_enable2(struct enic *enic, int active)
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 1f83a4747ba..da1cba3c410 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
int enic_dev_add_addr(struct enic *enic, u8 *addr);
int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c3786fda11d..2fd9db4b1be 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -217,11 +217,11 @@ static void enic_get_drvinfo(struct net_device *netdev,
enic_dev_fw_info(enic, &fw_info);
- strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, fw_info->fw_version,
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
@@ -2379,7 +2379,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
#endif
/* Allocate structure for port profiles */
- enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+ enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
if (!enic->pp) {
pr_err("port profile alloc failed, aborting\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 438f4580bf6..f801754c71a 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -474,10 +474,11 @@ static int dm9000_nway_reset(struct net_device *dev)
return mii_nway_restart(&dm->mii);
}
-static int dm9000_set_features(struct net_device *dev, u32 features)
+static int dm9000_set_features(struct net_device *dev,
+ netdev_features_t features)
{
board_info_t *dm = to_dm9000_board(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
unsigned long flags;
if (!(changed & NETIF_F_RXCSUM))
@@ -613,7 +614,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
- else if (dm->wake_state & !opts)
+ else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 1427739d9a5..1eb46a0bb48 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(de->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
info->eedump_len = DE_EEPROM_SIZE;
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 871bcaa7068..4d71f5ae20c 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2127,14 +2127,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
u_long iobase = 0; /* Clear upper 32 bits in Alphas */
int i, j;
struct de4x5_private *lp = netdev_priv(dev);
- struct list_head *walk;
-
- list_for_each(walk, &pdev->bus_list) {
- struct pci_dev *this_dev = pci_dev_b(walk);
-
- /* Skip the pci_bus list entry */
- if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+ struct pci_dev *this_dev;
+ list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
vendor = this_dev->vendor;
device = this_dev->device << 8;
if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
@@ -5196,7 +5191,7 @@ de4x5_parse_params(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
char *p, *q, t;
- lp->params.fdx = 0;
+ lp->params.fdx = false;
lp->params.autosense = AUTO;
if (args == NULL) return;
@@ -5206,7 +5201,7 @@ de4x5_parse_params(struct net_device *dev)
t = *q;
*q = '\0';
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
if (strstr(p, "TP")) {
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 17b11ee1745..51f7542eb45 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1085,10 +1085,11 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9656dd0647d..4eb0d76145c 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 7a44a7a6adc..48b0b6566ee 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -960,10 +960,11 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 4d01219ba22..52da7b2fe3b 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
index 23a65398d01..c24fab1e9cb 100644
--- a/drivers/net/ethernet/dlink/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
#include "de600.h"
-static unsigned int check_lost = 1;
+static bool check_lost = true;
module_param(check_lost, bool, 0);
MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index dcd7f7a71ad..28a3a9b50b8 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index c1063d1540c..ce88c0f399f 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -804,9 +804,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, "0");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -977,18 +977,7 @@ static struct platform_driver dnet_driver = {
},
};
-static int __init dnet_init(void)
-{
- return platform_driver_register(&dnet_driver);
-}
-
-static void __exit dnet_exit(void)
-{
- platform_driver_unregister(&dnet_driver);
-}
-
-module_init(dnet_init);
-module_exit(dnet_exit);
+module_platform_driver(dnet_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 644e8fed836..cbdec2536da 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -40,6 +40,7 @@
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
+#define OC_NAME_SH OC_NAME "(Skyhawk)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
@@ -50,6 +51,7 @@
#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
static inline char *nic_name(struct pci_dev *pdev)
{
@@ -63,6 +65,8 @@ static inline char *nic_name(struct pci_dev *pdev)
return OC_NAME_LANCER;
case BE_DEVICE_ID2:
return BE3_NAME;
+ case OC_DEVICE_ID5:
+ return OC_NAME_SH;
default:
return BE_NAME;
}
@@ -288,14 +292,14 @@ struct be_drv_stats {
};
struct be_vf_cfg {
- unsigned char vf_mac_addr[ETH_ALEN];
- u32 vf_if_handle;
- u32 vf_pmac_id;
- u16 vf_vlan_tag;
- u32 vf_tx_rate;
+ unsigned char mac_addr[ETH_ALEN];
+ int if_handle;
+ int pmac_id;
+ u16 vlan_tag;
+ u32 tx_rate;
};
-#define BE_INVALID_PMAC_ID 0xffffffff
+#define BE_FLAGS_LINK_STATUS_INIT 1
struct be_adapter {
struct pci_dev *pdev;
@@ -345,13 +349,16 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
- u32 if_handle; /* Used to configure filtering */
+ int if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
bool eeh_err;
+ bool ue_detected;
+ bool fw_timeout;
u32 port_num;
bool promiscuous;
bool wol;
@@ -359,7 +366,6 @@ struct be_adapter {
u32 function_caps;
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
- bool ue_detected;
bool stats_cmd_sent;
int link_speed;
u8 port_type;
@@ -369,16 +375,20 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
- bool be3_native;
- bool sriov_enabled;
- struct be_vf_cfg *vf_cfg;
+ u32 num_vfs;
u8 is_virtfn;
+ struct be_vf_cfg *vf_cfg;
+ bool be3_native;
u32 sli_family;
u8 hba_port_num;
u16 pvid;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
+#define sriov_enabled(adapter) (adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i) \
+ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+ i++, vf_cfg++)
/* BladeEngine Generation numbers */
#define BE_GEN2 2
@@ -524,9 +534,14 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
return adapter->num_rx_qs > 1;
}
+static inline bool be_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2c7b36673df..0fcb4562479 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -31,11 +31,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
- if (adapter->eeh_err) {
- dev_info(&adapter->pdev->dev,
- "Error in Card Detected! Cannot issue commands\n");
+ if (be_error(adapter))
return;
- }
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -128,7 +125,14 @@ done:
static void be_async_link_state_process(struct be_adapter *adapter,
struct be_async_event_link_state *evt)
{
- be_link_status_update(adapter, evt->port_link_status);
+ /* When link status changes, link speed must be re-queried from FW */
+ adapter->link_speed = -1;
+
+ /* For the initial link status do not rely on the ASYNC event as
+ * it may not be received in some cases.
+ */
+ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
+ be_link_status_update(adapter, evt->port_link_status);
}
/* Grp5 CoS Priority evt */
@@ -266,10 +270,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- if (adapter->eeh_err)
- return -EIO;
-
for (i = 0; i < mcc_timeout; i++) {
+ if (be_error(adapter))
+ return -EIO;
+
num = be_process_mcc(adapter, &status);
if (num)
be_cq_notify(adapter, mcc_obj->cq.id,
@@ -280,7 +284,8 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
udelay(100);
}
if (i == mcc_timeout) {
- dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
return -1;
}
return status;
@@ -298,26 +303,21 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
- if (adapter->eeh_err) {
- dev_err(&adapter->pdev->dev,
- "Error detected in card.Cannot issue commands\n");
- return -EIO;
- }
-
do {
+ if (be_error(adapter))
+ return -EIO;
+
ready = ioread32(db);
- if (ready == 0xffffffff) {
- dev_err(&adapter->pdev->dev,
- "pci slot disconnected\n");
+ if (ready == 0xffffffff)
return -1;
- }
ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
if (msecs > 4000) {
- dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
be_detect_dump_ue(adapter);
return -1;
}
@@ -555,9 +555,6 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -619,7 +616,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle)
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mac_query *req;
@@ -641,6 +638,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req->permanent = 1;
} else {
req->if_id = cpu_to_le16((u16) if_handle);
+ req->pmac_id = cpu_to_le32(pmac_id);
req->permanent = 0;
}
@@ -695,12 +693,15 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
int status;
+ if (pmac_id == -1)
+ return 0;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -923,10 +924,14 @@ int be_cmd_txq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
- wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
ctxt = &req->context;
@@ -952,14 +957,15 @@ int be_cmd_txq_create(struct be_adapter *adapter,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1018,9 +1024,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1136,16 +1139,13 @@ err:
}
/* Uses MCCQ */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
- if (!interface_id)
+ if (interface_id == -1)
return 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1239,7 +1239,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
- u16 *link_speed, u32 dom)
+ u16 *link_speed, u8 *link_status, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_link_status *req;
@@ -1247,6 +1247,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
spin_lock_bh(&adapter->mcc_lock);
+ if (link_status)
+ *link_status = LINK_DOWN;
+
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
@@ -1254,6 +1257,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
}
req = embedded_payload(wrb);
+ if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+ req->hdr.version = 1;
+
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
@@ -1261,10 +1267,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
- *link_speed = le16_to_cpu(resp->link_speed);
+ if (link_speed)
+ *link_speed = le16_to_cpu(resp->link_speed);
if (mac_speed)
*mac_speed = resp->mac_speed;
}
+ if (link_status)
+ *link_status = resp->logical_link_status;
}
err:
@@ -1673,8 +1682,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
- 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+ u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
+ 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
+ 0x3ea83c02, 0x4a110304};
int status;
if (mutex_lock_interruptible(&adapter->mbox_lock))
@@ -1836,6 +1846,53 @@ err_unlock:
return status;
}
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_read_object *req;
+ struct lancer_cmd_resp_read_object *resp;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
+
+ req->desired_read_len = cpu_to_le32(data_size);
+ req->read_offset = cpu_to_le32(data_offset);
+ strcpy(req->object_name, obj_name);
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
+ req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+ status = be_mcc_notify_wait(adapter);
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_read = le32_to_cpu(resp->actual_read_len);
+ *eof = le32_to_cpu(resp->eof);
+ } else {
+ *addn_status = resp->additional_status;
+ }
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
@@ -2238,3 +2295,99 @@ err:
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
+/* Uses synchronous MCCQ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_mac_list *req;
+ int status;
+ int mac_count;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_mac_list *resp =
+ embedded_payload(wrb);
+ int i;
+ u8 *ctxt = &resp->context[0][0];
+ status = -EIO;
+ mac_count = resp->mac_count;
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+ for (i = 0; i < mac_count; i++) {
+ if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
+ act, ctxt)) {
+ *pmac_id = AMAP_GET_BITS
+ (struct amap_get_mac_list_context,
+ macid, ctxt);
+ status = 0;
+ break;
+ }
+ ctxt += sizeof(struct amap_get_mac_list_context) / 8;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_mac_list *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+ &cmd.dma, GFP_KERNEL);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
+
+ req->hdr.domain = domain;
+ req->mac_count = mac_count;
+ if (mac_count)
+ memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, cmd.size,
+ cmd.va, cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a35cd03fac4..dca89249088 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -189,6 +189,9 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
+#define OPCODE_COMMON_GET_MAC_LIST 147
+#define OPCODE_COMMON_SET_MAC_LIST 148
+#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
#define OPCODE_ETH_RSS_CONFIG 1
@@ -294,6 +297,7 @@ struct be_cmd_req_mac_query {
u8 type;
u8 permanent;
u16 if_id;
+ u32 pmac_id;
} __packed;
struct be_cmd_resp_mac_query {
@@ -956,7 +960,8 @@ struct be_cmd_resp_link_status {
u8 mgmt_mac_duplex;
u8 mgmt_mac_speed;
u16 link_speed;
- u32 rsvd0;
+ u8 logical_link_status;
+ u8 rsvd1[3];
} __packed;
/******************** Port Identification ***************************/
@@ -1161,6 +1166,38 @@ struct lancer_cmd_resp_write_object {
u32 actual_write_len;
};
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK (32*1024)
+#define LANCER_READ_FILE_EOF_MASK 0x80000000
+
+#define LANCER_FW_DUMP_FILE "/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+ struct be_cmd_req_hdr hdr;
+ u32 desired_read_len;
+ u32 read_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_read_len;
+ u32 eof;
+};
+
/************************ WOL *******************************/
struct be_cmd_req_acpi_wol_magic_config{
struct be_cmd_req_hdr hdr;
@@ -1307,6 +1344,34 @@ struct be_cmd_resp_set_func_cap {
u8 rsvd[212];
};
+/******************** GET/SET_MACLIST **************************/
+#define BE_MAX_MAC 64
+struct amap_get_mac_list_context {
+ u8 macid[31];
+ u8 act;
+} __packed;
+
+struct be_cmd_req_get_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+ struct be_cmd_resp_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1413,15 +1478,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle);
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- u32 pmac_id, u32 domain);
+ int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
@@ -1443,8 +1508,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter,
- u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+ u16 *link_speed, u8 *link_status, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
extern int be_cmd_get_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -1480,6 +1545,9 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter,
u32 data_size, u32 data_offset,
const char *obj_name,
u32 *data_written, u8 *addn_status);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1506,4 +1574,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id);
+extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bf8153ea4ed..6db6b6ae5e9 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev,
memset(fw_on_flash, 0 , sizeof(fw_on_flash));
be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VER);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
strcat(drvinfo->fw_version, " [");
@@ -136,21 +136,84 @@ static void be_get_drvinfo(struct net_device *netdev,
strcat(drvinfo->fw_version, "]");
}
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
+static u32
+lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+ u32 data_read = 0, eof;
+ u8 addn_status;
+ struct be_dma_mem data_len_cmd;
+ int status;
+
+ memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+ /* data_offset and data_size should be 0 to query the file length */
+ status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
+ file_name, &data_read, &eof, &addn_status);
+
+ return data_read;
+}
+
+static int
+lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
+{
+ struct be_dma_mem read_cmd;
+ u32 read_len = 0, total_read_len = 0, chunk_size;
+ u32 eof = 0;
+ u8 addn_status;
+ int status = 0;
+
+ read_cmd.size = LANCER_READ_FILE_CHUNK;
+ read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+ &read_cmd.dma);
+
+ if (!read_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while reading dump\n");
+ return -ENOMEM;
+ }
+
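+ /* Read the object in LANCER_READ_FILE_CHUNK sized pieces until the
+  * firmware signals EOF or buf_len bytes have been copied.
+  */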
+ while ((total_read_len < buf_len) && !eof) {
+ chunk_size = min_t(u32, (buf_len - total_read_len),
+ LANCER_READ_FILE_CHUNK);
+ chunk_size = ALIGN(chunk_size, 4);
+ status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+ total_read_len, file_name, &read_len,
+ &eof, &addn_status);
+ if (!status) {
+ memcpy(buf + total_read_len, read_cmd.va, read_len);
+ total_read_len += read_len;
+ eof &= LANCER_READ_FILE_EOF_MASK;
+ } else {
+ status = -EIO;
+ break;
+ }
+ }
+ pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+ read_cmd.dma);
+
+ return status;
+}
+
static int
be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
- if (be_physfn(adapter))
- be_cmd_get_reg_len(adapter, &log_size);
-
+ if (be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ log_size = lancer_cmd_get_file_len(adapter,
+ LANCER_FW_DUMP_FILE);
+ else
+ be_cmd_get_reg_len(adapter, &log_size);
+ }
return log_size;
}
@@ -161,7 +224,11 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
if (be_physfn(adapter)) {
memset(buf, 0, regs->len);
- be_cmd_get_regs(adapter, regs->len, buf);
+ if (lancer_chip(adapter))
+ lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+ regs->len, buf);
+ else
+ be_cmd_get_regs(adapter, regs->len, buf);
}
}
@@ -362,11 +429,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct be_phy_info phy_info;
u8 mac_speed = 0;
u16 link_speed = 0;
+ u8 link_status;
int status;
if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
status = be_cmd_link_status_query(adapter, &mac_speed,
- &link_speed, 0);
+ &link_speed, &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
/* link_speed is in units of 10 Mbps */
if (link_speed) {
@@ -453,16 +523,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct be_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->rx_obj[0].q.len;
- ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
- ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
- ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+ ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
}
static void
@@ -636,7 +703,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
if (be_cmd_link_status_query(adapter, &mac_speed,
- &qos_link_speed, 0) != 0) {
+ &qos_link_speed, NULL, 0) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
} else if (!mac_speed) {
@@ -660,7 +727,17 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
static int
be_get_eeprom_len(struct net_device *netdev)
{
- return BE_READ_SEEPROM_LEN;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_PF_FILE);
+ else
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_VF_FILE);
+ } else {
+ return BE_READ_SEEPROM_LEN;
+ }
}
static int
@@ -675,6 +752,15 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!eeprom->len)
return -EINVAL;
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+ eeprom->len, data);
+ else
+ return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+ eeprom->len, data);
+ }
+
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bf266a00c77..6c46753aeb4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
-static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -41,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,7 +239,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
status = be_cmd_mac_addr_query(adapter, current_mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (status)
goto err;
@@ -315,6 +318,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
struct be_drv_stats *drvs = &adapter->drv_stats;
be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
drvs->rx_pause_frames = port_stats->rx_pause_frames;
drvs->rx_crc_errors = port_stats->rx_crc_errors;
drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -491,19 +496,19 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
return stats;
}
-void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
struct net_device *netdev = adapter->netdev;
- /* when link status changes, link speed must be re-queried from card */
- adapter->link_speed = -1;
- if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
- netif_carrier_on(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
- } else {
+ if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
netif_carrier_off(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+ adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
}
+
+ if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
}
static void be_tx_stats_update(struct be_tx_obj *txo,
@@ -549,11 +554,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u8 vlan_prio;
+ u16 vlan_tag;
+
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ /* If vlan priority provided by OS is NOT in available bmap */
+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+ adapter->recommended_prio;
+
+ return vlan_tag;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
- u8 vlan_prio = 0;
- u16 vlan_tag = 0;
+ u16 vlan_tag;
memset(hdr, 0, sizeof(*hdr));
@@ -584,12 +604,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
if (vlan_tx_tag_present(skb)) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
- vlan_tag = vlan_tx_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- /* If vlan priority provided by OS is NOT in available bmap */
- if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
- vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
- adapter->recommended_prio;
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
}
@@ -692,6 +707,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 start = txq->head;
bool dummy_wrb, stopped = false;
+ /* For vlan tagged pkts, BE
+ * 1) calculates checksum even when CSO is not requested
+ * 2) calculates checksum wrongly for padded pkt less than
+ * 60 bytes long.
+ * As a workaround disable TX vlan offloading in such cases.
+ */
+ if (unlikely(vlan_tx_tag_present(skb) &&
+ (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb->vlan_tci = 0;
+ }
+
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -719,6 +753,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
txq->head = start;
dev_kfree_skb_any(skb);
}
+tx_drop:
return NETDEV_TX_OK;
}
@@ -746,15 +781,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
*/
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
int status = 0;
- u32 if_handle;
if (vf) {
- if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
- vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
- status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+ vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+ status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+ 1, 1, 0);
}
/* No need to further configure vids if in promiscuous mode */
@@ -779,31 +814,48 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
return status;
}
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added++;
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added++;
+ else
+ adapter->vlan_tag[vid] = 0;
+ret:
+ return status;
}
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added--;
-
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added--;
+ else
+ adapter->vlan_tag[vid] = 1;
+ret:
+ return status;
}
static void be_set_rx_mode(struct net_device *netdev)
@@ -840,28 +892,30 @@ done:
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
return -EINVAL;
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- status = be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
mac, vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
return status;
}
@@ -870,18 +924,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (vf >= num_vfs)
+ if (vf >= adapter->num_vfs)
return -EINVAL;
vi->vf = vf;
- vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
- vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+ vi->tx_rate = vf_cfg->tx_rate;
+ vi->vlan = vf_cfg->vlan_tag;
vi->qos = 0;
- memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+ memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
return 0;
}
@@ -892,17 +947,17 @@ static int be_set_vf_vlan(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (vlan > 4095))
+ if (vf >= adapter->num_vfs || vlan > 4095)
return -EINVAL;
if (vlan) {
- adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+ adapter->vf_cfg[vf].vlan_tag = vlan;
adapter->vlans_added++;
} else {
- adapter->vf_cfg[vf].vf_vlan_tag = 0;
+ adapter->vf_cfg[vf].vlan_tag = 0;
adapter->vlans_added--;
}
@@ -920,21 +975,25 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (rate < 0))
+ if (vf >= adapter->num_vfs)
return -EINVAL;
- if (rate > 10000)
- rate = 10000;
+ if (rate < 100 || rate > 10000) {
+ dev_err(&adapter->pdev->dev,
+ "tx rate must be between 100 and 10000 Mbps\n");
+ return -EINVAL;
+ }
- adapter->vf_cfg[vf].vf_tx_rate = rate;
status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
- dev_info(&adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"tx rate %d on VF %d failed\n", rate, vf);
+ else
+ adapter->vf_cfg[vf].tx_rate = rate;
return status;
}
@@ -1645,8 +1704,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
static int be_num_txqs_want(struct be_adapter *adapter)
{
- if ((num_vfs && adapter->sriov_enabled) ||
- be_is_mc(adapter) ||
+ if (sriov_enabled(adapter) || be_is_mc(adapter) ||
lancer_chip(adapter) || !be_physfn(adapter) ||
adapter->generation == BE_GEN2)
return 1;
@@ -1662,9 +1720,12 @@ static int be_tx_queues_create(struct be_adapter *adapter)
u8 i;
adapter->num_tx_qs = be_num_txqs_want(adapter);
- if (adapter->num_tx_qs != MAX_TX_QS)
+ if (adapter->num_tx_qs != MAX_TX_QS) {
+ rtnl_lock();
netif_set_real_num_tx_queues(adapter->netdev,
adapter->num_tx_qs);
+ rtnl_unlock();
+ }
adapter->tx_eq.max_eqd = 0;
adapter->tx_eq.min_eqd = 0;
@@ -1693,9 +1754,6 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_queue_alloc(adapter, q, TX_Q_LEN,
sizeof(struct be_eth_wrb)))
goto err;
-
- if (be_cmd_txq_create(adapter, q, cq))
- goto err;
}
return 0;
@@ -1728,8 +1786,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && be_physfn(adapter) &&
- !be_is_mc(adapter)) {
+ !sriov_enabled(adapter) && be_physfn(adapter) &&
+ !be_is_mc(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,
@@ -1929,6 +1987,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter =
container_of(tx_eq, struct be_adapter, tx_eq);
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp;
int tx_compl, mcc_compl, status = 0;
@@ -1965,12 +2024,19 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
mcc_compl = be_process_mcc(adapter, &status);
if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
}
napi_complete(napi);
+ /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
+ if (lancer_chip(adapter) && !msix_enabled(adapter)) {
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, true, 0);
+
+ be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+ }
+
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
adapter->drv_stats.tx_events++;
return 1;
@@ -1982,6 +2048,9 @@ void be_detect_dump_ue(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
if (lancer_chip(adapter)) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2008,7 +2077,8 @@ void be_detect_dump_ue(struct be_adapter *adapter)
sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->ue_detected = true;
adapter->eeh_err = true;
- dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable error in the card\n");
}
if (ue_lo) {
@@ -2036,53 +2106,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
-static void be_worker(struct work_struct *work)
-{
- struct be_adapter *adapter =
- container_of(work, struct be_adapter, work.work);
- struct be_rx_obj *rxo;
- int i;
-
- if (!adapter->ue_detected)
- be_detect_dump_ue(adapter);
-
- /* when interrupts are not yet enabled, just reap any pending
- * mcc completions */
- if (!netif_running(adapter->netdev)) {
- int mcc_compl, status = 0;
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
- }
-
- goto reschedule;
- }
-
- if (!adapter->stats_cmd_sent) {
- if (lancer_chip(adapter))
- lancer_cmd_get_pport_stats(adapter,
- &adapter->stats_cmd);
- else
- be_cmd_get_stats(adapter, &adapter->stats_cmd);
- }
-
- for_all_rx_queues(adapter, rxo, i) {
- be_rx_eqd_update(adapter, rxo);
-
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
- be_post_rx_frags(rxo, GFP_KERNEL);
- }
- }
-
-reschedule:
- adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
static void be_msix_disable(struct be_adapter *adapter)
{
if (msix_enabled(adapter)) {
@@ -2119,27 +2142,28 @@ done:
static int be_sriov_enable(struct be_adapter *adapter)
{
be_check_sriov_fn_type(adapter);
+
#ifdef CONFIG_PCI_IOV
if (be_physfn(adapter) && num_vfs) {
int status, pos;
- u16 nvfs;
+ u16 dev_vfs;
pos = pci_find_ext_capability(adapter->pdev,
PCI_EXT_CAP_ID_SRIOV);
pci_read_config_word(adapter->pdev,
- pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+ pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
- if (num_vfs > nvfs) {
+ adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+ if (adapter->num_vfs != num_vfs)
dev_info(&adapter->pdev->dev,
- "Device supports %d VFs and not %d\n",
- nvfs, num_vfs);
- num_vfs = nvfs;
- }
+ "Device supports %d VFs and not %d\n",
+ adapter->num_vfs, num_vfs);
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- adapter->sriov_enabled = status ? false : true;
+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (status)
+ adapter->num_vfs = 0;
- if (adapter->sriov_enabled) {
+ if (adapter->num_vfs) {
adapter->vf_cfg = kcalloc(num_vfs,
sizeof(struct be_vf_cfg),
GFP_KERNEL);
@@ -2154,10 +2178,10 @@ static int be_sriov_enable(struct be_adapter *adapter)
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
- if (adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
pci_disable_sriov(adapter->pdev);
kfree(adapter->vf_cfg);
- adapter->sriov_enabled = false;
+ adapter->num_vfs = 0;
}
#endif
}
@@ -2351,8 +2375,8 @@ static int be_close(struct net_device *netdev)
static int be_rx_queues_setup(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
- int rc, i;
- u8 rsstable[MAX_RSS_QS];
+ int rc, i, j;
+ u8 rsstable[128];
for_all_rx_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -2364,11 +2388,15 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for_all_rss_queues(adapter, rxo, i)
- rsstable[i] = rxo->rss_id;
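+ /* Populate all 128 RSS indirection table entries by cycling
+  * through the RSS queue ids.
+  */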
+ for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for_all_rss_queues(adapter, rxo, i) {
+ if ((j + i) >= 128)
+ break;
+ rsstable[j + i] = rxo->rss_id;
+ }
+ }
+ rc = be_cmd_rss_config(adapter, rsstable, 128);
- rc = be_cmd_rss_config(adapter, rsstable,
- adapter->num_rx_qs - 1);
if (rc)
return rc;
}
@@ -2386,6 +2414,7 @@ static int be_open(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *tx_eq = &adapter->tx_eq;
struct be_rx_obj *rxo;
+ u8 link_status;
int status, i;
status = be_rx_queues_setup(adapter);
@@ -2409,6 +2438,11 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
+ status = be_cmd_link_status_query(adapter, NULL, NULL,
+ &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
+
return 0;
err:
be_close(adapter->netdev);
@@ -2465,19 +2499,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
u32 vf;
int status = 0;
u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
be_vf_eth_addr_generate(adapter, mac);
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id,
- vf + 1);
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_add(adapter, mac,
+ vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
+
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address add failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n", vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
mac[5] += 1;
}
@@ -2486,24 +2525,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
static void be_vf_clear(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 vf;
- for (vf = 0; vf < num_vfs; vf++) {
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
- }
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter))
+ be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
+ else
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle, vf + 1);
+ be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+ }
}
static int be_clear(struct be_adapter *adapter)
{
- if (be_physfn(adapter) && adapter->sriov_enabled)
+ if (sriov_enabled(adapter))
be_vf_clear(adapter);
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -2511,61 +2549,94 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
- adapter->eq_next_idx = 0;
-
- adapter->be3_native = false;
- adapter->promiscuous = false;
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
return 0;
}
+static void be_vf_setup_init(struct be_adapter *adapter)
+{
+ struct be_vf_cfg *vf_cfg;
+ int vf;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ vf_cfg->if_handle = -1;
+ vf_cfg->pmac_id = -1;
+ }
+}
+
static int be_vf_setup(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
u16 lnk_speed;
int status;
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
- for (vf = 0; vf < num_vfs; vf++) {
+ be_vf_setup_init(adapter);
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &adapter->vf_cfg[vf].vf_if_handle,
- NULL, vf+1);
+ &vf_cfg->if_handle, NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
}
- if (!lancer_chip(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto err;
- }
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto err;
- for (vf = 0; vf < num_vfs; vf++) {
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
- vf + 1);
+ NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ vf_cfg->tx_rate = lnk_speed * 10;
}
return 0;
err:
return status;
}
+static void be_setup_init(struct be_adapter *adapter)
+{
+ adapter->vlan_prio_bmap = 0xff;
+ adapter->link_speed = -1;
+ adapter->if_handle = -1;
+ adapter->be3_native = false;
+ adapter->promiscuous = false;
+ adapter->eq_next_idx = 0;
+}
+
+static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+{
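+ /* Fetch the provisioned pmac_id from the MAC list, query the MAC
+  * address it maps to, then add that MAC as this function's active one.
+  */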
+ u32 pmac_id;
+ int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, adapter->if_handle, pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id, 0);
+do_none:
+ return status;
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
- int status;
+ int status, i;
u8 mac[ETH_ALEN];
+ struct be_tx_obj *txo;
- /* Allow all priorities by default. A GRP5 evt may modify this */
- adapter->vlan_prio_bmap = 0xff;
- adapter->link_speed = -1;
+ be_setup_init(adapter);
be_cmd_req_native_mode(adapter);
@@ -2583,7 +2654,7 @@ static int be_setup(struct be_adapter *adapter)
memset(mac, 0, ETH_ALEN);
status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0);
+ true /*permanent */, 0, 0);
if (status)
return status;
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
@@ -2592,7 +2663,8 @@ static int be_setup(struct be_adapter *adapter)
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS;
+ BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
+
if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
@@ -2603,12 +2675,23 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto err;
- /* For BEx, the VF's permanent mac queried from card is incorrect.
- * Query the mac configued by the PF using if_handle
- */
- if (!be_physfn(adapter) && !lancer_chip(adapter)) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ for_all_tx_queues(adapter, txo, i) {
+ status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ if (status)
+ goto err;
+ }
+
+ /* The VF's permanent mac queried from card is incorrect.
+ * For BEx: Query the mac configured by the PF using if_handle
+ * For Lancer: Get and use mac_list to obtain mac address.
+ */
+ if (!be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ status = be_configure_mac_from_list(adapter, mac);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (!status) {
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
@@ -2624,18 +2707,21 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
+
if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
}
pcie_set_readrq(adapter->pdev, 4096);
- if (be_physfn(adapter) && adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
status = be_vf_setup(adapter);
if (status)
goto err;
@@ -2647,6 +2733,19 @@ err:
return status;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ int i;
+
+ event_handle(adapter, &adapter->tx_eq, false);
+ for_all_rx_queues(adapter, rxo, i)
+ event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -2995,7 +3094,10 @@ static struct net_device_ops be_netdev_ops = {
.ndo_set_vf_mac = be_set_vf_mac,
.ndo_set_vf_vlan = be_set_vf_vlan,
.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
- .ndo_get_vf_config = be_get_vf_config
+ .ndo_get_vf_config = be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = be_netpoll,
+#endif
};
static void be_netdev_init(struct net_device *netdev)
@@ -3242,6 +3344,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID2:
+ case OC_DEVICE_ID5:
adapter->generation = BE_GEN3;
break;
case OC_DEVICE_ID3:
@@ -3267,7 +3370,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
static int lancer_wait_ready(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 500
+#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
int status = 0, i;
@@ -3276,7 +3379,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
if (sliport_status & SLIPORT_STATUS_RDY_MASK)
break;
- msleep(20);
+ msleep(1000);
}
if (i == SLIPORT_READY_TIMEOUT)
@@ -3313,6 +3416,104 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
return status;
}
+static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status;
+
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter in error state."
+ "Trying to recover.\n");
+
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
+
+ netif_device_detach(adapter->netdev);
+
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
+
+ be_clear(adapter);
+
+ adapter->fw_timeout = false;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
+ }
+
+ netif_device_attach(adapter->netdev);
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery succeeded\n");
+ }
+ return;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery failed\n");
+}
+
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (lancer_chip(adapter))
+ lancer_test_and_recover_fn_err(adapter);
+
+ be_detect_dump_ue(adapter);
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_rx_eqd_update(adapter, rxo);
+
+ if (rxo->rx_post_starved) {
+ rxo->rx_post_starved = false;
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ }
+ }
+
+reschedule:
+ adapter->work_counter++;
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@@ -3365,7 +3566,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_sriov;
if (lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
if (status) {
dev_err(&pdev->dev, "Adapter in non recoverable error\n");
goto ctrl_clean;
@@ -3559,6 +3765,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
dev_info(&adapter->pdev->dev, "EEH reset\n");
adapter->eeh_err = false;
+ adapter->ue_detected = false;
+ adapter->fw_timeout = false;
status = pci_enable_device(pdev);
if (status)
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 251b635fe75..60f0e788cc2 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1185,18 +1185,7 @@ static struct platform_driver ethoc_driver = {
},
};
-static int __init ethoc_init(void)
-{
- return platform_driver_register(&ethoc_driver);
-}
-
-static void __exit ethoc_exit(void)
-{
- platform_driver_unregister(&ethoc_driver);
-}
-
-module_init(ethoc_init);
-module_exit(ethoc_exit);
+module_platform_driver(ethoc_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 61d2bddec1f..c82d444b582 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b29..820de8b9ff0 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -21,9 +21,10 @@ config NET_VENDOR_FREESCALE
if NET_VENDOR_FREESCALE
config FEC
- bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS)
+ default ARCH_MXC || ARCH_MXS if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a159..b0b04453c7c 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = {
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
- IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
IMX27_FEC, /* runs on i.mx27/35/51 */
IMX28_FEC,
IMX6Q_FEC,
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#elif defined (CONFIG_M5272C3)
#define FEC_FLASHMAC (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC 0xffc0406b
+#define FEC_FLASHMAC 0xffc0406b
#else
#define FEC_FLASHMAC 0
#endif
@@ -232,6 +232,7 @@ struct fec_enet_private {
struct platform_device *pdev;
int opened;
+ int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
@@ -254,11 +255,13 @@ struct fec_enet_private {
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
-#define FEC_MII_TIMEOUT 1000 /* us */
+#define FEC_MII_TIMEOUT 30000 /* us */
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+static int mii_cnt;
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -515,6 +518,7 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -531,8 +535,10 @@ fec_stop(struct net_device *ndev)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
writel(2, fep->hwp + FEC_ECNTRL);
+ writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ }
}
@@ -818,7 +824,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
iap = (unsigned char *)FEC_FLASHMAC;
#else
if (pdata)
- memcpy(iap, pdata->mac, ETH_ALEN);
+ iap = (unsigned char *)&pdata->mac;
#endif
}
@@ -837,7 +843,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */
@@ -865,6 +871,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
if (phy_dev->link) {
if (fep->full_duplex != phy_dev->duplex) {
fec_restart(ndev, phy_dev->duplex);
+ /* prevent unnecessary second fec_restart() below */
+ fep->link = phy_dev->link;
status_change = 1;
}
}
@@ -953,7 +961,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
- int dev_id = fep->pdev->id;
+ int dev_id = fep->dev_id;
fep->phy_dev = NULL;
@@ -972,8 +980,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
if (phy_id >= PHY_MAX_ADDR) {
- printk(KERN_INFO "%s: no PHY, assuming direct connection "
- "to switch\n", ndev->name);
+ printk(KERN_INFO
+ "%s: no PHY, assuming direct connection to switch\n",
+ ndev->name);
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -998,8 +1007,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+ printk(KERN_INFO
+ "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
@@ -1031,10 +1041,14 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
- fep->mii_bus = fec0_mii_bus;
- return 0;
+ if (mii_cnt && fec0_mii_bus) {
+ fep->mii_bus = fec0_mii_bus;
+ mii_cnt++;
+ return 0;
+ }
+ return -ENOENT;
}
fep->mii_timeout = 0;
@@ -1063,7 +1077,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1079,6 +1093,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
+ mii_cnt++;
+
/* save fec0 mii_bus */
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fec0_mii_bus = fep->mii_bus;
@@ -1095,11 +1111,11 @@ err_out:
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- if (fep->phy_dev)
- phy_disconnect(fep->phy_dev);
- mdiobus_unregister(fep->mii_bus);
- kfree(fep->mii_bus->irq);
- mdiobus_free(fep->mii_bus);
+ if (--mii_cnt == 0) {
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
+ }
}
static int fec_enet_get_settings(struct net_device *ndev,
@@ -1521,6 +1537,7 @@ fec_probe(struct platform_device *pdev)
int i, irq, ret = 0;
struct resource *r;
const struct of_device_id *of_id;
+ static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1548,6 +1565,7 @@ fec_probe(struct platform_device *pdev)
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
+ fep->dev_id = dev_id++;
if (!fep->hwp) {
ret = -ENOMEM;
@@ -1571,8 +1589,12 @@ fec_probe(struct platform_device *pdev)
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
- if (i && irq < 0)
- break;
+ if (irq < 0) {
+ if (i)
+ break;
+ ret = irq;
+ goto failed_irq;
+ }
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
if (ret) {
while (--i >= 0) {
@@ -1583,7 +1605,7 @@ fec_probe(struct platform_device *pdev)
}
}
- fep->clk = clk_get(&pdev->dev, "fec_clk");
+ fep->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(fep->clk)) {
ret = PTR_ERR(fep->clk);
goto failed_clk;
@@ -1635,13 +1657,18 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct resource *r;
+ int i;
- fec_stop(ndev);
+ unregister_netdev(ndev);
fec_enet_mii_remove(fep);
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ int irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
clk_disable(fep->clk);
clk_put(fep->clk);
iounmap(fep->hwp);
- unregister_netdev(ndev);
free_netdev(ndev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5bf5471f06f..910a8e18a9a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1171,16 +1171,6 @@ static struct platform_driver fs_enet_driver = {
.remove = fs_enet_remove,
};
-static int __init fs_init(void)
-{
- return platform_driver_register(&fs_enet_driver);
-}
-
-static void __exit fs_cleanup(void)
-{
- platform_driver_unregister(&fs_enet_driver);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
@@ -1190,7 +1180,4 @@ static void fs_enet_netpoll(struct net_device *dev)
}
#endif
-/**************************************************************************************/
-
-module_init(fs_init);
-module_exit(fs_cleanup);
+module_platform_driver(fs_enet_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index b09270b5d0a..0f2d1a71090 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -232,15 +232,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_bb_init(void)
-{
- return platform_driver_register(&fs_enet_bb_mdio_driver);
-}
-
-static void fs_enet_mdio_bb_exit(void)
-{
- platform_driver_unregister(&fs_enet_bb_mdio_driver);
-}
-
-module_init(fs_enet_mdio_bb_init);
-module_exit(fs_enet_mdio_bb_exit);
+module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index e0e9d6c35d8..55bb867258e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -237,15 +237,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_fec_init(void)
-{
- return platform_driver_register(&fs_enet_fec_mdio_driver);
-}
-
-static void fs_enet_mdio_fec_exit(void)
-{
- platform_driver_unregister(&fs_enet_fec_mdio_driver);
-}
-
-module_init(fs_enet_mdio_fec_init);
-module_exit(fs_enet_mdio_fec_exit);
+module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e..9eb815941df 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
- int i;
-
- for (i = PHY_MAX_ADDR; i > 0; i--) {
- u32 phy_id;
-
- if (get_phy_id(new_bus, i, &phy_id))
- return -1;
-
- if (phy_id == 0xffffffff)
- break;
- }
- return i;
-}
-
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
- } else
- return NULL;
-}
+ }
#endif
+ return NULL;
+}
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
struct device_node *np = NULL;
int err = 0;
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
return err;
else
return -EINVAL;
-}
+#else
+ return -ENODEV;
#endif
-
+}
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else {
err = -ENODEV;
goto err_free_irqs;
@@ -386,23 +359,12 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
}
if (tbiaddr == -1) {
- out_be32(tbipa, 0);
-
- tbiaddr = fsl_pq_mdio_find_free(new_bus);
- }
-
- /*
- * We define TBIPA at 0 to be illegal, opting to fail for boards that
- * have PHYs at 1-31, rather than change tbipa and rescan.
- */
- if (tbiaddr == 0) {
err = -EBUSY;
-
goto err_free_irqs;
+ } else {
+ out_be32(tbipa, tbiaddr);
}
- out_be32(tbipa, tbiaddr);
-
err = of_mdiobus_register(new_bus, np);
if (err) {
printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
@@ -480,15 +442,6 @@ static struct platform_driver fsl_pq_mdio_driver = {
.remove = fsl_pq_mdio_remove,
};
-int __init fsl_pq_mdio_init(void)
-{
- return platform_driver_register(&fsl_pq_mdio_driver);
-}
-module_init(fsl_pq_mdio_init);
+module_platform_driver(fsl_pq_mdio_driver);
-void fsl_pq_mdio_exit(void)
-{
- platform_driver_unregister(&fsl_pq_mdio_driver);
-}
-module_exit(fsl_pq_mdio_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 83199fd0d62..e01cdaa722a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags =
@@ -2306,7 +2306,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
}
/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, u32 features)
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
@@ -3114,7 +3114,7 @@ static void gfar_set_multi(struct net_device *dev)
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
- static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+ static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
@@ -3137,7 +3137,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
struct gfar_private *priv = netdev_priv(dev);
- u32 result = ether_crc(MAC_ADDR_LEN, addr);
+ u32 result = ether_crc(ETH_ALEN, addr);
int width = priv->hash_width;
u8 whichbit = (result >> (32 - width)) & 0x1f;
u8 whichreg = result >> (32 - width + 5);
@@ -3158,7 +3158,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
int idx;
- char tmpbuf[MAC_ADDR_LEN];
+ char tmpbuf[ETH_ALEN];
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
@@ -3166,8 +3166,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
/* Now copy it into the mac registers backwards, cuz */
/* little endian is silly */
- for (idx = 0; idx < MAC_ADDR_LEN; idx++)
- tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
gfar_write(macptr, *((u32 *) (tmpbuf)));
@@ -3281,16 +3281,4 @@ static struct platform_driver gfar_driver = {
.remove = gfar_remove,
};
-static int __init gfar_init(void)
-{
- return platform_driver_register(&gfar_driver);
-}
-
-static void __exit gfar_exit(void)
-{
- platform_driver_unregister(&gfar_driver);
-}
-
-module_init(gfar_init);
-module_exit(gfar_exit);
-
+module_platform_driver(gfar_driver);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9aa43773e8e..fe7ac3a8319 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -74,9 +74,6 @@ struct ethtool_rx_list {
* will be the next highest multiple of 512 bytes. */
#define INCREMENTAL_BUFFER_SIZE 512
-
-#define MAC_ADDR_LEN 6
-
#define PHY_INIT_TIMEOUT 100000
#define GFAR_PHY_CHANGE_TIME 2
@@ -1179,9 +1176,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
extern void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask);
void gfar_init_sysfs(struct net_device *dev);
-int gfar_set_features(struct net_device *dev, u32 features);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
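[Sketch only, not patch content — the gianfar prototypes above move from u32 to netdev_features_t. Below are the two netdev_ops hooks that carry the type, for a hypothetical driver "foo"; the RXCSUM/LRO coupling is purely illustrative and only the relevant ops are shown.]

#include <linux/netdevice.h>

static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* e.g. LRO cannot work without RX checksumming */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;
	return features;
}

static int foo_set_features(struct net_device *dev,
			    netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		netdev_info(dev, "RX checksum offload %s\n",
			    features & NETIF_F_RXCSUM ? "on" : "off");
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_fix_features	= foo_fix_features,
	.ndo_set_features	= foo_set_features,
};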
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 212736bab6b..5890f4b0c0d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return err;
}
-int gfar_set_features(struct net_device *dev, u32 features)
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
unsigned long flags;
int err = 0, i = 0;
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
gfar_vlan_mode(dev, features);
@@ -1410,10 +1410,9 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
/* We need a copy of the filer table because
* we want to change its order */
- temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+ temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
- memcpy(temp_table, tab, sizeof(*temp_table));
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
sizeof(struct gfar_mask_entry), GFP_KERNEL);
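[Illustrative sketch, hypothetical structure — the gianfar_ethtool hunk above replaces a kmalloc()+memcpy() pair with kmemdup(), which allocates and copies in one call.]

#include <linux/slab.h>
#include <linux/types.h>

struct foo_table {
	u32 entry[16];
};

static struct foo_table *foo_table_dup(const struct foo_table *src)
{
	/* one allocation plus copy; returns NULL on allocation failure */
	return kmemdup(src, sizeof(*src), GFP_KERNEL);
}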
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index f67b8aebc89..83e0ed757e3 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -562,21 +562,7 @@ static struct platform_driver gianfar_ptp_driver = {
.remove = gianfar_ptp_remove,
};
-/* module operations */
-
-static int __init ptp_gianfar_init(void)
-{
- return platform_driver_register(&gianfar_ptp_driver);
-}
-
-module_init(ptp_gianfar_init);
-
-static void __exit ptp_gianfar_exit(void)
-{
- platform_driver_unregister(&gianfar_ptp_driver);
-}
-
-module_exit(ptp_gianfar_exit);
+module_platform_driver(gianfar_ptp_driver);
MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index b5dc0273a1d..ba2dc083bfc 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
- return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+ return memcmp(addr1, addr2, ETH_ALEN);
}
#ifdef DEBUG
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index d12fcad145e..2e395a2566b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/if_ether.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
@@ -881,7 +882,6 @@ struct ucc_geth_hardware_statistics {
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
-#define ENET_NUM_OCTETS_PER_ADDRESS 6
#define ENET_GROUP_ADDR 0x01 /* Group address mask
for ethernet
addresses */
@@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses {
/* UCC GETH 82xx Ethernet Address Container */
struct enet_addr_container {
- u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 address[ETH_ALEN]; /* ethernet address */
enum ucc_geth_enet_address_recognition_location location; /* location in
82xx address
recognition
@@ -1194,7 +1194,7 @@ struct ucc_geth_private {
u16 cpucount[NUM_TX_QUEUES];
u16 __iomem *p_cpucount[NUM_TX_QUEUES];
int indAddrRegUsed[NUM_OF_PADDRS];
- u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */
u8 numGroupAddrInHash;
u8 numIndAddrInHash;
u8 numIndAddrInReg;
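[Sketch, not patch content — the gianfar and ucc_geth hunks drop private 6-byte MAC length constants in favour of ETH_ALEN from <linux/if_ether.h>. The filter structure below is hypothetical.]

#include <linux/if_ether.h>
#include <linux/string.h>

struct foo_filter_entry {
	u8 addr[ETH_ALEN];		/* one station MAC address */
};

static void foo_filter_set(struct foo_filter_entry *e, const u8 *mac)
{
	memcpy(e->addr, mac, ETH_ALEN);	/* ETH_ALEN == 6 */
}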
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 15416752c13..ee84b472cee 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1058,9 +1058,10 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
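[Sketch of the bounded .get_drvinfo shape the fmvj18x_cs/eepro/e100/e1000/e1000e/igb hunks converge on: strlcpy()/snprintf() with sizeof() instead of strcpy()/sprintf(). DRV_NAME, DRV_VERSION and the bus string are placeholders.]

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

#define DRV_NAME	"foo"
#define DRV_VERSION	"1.0"

static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "platform 0x%lx", dev->base_addr);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_drvinfo	= foo_get_drvinfo,
};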
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
index 067c46069a1..114cda7721f 100644
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -1726,9 +1726,10 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
static void eepro_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VERSION);
- sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+ "ISA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops eepro_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 410d6a1984e..6650068c996 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -61,9 +61,9 @@
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
#define EHEA_DEF_ENTRIES_SQ 1023
-#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ1 1023
#define EHEA_DEF_ENTRIES_RQ2 1023
-#define EHEA_DEF_ENTRIES_RQ3 1023
+#define EHEA_DEF_ENTRIES_RQ3 511
#else
#define EHEA_MAX_CQE_COUNT 4080
#define EHEA_DEF_ENTRIES_SQ 4080
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 37b70f7052b..3554414eb5e 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
out_herr:
free_page((unsigned long)cb2);
resched:
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2113,17 +2114,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2131,6 +2134,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2139,24 +2143,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
- return;
+ return err;
}
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2164,6 +2172,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2172,10 +2181,13 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
+ return err;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2434,7 +2446,8 @@ static int ehea_open(struct net_device *dev)
}
mutex_unlock(&port->port_lock);
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
return ret;
}
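[Sketch only — the ehea hunks above (and the e1000/e1000e/igb ones later in this patch) change the VLAN ndo callbacks from void to an int errno return. The hypothetical "foo" hooks below show the signatures as used by this series (no 802.1ad protocol argument).]

#include <linux/netdevice.h>

static int foo_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	/* program vid into the hardware VLAN filter; 0 on success */
	return 0;
}

static int foo_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* remove vid from the hardware VLAN filter; 0 on success */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_vlan_rx_add_vid	= foo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= foo_vlan_rx_kill_vid,
};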
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ed79b2d3ad3..2abce965c7b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2924,6 +2924,9 @@ static int __devexit emac_remove(struct platform_device *ofdev)
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_detach(dev->zmii_dev, dev->zmii_port);
+ busy_phy_map &= ~(1 << dev->phy.address);
+ DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
+
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b1cd41b9c61..e877371680a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -735,7 +735,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
}
-static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ibmveth_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since the ibmveth firmware interface does not have the
@@ -838,7 +839,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
return rc1 ? rc1 : rc2;
}
-static int ibmveth_set_features(struct net_device *dev, u32 features)
+static int ibmveth_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 4326681df38..acc31af6594 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
/* FIXME: do we need this? */
memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+ memset(remote_list, 0, sizeof(remote_list));
/* a 0 address marks the end of the valid entries */
if (senddata->addr[startchunk] == 0)
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 8fd80a00b89..075451d0207 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -371,16 +371,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
}
/* The last cycle is a tri-state, so read from the PHY. */
- for (j = 7; j < 8; j++) {
- for (i = 0; i < p[j].len; i++) {
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-
- p[j].field |= ((ipg_r8(PHY_CTRL) &
- IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
-
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
- }
- }
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+ ipg_r8(PHY_CTRL);
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
}
static void ipg_set_led_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5a2fdf7a00c..9436397e572 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(nic->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev),
+ sizeof(info->bus_info));
}
#define E100_PHY_REGS 0x1C
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2b223ac99c4..3103f0b6bf5 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000_driver_name, 32);
- strncpy(drvinfo->version, e1000_driver_version, 32);
+ strlcpy(drvinfo->driver, e1000_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000_driver_version,
+ sizeof(drvinfo->version));
- sprintf(firmware_version, "N/A");
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index 5c9a8403668..cf7e3c09447 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -448,7 +448,6 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
#define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
/* MAC decode size is 128K - This is the size of BAR0 */
#define MAC_DECODE_SIZE (128 * 1024)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cf480b55462..985d58943a0 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -167,9 +167,10 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
static bool e1000_vlan_used(struct e1000_adapter *adapter);
-static void e1000_vlan_mode(struct net_device *netdev, u32 features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
@@ -806,7 +807,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
}
}
-static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -820,10 +822,11 @@ static u32 e1000_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & NETIF_F_HW_VLAN_RX)
e1000_vlan_mode(netdev, features);
@@ -1182,7 +1185,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
else
- adapter->quad_port_a = 1;
+ adapter->quad_port_a = true;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
@@ -1676,7 +1679,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* need this to apply a workaround later in the send path. */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
- adapter->pcix_82544 = 1;
+ adapter->pcix_82544 = true;
ew32(TCTL, tctl);
@@ -1999,7 +2002,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
writel(0, hw->hw_addr + tx_ring->tdh);
writel(0, hw->hw_addr + tx_ring->tdt);
@@ -2848,7 +2851,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
size -= 4;
}
@@ -3216,7 +3219,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(tso)) {
if (likely(hw->mac_type != e1000_82544))
- tx_ring->last_tx_tso = 1;
+ tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
@@ -4577,7 +4580,8 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
e1000_irq_enable(adapter);
}
-static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4600,7 +4604,7 @@ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
e1000_irq_enable(adapter);
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4609,7 +4613,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, true);
@@ -4621,9 +4625,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
e1000_write_vfta(hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4644,6 +4650,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, false);
+
+ return 0;
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9fe18d1d53d..f478a22ed57 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,7 @@ struct e1000_adapter {
u32 txd_cmd;
bool detect_tx_hung;
+ bool tx_hang_recheck;
u8 tx_timeout_factor;
u32 tx_int_delay;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 69c9d219914..fb2c28e799a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -579,26 +579,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, e1000e_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version));
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
- snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(adapter->eeprom_vers & 0xF000) >> 12,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a855db1ad24..3911401ed65 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -163,16 +163,13 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
regs[n] = __er32(hw, E1000_TARC(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
+ pr_info("%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 2; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
/*
@@ -208,16 +205,15 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
reginfo->name; reginfo++) {
e1000_regdump(hw, reginfo);
@@ -228,15 +224,14 @@ static void e1000e_dump(struct e1000_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
/* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
@@ -271,37 +266,32 @@ static void e1000e_dump(struct e1000_adapter *adapter)
* +----------------------------------------------------------------+
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
- printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
- printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
- printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
+ pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
+ pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
+ pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->length, buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp,
- buffer_info->skb);
if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
+ next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ next_desc = " NTC";
else
- printk(KERN_CONT "\n");
+ next_desc = "";
+ pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
@@ -312,9 +302,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print Rx Ring Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
- printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info("Queue [NTU] [NTC]\n");
+ pr_info(" %5d %5X %5X\n",
+ 0, rx_ring->next_to_use, rx_ring->next_to_clean);
/* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
@@ -337,10 +327,7 @@ rx_ring_summary:
* 24 | Buffer Address 3 [63:0] |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
- "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext Pkt Split format\n");
+ pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 13 12 8 7 4 3 0
@@ -352,35 +339,40 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
- "[ l3 l2 l1 hs] [reserved ] ---------------- "
- "[bi->skb] <-- Ext Rx Write-Back format\n");
+ pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
buffer_info = &rx_ring->buffer_info[i];
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -388,13 +380,6 @@ rx_ring_summary:
phys_to_virt(buffer_info->dma),
adapter->rx_ps_bsize0, true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
break;
default:
@@ -407,9 +392,7 @@ rx_ring_summary:
* 8 | Reserved |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buf addr 63:0 ] "
- "[reserved 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext (Read) format\n");
+ pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
/* Extended Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 24 23 4 3 0
@@ -423,29 +406,37 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [cs ipid mrq] "
- "[vt ln xe xs] "
- "[bi->skb] <-- Ext (Write-Back) format\n");
+ pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+
buffer_info = &rx_ring->buffer_info[i];
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -456,13 +447,6 @@ rx_ring_summary:
adapter->rx_buffer_len,
true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
}
@@ -875,7 +859,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
u32 length, staterr;
unsigned int i;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -904,7 +888,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev,
buffer_info->dma,
@@ -1030,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
print_hang_task);
+ struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1041,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
+ if (!adapter->tx_hang_recheck &&
+ (adapter->flags2 & FLAG2_DMA_BURST)) {
+ /* May be block on write-back, flush and detect again
+ * flush pending descriptor writebacks to memory
+ */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ /* execute the writes immediately */
+ e1e_flush();
+ adapter->tx_hang_recheck = true;
+ return;
+ }
+ /* Real hang detected */
+ adapter->tx_hang_recheck = false;
+ netif_stop_queue(netdev);
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1095,6 +1095,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1112,6 +1113,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
if (cleaned) {
total_tx_packets += buffer_info->segs;
total_tx_bytes += buffer_info->bytecount;
+ if (buffer_info->skb) {
+ bytes_compl += buffer_info->skb->len;
+ pkts_compl++;
+ }
}
e1000_put_txbuf(adapter, buffer_info);
@@ -1130,6 +1135,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_ring->next_to_clean = i;
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
#define TX_WAKE_THRESHOLD 32
if (count && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
@@ -1150,14 +1157,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i
*/
- adapter->detect_tx_hung = 0;
+ adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[i].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ)) &&
- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+ !(er32(STATUS) & E1000_STATUS_TXOFF))
schedule_work(&adapter->print_hang_task);
- netif_stop_queue(netdev);
- }
+ else
+ adapter->tx_hang_recheck = false;
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
@@ -1185,7 +1192,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
unsigned int i, j;
u32 length, staterr;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -1211,7 +1218,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
@@ -1222,8 +1229,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
- e_dbg("Packet Split buffers didn't pick up the full "
- "packet\n");
+ e_dbg("Packet Split buffers didn't pick up the full packet\n");
dev_kfree_skb_irq(skb);
if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
@@ -1238,8 +1244,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->wb.middle.length0);
if (!length) {
- e_dbg("Last part of the packet spanning multiple "
- "descriptors\n");
+ e_dbg("Last part of the packet spanning multiple descriptors\n");
dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -1917,8 +1922,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
return;
}
/* MSI-X failed, so fall through and try MSI */
- e_err("Failed to initialize MSI-X interrupts. "
- "Falling back to MSI interrupts.\n");
+ e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
e1000e_reset_interrupt_capability(adapter);
}
adapter->int_mode = E1000E_INT_MODE_MSI;
@@ -1928,8 +1932,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
adapter->flags |= FLAG_MSI_ENABLED;
} else {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
- e_err("Failed to initialize MSI interrupts. Falling "
- "back to legacy interrupts.\n");
+ e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
}
/* Fall through */
case E1000E_INT_MODE_LEGACY:
@@ -2260,6 +2263,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
e1000_put_txbuf(adapter, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
size = sizeof(struct e1000_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
@@ -2518,7 +2522,7 @@ clean_rx:
return work_done;
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2528,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
/* add VID to filter table */
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2539,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2552,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
e1000e_release_hw_control(adapter);
- return;
+ return 0;
}
/* remove VID from filter table */
@@ -2564,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3113,79 +3121,147 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/**
- * e1000_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+ mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* update_mc_addr_list expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
*
- * Updates the Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
**/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count)
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int rar_entries = hw->mac.rar_entry_count;
+ int count = 0;
+
+ /* save a rar entry for our hardware address */
+ rar_entries--;
+
+ /* save a rar entry for the LAA workaround */
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+ rar_entries--;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+
+ /*
+ * write the addresses in reverse order to avoid write
+ * combining
+ */
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ e1000e_rar_set(hw, ha->addr, rar_entries--);
+ count++;
+ }
+ }
+
+ /* zero out the remaining RAR entries not used above */
+ for (; rar_entries > 0; rar_entries--) {
+ ew32(RAH(rar_entries), 0);
+ ew32(RAL(rar_entries), 0);
+ }
+ e1e_flush();
+
+ return count;
}
/**
- * e1000_set_multi - Multicast and Promiscuous mode set
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-static void e1000_set_multi(struct net_device *netdev)
+static void e1000e_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u8 *mta_list;
u32 rctl;
/* Check for Promiscuous and All Multicast modes */
-
rctl = er32(RCTL);
+ /* clear the affected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- rctl &= ~E1000_RCTL_VFE;
/* Do not hardware filter VLANs in promisc mode */
e1000e_vlan_filter_disable(adapter);
} else {
+ int count;
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
- rctl &= ~E1000_RCTL_UPE;
} else {
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = e1000e_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_MPE;
}
e1000e_vlan_filter_enable(adapter);
- }
-
- ew32(RCTL, rctl);
-
- if (!netdev_mc_empty(netdev)) {
- int i = 0;
-
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list)
- return;
-
- /* prepare a packed array of only addresses. */
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
- e1000_update_mc_addr_list(hw, mta_list, i);
- kfree(mta_list);
- } else {
/*
- * if we're called from probe, we might not have
- * anything to do here, so clear out the list
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
*/
- e1000_update_mc_addr_list(hw, NULL, 0);
+ count = e1000e_write_uc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_UPE;
}
+ ew32(RCTL, rctl);
+
if (netdev->features & NETIF_F_HW_VLAN_RX)
e1000e_vlan_strip_enable(adapter);
else
@@ -3198,7 +3274,7 @@ static void e1000_set_multi(struct net_device *netdev)
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
- e1000_set_multi(adapter->netdev);
+ e1000e_set_rx_mode(adapter->netdev);
e1000_restore_vlan(adapter);
e1000_init_manageability_pt(adapter);
@@ -3444,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter)
clear_bit(__E1000_DOWN, &adapter->state);
- napi_enable(&adapter->napi);
if (adapter->msix_entries)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
@@ -3506,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter)
e1e_flush();
usleep_range(10000, 20000);
- napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
del_timer_sync(&adapter->watchdog_timer);
@@ -3782,6 +3856,7 @@ static int e1000_open(struct net_device *netdev)
e1000_irq_enable(adapter);
+ adapter->tx_hang_recheck = false;
netif_start_queue(netdev);
adapter->idle_check = true;
@@ -3828,6 +3903,8 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
+ napi_disable(&adapter->napi);
+
if (!test_bit(__E1000_DOWN, &adapter->state)) {
e1000e_down(adapter);
e1000_free_irq(adapter);
@@ -4168,22 +4245,19 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
- (adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "Rx/Tx" :
- ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
- ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+ printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- bool link_active = 0;
+ bool link_active = false;
s32 ret_val = 0;
/*
@@ -4198,7 +4272,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
ret_val = hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
} else {
- link_active = 1;
+ link_active = true;
}
break;
case e1000_media_type_fiber:
@@ -4297,7 +4371,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
+ bool txb2b = true;
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4323,21 +4397,18 @@ static void e1000_watchdog_task(struct work_struct *work)
e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
- e_info("Autonegotiated half duplex but"
- " link partner cannot autoneg. "
- " Try forcing full duplex if "
- "link gets many collisions.\n");
+ e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
}
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 10;
break;
}
@@ -4473,7 +4544,7 @@ link_up:
e1000e_flush_descriptors(adapter);
/* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = 1;
+ adapter->detect_tx_hung = true;
/*
* With 82571 controllers, LAA may be overwritten due to controller
@@ -4985,6 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* if count is 0 then mapping error has occurred */
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
if (count) {
+ netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(adapter, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -5110,8 +5182,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((adapter->hw.mac.type == e1000_pch2lan) &&
!(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
(new_mtu > ETH_DATA_LEN)) {
- e_err("Jumbo Frames not supported on 82579 when CRC "
- "stripping is disabled.\n");
+ e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
return -EINVAL;
}
@@ -5331,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (wufc) {
e1000_setup_rctl(adapter);
- e1000_set_multi(netdev);
+ e1000e_set_rx_mode(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & E1000_WUFC_MC) {
@@ -5527,8 +5598,8 @@ static int __e1000_resume(struct pci_dev *pdev)
phy_data & E1000_WUS_MC ? "Multicast Packet" :
phy_data & E1000_WUS_BC ? "Broadcast Packet" :
phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ? "Link Status "
- " Change" : "other");
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
@@ -5859,10 +5930,11 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
adapter->flags |= FLAG_TSO_FORCE;
@@ -5884,7 +5956,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
- .ndo_set_rx_mode = e1000_set_multi,
+ .ndo_set_rx_mode = e1000e_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
.ndo_do_ioctl = e1000_ioctl,
@@ -5949,8 +6021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
@@ -6076,6 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
NETIF_F_TSO6 |
NETIF_F_HW_CSUM);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -6135,7 +6208,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
- adapter->fc_autoneg = 1;
+ adapter->fc_autoneg = true;
adapter->hw.fc.requested_mode = e1000_fc_default;
adapter->hw.fc.current_mode = e1000_fc_default;
adapter->hw.phy.autoneg_advertised = 0x2f;
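[Sketch, not patch content — the netdev_sent_queue()/netdev_completed_queue()/netdev_reset_queue() calls added to e1000e above wire the driver into byte queue limits (BQL). The helpers below only show where each hook sits in a hypothetical driver "foo".]

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* in ndo_start_xmit, after the descriptors for skb are queued: */
static void foo_account_tx(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);
}

/* in the TX completion path, after reclaiming finished descriptors: */
static void foo_account_tx_done(struct net_device *dev,
				unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
}

/* when the TX ring is torn down or reset: */
static void foo_reset_tx_accounting(struct net_device *dev)
{
	netdev_reset_queue(dev);
}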
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7881fb95a25..b8e20f037d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -29,6 +29,8 @@
* e1000_82576
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/if_ether.h>
@@ -244,8 +246,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
* Check for invalid size
*/
if ((hw->mac.type == e1000_82576) && (size > 15)) {
- printk("igb: The NVM size is not valid, "
- "defaulting to 32K.\n");
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
size = 15;
}
nvm->word_size = 1 << size;
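[Minimal sketch — the igb files above define pr_fmt() so that every pr_*() call is prefixed with the module name. The file below is hypothetical; the actual prefix comes from KBUILD_MODNAME for the object being built.]

/* pr_fmt() must be defined before any header that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init foo_init(void)
{
	/* logged as "<modname>: hello, prefixed automatically" */
	pr_notice("hello, prefixed automatically\n");
	return 0;
}
module_init(foo_init);

static void __exit foo_exit(void)
{
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");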
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 43873eba2f6..f1206be4e71 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -148,7 +148,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ ecmd->advertising = (ADVERTISED_TP |
+ ADVERTISED_Pause);
if (hw->mac.autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
@@ -165,7 +166,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ ADVERTISED_Autoneg |
+ ADVERTISED_Pause);
ecmd->port = PORT_FIBRE;
}
@@ -673,25 +675,22 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u16 eeprom_data;
- strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, igb_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers */
adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- sprintf(firmware_version, "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
eeprom_data & 0x000F);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ced544499f1..89d576ce577 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -145,9 +147,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, u32 features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
@@ -325,16 +327,13 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
regs[n] = rd32(E1000_TXDCTL(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, rd32(reginfo->ofs));
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 4; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
}
/*
@@ -359,18 +358,15 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start "
+ "last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
reginfo->name; reginfo++) {
igb_regdump(hw, reginfo);
@@ -381,18 +377,17 @@ static void igb_dump(struct igb_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igb_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
}
/* Print TX Rings */
@@ -414,36 +409,38 @@ static void igb_dump(struct igb_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "T [desc] [address 63:0 ] "
- "[PlPOCIStDDM Ln] [bi->dma ] "
- "leng ntw timestamp bi->skb\n");
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
+ "[bi->dma ] leng ntw timestamp "
+ "bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
struct igb_tx_buffer *buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, i);
buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p", i,
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
+ " %04X %p %016llX %p%s\n", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
buffer_info->length,
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
- buffer_info->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
- else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "",
@@ -456,11 +453,11 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO " %5d %5X %5X\n", n,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info(" %5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
@@ -492,36 +489,43 @@ rx_ring_summary:
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "R [desc] [ PktBuf A0] "
- "[ HeadBuf DD] [bi->dma ] [bi->skb] "
- "<-- Adv Rx Read format\n");
- printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
- "[vl er S cks ln] ---------------- [bi->skb] "
- "<-- Adv Rx Write-Back format\n");
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
+ "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+ "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
struct igb_rx_buffer *buffer_info;
buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX -------"
+ "--------- %p%s\n", "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb);
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX"
+ " %p%s\n", "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb);
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter)) {
print_hex_dump(KERN_INFO, "",
@@ -538,14 +542,6 @@ rx_ring_summary:
PAGE_SIZE/2, true);
}
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
-
}
}
@@ -599,10 +595,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
+ pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
- printk(KERN_INFO "%s\n", igb_copyright);
+ pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
dca_register_notify(&dca_notifier);
@@ -1742,7 +1738,8 @@ void igb_reset(struct igb_adapter *adapter)
igb_get_phy_info(hw);
}
-static u32 igb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t igb_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1756,9 +1753,10 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int igb_set_features(struct net_device *netdev, u32 features)
+static int igb_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
igb_vlan_mode(netdev, features);
@@ -3640,23 +3638,23 @@ static void igb_watchdog_task(struct work_struct *work)
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
+ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
+ "Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) &&
- (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+ "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) &&
+ (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+ (ctrl & E1000_CTRL_RFCE) ? "RX" :
+ (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
- printk(KERN_INFO "igb: %s The network adapter "
- "link speed was downshifted "
- "because it overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_LINK_THROTTLE)) {
+ netdev_info(netdev, "The network adapter link "
+ "speed was downshifted because it "
+ "overheated\n");
}
/* adjust timeout factor according to speed/duplex */
@@ -3686,11 +3684,10 @@ static void igb_watchdog_task(struct work_struct *work)
adapter->link_duplex = 0;
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
- printk(KERN_ERR "igb: %s The network adapter "
- "was stopped because it "
- "overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_PWR_DOWN)) {
+ netdev_err(netdev, "The network adapter was "
+ "stopped because it overheated\n");
}
/* Links status message must follow this format */
@@ -6138,7 +6135,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
if (!page) {
- page = netdev_alloc_page(rx_ring->netdev);
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
bi->page = page;
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
@@ -6467,7 +6464,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
return 0;
}
-static void igb_vlan_mode(struct net_device *netdev, u32 features)
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6494,7 +6491,7 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
igb_rlpml_set(adapter);
}
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6507,9 +6504,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6524,6 +6523,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -7064,15 +7065,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
wr32(E1000_DMCTXTH, 0);
/*
- * DMA Coalescing high water mark needs to be higher
- * than the RX threshold. set hwm to PBA - 2 * max
- * frame size
+ * DMA Coalescing high water mark needs to be greater
+ * than the Rx threshold. Set hwm to PBA - max frame
+ * size in 16B units, capping it at PBA - 6KB.
*/
- hwm = pba - (2 * adapter->max_frame_size);
+ hwm = 64 * pba - adapter->max_frame_size / 16;
+ if (hwm < 64 * (pba - 6))
+ hwm = 64 * (pba - 6);
+ reg = rd32(E1000_FCRTC);
+ reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+ reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+ & E1000_FCRTC_RTH_COAL_MASK);
+ wr32(E1000_FCRTC, reg);
+
+ /*
+ * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ * frame size, capping it at PBA - 10KB.
+ */
+ dmac_thr = pba - adapter->max_frame_size / 512;
+ if (dmac_thr < pba - 10)
+ dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
- dmac_thr = pba - 4;
-
reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
& E1000_DMACR_DMACTHR_MASK);
@@ -7088,7 +7102,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
* coalescing(smart fifb)-UTRESH=0
*/
wr32(E1000_DMCRTRH, 0);
- wr32(E1000_FCRTC, hwm);
reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
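For readers following the watermark change above, a small standalone sketch of the arithmetic with assumed example values (pba = 34 KB of packet buffer, max_frame_size = 1522 bytes); the formulas mirror the hunk, the numbers are illustrative only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pba = 34;              /* assumed packet buffer size, in KB */
		unsigned int max_frame_size = 1522; /* assumed max frame size, in bytes */
		unsigned int hwm, dmac_thr;

		/* High water mark in 16-byte units: PBA - max frame, floored at PBA - 6KB. */
		hwm = 64 * pba - max_frame_size / 16;
		if (hwm < 64 * (pba - 6))
			hwm = 64 * (pba - 6);

		/* Rx threshold in KB: PBA - 2 * max frame, floored at PBA - 10KB. */
		dmac_thr = pba - max_frame_size / 512;
		if (dmac_thr < pba - 10)
			dmac_thr = pba - 10;

		printf("hwm=%u (16B units), dmac_thr=%u (KB)\n", hwm, dmac_thr);
		return 0;
	}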
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2c25858cc0f..7b600a1f636 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32] = "N/A";
- strncpy(drvinfo->driver, igbvf_driver_name, 32);
- strncpy(drvinfo->version, igbvf_driver_version, 32);
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igbvf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = igbvf_get_regs_len(netdev);
drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
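The strncpy to strlcpy conversions in this series bound the copy to the destination size and always NUL-terminate. A simplified sketch of strlcpy semantics (not the kernel implementation) next to the old pattern, using an example bus string as input:

	#include <stdio.h>
	#include <string.h>

	/* Simplified sketch of strlcpy semantics: copy at most size-1 bytes,
	 * always NUL-terminate when size > 0, return the length of src. */
	static size_t my_strlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);
		if (size) {
			size_t n = len < size - 1 ? len : size - 1;
			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char a[8], b[8];
		const char *src = "0000:03:00.0";   /* example string, longer than the buffers */

		strncpy(a, src, sizeof(a) - 1);     /* old pattern: a[7] is never written */
		a[sizeof(a) - 1] = '\0';            /* termination must be done by hand */
		my_strlcpy(b, src, sizeof(b));      /* new pattern: b is always terminated */

		printf("%s / %s\n", a, b);
		return 0;
	}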
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index cca78124be3..fd3da3076c2 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -1174,18 +1176,20 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
e1000_rlpml_set_vf(hw, max_frame_size);
}
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if (hw->mac.ops.set_vfta(hw, vid, true))
+ if (hw->mac.ops.set_vfta(hw, vid, true)) {
dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
- else
- set_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ set_bit(vid, adapter->active_vlans);
+ return 0;
}
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1195,11 +1199,13 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!test_bit(__IGBVF_DOWN, &adapter->state))
igbvf_irq_enable(adapter);
- if (hw->mac.ops.set_vfta(hw, vid, false))
+ if (hw->mac.ops.set_vfta(hw, vid, false)) {
dev_err(&adapter->pdev->dev,
"Failed to remove vlan id %d\n", vid);
- else
- clear_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ clear_bit(vid, adapter->active_vlans);
+ return 0;
}
static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
@@ -1746,10 +1752,9 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
- dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
- adapter->link_speed,
- ((adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex"));
+ dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}
static bool igbvf_has_link(struct igbvf_adapter *adapter)
@@ -2532,7 +2537,8 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
-static int igbvf_set_features(struct net_device *netdev, u32 features)
+static int igbvf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2842,9 +2848,8 @@ static struct pci_driver igbvf_driver = {
static int __init igbvf_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- igbvf_driver_string, igbvf_driver_version);
- printk(KERN_INFO "%s\n", igbvf_copyright);
+ pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+ pr_info("%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 9dfce7dff79..dbb7dd2f8e3 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -473,10 +473,12 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, ixgb_driver_name, 32);
- strncpy(drvinfo->version, ixgb_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgb_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgb_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGB_STATS_LEN;
drvinfo->regdump_len = ixgb_get_regs_len(netdev);
drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e21148f8b16..9bd5faf64a8 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter)
if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
err = pci_enable_msi(adapter->pdev);
if (!err) {
- adapter->have_msi = 1;
+ adapter->have_msi = true;
irq_flags = 0;
}
/* proceed to try to request regular interrupt */
@@ -325,8 +325,8 @@ ixgb_reset(struct ixgb_adapter *adapter)
}
}
-static u32
-ixgb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Tx VLAN insertion does not work per HW design when Rx stripping is
@@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, u32 features)
}
static int
-ixgb_set_features(struct net_device *netdev, u32 features)
+ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
return 0;
@@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
-static void
+static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
vfta |= (1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
vfta &= ~(1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 4ae26a748da..772072147be 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index f1365fef4ed..a3aa6333073 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -266,10 +266,10 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
}
return 0;
@@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
ixgbe_link_speed speed = 0;
- bool link_up = 0;
+ bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 33b93ffb87c..da31735311f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -158,10 +158,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort a bad configuration */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +181,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
- adapter->dcb_set_bitmap |= BIT_PFC;
+ adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +202,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort bad configurations */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +301,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_stop(dev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_open(dev);
+
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +351,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (ret)
return DCB_NO_HW_CHG;
-#ifdef IXGBE_FCOE
- if (up && !(up & (1 << adapter->fcoe.up)))
- adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
- /*
- * Only take down the adapter if an app change occurred. FCoE
- * may shuffle tx rings in this case and this can not be done
- * without a reset currently.
- */
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- adapter->fcoe.up = ffs(up) - 1;
-
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- }
-#endif
-
if (adapter->dcb_cfg.pfc_mode_enable) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -385,15 +377,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
-#ifdef IXGBE_FCOE
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
- ret = DCB_HW_CHG_RST;
- }
-#endif
-
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +425,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
- clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+ /* Reprogram FCoE hardware offloads when the traffic class
+ * FCoE is using changes. This happens if the APP info
+ * changes or the up2tc mapping is updated.
+ */
+ if ((up && !(up & (1 << adapter->fcoe.up))) ||
+ (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+ adapter->fcoe.up = ffs(up) - 1;
+ ixgbe_dcbnl_devreset(netdev);
+ ret = DCB_HW_CHG_RST;
+ }
+#endif
+
adapter->dcb_set_bitmap = 0x00;
return ret;
}
@@ -661,22 +655,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
}
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_stop(dev);
-
- ixgbe_clear_interrupt_scheme(adapter);
- ixgbe_init_interrupt_scheme(adapter);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
@@ -761,7 +739,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
- adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+ u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+ adapter->dcb_set_bitmap |= mask;
ixgbe_dcbnl_set_all(dev);
} else {
/* Drop into single TC mode strict priority as this
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 70d58c3849b..da7e580f517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -888,23 +888,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u32 nvm_track_id;
- strncpy(drvinfo->driver, ixgbe_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version));
nvm_track_id = (adapter->eeprom_verh << 16) |
adapter->eeprom_verl;
- snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
nvm_track_id);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -1959,12 +1955,21 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
/* WOL not supported except for the following */
switch(hw->device_id) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+ /* Only these subdevices support WOL */
+ switch (hw->subsystem_device_id) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0) {
+ wol->supported = 0;
+ break;
+ }
+ case IXGBE_SUBDEV_ID_82599_SFP:
+ retval = 0;
+ break;
+ default:
wol->supported = 0;
break;
}
- retval = 0;
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8ef92d1a6aa..74669a8c060 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -106,6 +106,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
/* required last entry */
{0, }
};
@@ -146,7 +147,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
- /* flush memory to make sure state is correct before next watchog */
+ /* flush memory to make sure state is correct before next watchdog */
smp_mb__before_clear_bit();
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
@@ -1140,7 +1141,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
if (ring_is_ps_enabled(rx_ring)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(rx_ring->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
rx_ring->rx_stats.alloc_rx_page_failed++;
goto no_buffers;
@@ -2156,7 +2157,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
- * therefore no explict interrupt disable is necessary */
+ * therefore no explicit interrupt disable is necessary */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (!eicr) {
/*
@@ -3044,7 +3045,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3054,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
/* add VID to filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3067,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
/* remove VID from filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3602,7 +3607,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
/*
- * We are assuming the worst case scenerio here, and that
+ * We are assuming the worst case scenario here, and that
* is that an SFP was inserted/removed after the reset
* but before SFP detection was enabled. As such the best
* solution is to just start searching as soon as we start
@@ -3824,7 +3829,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
- "Please be aware there may be issuesassociated with "
+ "Please be aware there may be issues associated with "
"your hardware. If you are experiencing problems "
"please contact your Intel or hardware "
"representative who provided you with this "
@@ -4019,7 +4024,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* Mark all the VFs as inactive */
for (i = 0 ; i < adapter->num_vfs; i++)
- adapter->vfinfo[i].clear_to_send = 0;
+ adapter->vfinfo[i].clear_to_send = false;
/* ping all the active vfs to let them know we are going down */
ixgbe_ping_all_vfs(adapter);
@@ -5788,9 +5793,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
* @adapter - pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
- * in order to make certain interrupts are occuring. Secondly it sets the
+ * in order to make certain interrupts are occurring. Secondly it sets the
* bits needed to check for TX hangs. As a result we should immediately
- * determine if a hang has occured.
+ * determine if a hang has occurred.
*/
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
@@ -7128,7 +7133,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return -EINVAL;
/* Hardware has to reinitialize queues and interrupts to
- * match packet buffer alignment. Unfortunantly, the
+ * match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
if (netif_running(dev))
@@ -7174,7 +7179,8 @@ void ixgbe_do_reset(struct net_device *netdev)
ixgbe_reset(adapter);
}
-static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7204,7 +7210,8 @@ static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
return data;
}
-static int ixgbe_set_features(struct net_device *netdev, u32 data)
+static int ixgbe_set_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool need_reset = false;
@@ -7598,9 +7605,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->wol = 0;
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ /* Only these subdevices support WOL */
+ switch (pdev->subsystem_device) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0)
+ break;
+ case IXGBE_SUBDEV_ID_82599_SFP:
adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ }
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 9a56fd74e67..7cf1e1f56c6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u32 max_retry = 10;
u32 retry = 0;
u16 swfw_mask = 0;
- bool nack = 1;
+ bool nack = true;
*data = 0;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
s32 i;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
@@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
s32 status = 0;
s32 i;
u32 i2cctl;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -1457,6 +1457,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
i2cctl |= IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
return status;
}
@@ -1473,7 +1474,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
u32 timeout = 10;
- bool ack = 1;
+ bool ack = true;
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1646,9 +1647,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
bool data;
if (*i2cctl & IXGBE_I2C_DATA_IN)
- data = 1;
+ data = true;
else
- data = 0;
+ data = false;
return data;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 00fcd39ad66..cf6812dd143 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -572,7 +572,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(new_mac, vf_mac, ETH_ALEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index df04f1a3857..e8badab0335 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6c5cca808bd..802bfa0f62c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -57,6 +57,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -65,6 +66,7 @@
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
@@ -1710,8 +1712,6 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
-
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2802,9 +2802,9 @@ struct ixgbe_eeprom_info {
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
- u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
/* prefix for World Wide Node Name (WWNN) */
u16 wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e5101e91b6b..8cc5eccfd65 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -751,16 +751,20 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
/*
- * In order for the blink bit in the LED control register
- * to work, link and speed must be forced in the MAC. We
- * will reverse this when we stop the blinking.
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
*/
- macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
- macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
- IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (link_up == false) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 78abb6f1a86..2eb89cb94a0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -35,7 +35,6 @@
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 1
#define IXGBE_VF_MAX_RX_QUEUES 1
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
/* Link speed */
typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e29ba4506b7..dc8e6511c64 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -27,6 +27,8 @@
/* ethtool support for ixgbevf */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -265,11 +267,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
- strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbevf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -549,8 +551,8 @@ static const u32 register_test_patterns[] = {
writel((W & M), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if ((W & M) != (val & M)) { \
- printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
- "expected 0x%08X\n", R, (val & M), (W & M)); \
+ pr_err("set/check reg %04X test failed: got 0x%08X expected " \
+ "0x%08X\n", R, (val & M), (W & M)); \
*data = R; \
writel(before, (adapter->hw.hw_addr + R)); \
return 1; \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4c8e19951d5..891162d1610 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -29,6 +29,9 @@
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
@@ -363,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
if (!bi->page_dma &&
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(adapter->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
@@ -1400,7 +1403,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1409,9 +1412,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1420,6 +1425,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1437,7 +1444,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
int count = 0;
if ((netdev_uc_count(netdev)) > 10) {
- printk(KERN_ERR "Too many unicast filters - No Space\n");
+ pr_err("Too many unicast filters - No Space\n");
return -ENOSPC;
}
@@ -2135,7 +2142,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
err = ixgbevf_alloc_queues(adapter);
if (err) {
- printk(KERN_ERR "Unable to allocate memory for queues\n");
+ pr_err("Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
@@ -2189,7 +2196,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
} else {
err = hw->mac.ops.init_hw(hw);
if (err) {
- printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ pr_err("init_shared_code failed: %d\n", err);
goto out;
}
}
@@ -2630,8 +2637,8 @@ static int ixgbevf_open(struct net_device *netdev)
* the vf can't start. */
if (hw->adapter_stopped) {
err = IXGBE_ERR_MBX;
- printk(KERN_ERR "Unable to start - perhaps the PF"
- " Driver isn't up yet\n");
+ pr_err("Unable to start - perhaps the PF Driver isn't "
+ "up yet\n");
goto err_setup_reset;
}
}
@@ -2842,10 +2849,8 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit())) {
- printk(KERN_WARNING
- "partial checksum but "
- "proto=%x!\n",
- skb->protocol);
+ pr_warn("partial checksum but "
+ "proto=%x!\n", skb->protocol);
}
break;
}
@@ -3249,7 +3254,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+static int ixgbevf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3414,7 +3420,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
- printk(KERN_ERR "invalid MAC address\n");
+ pr_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
}
@@ -3535,10 +3541,10 @@ static struct pci_driver ixgbevf_driver = {
static int __init ixgbevf_init_module(void)
{
int ret;
- printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
- ixgbevf_driver_version);
+ pr_info("%s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
- printk(KERN_INFO "%s\n", ixgbevf_copyright);
+ pr_info("%s\n", ixgbevf_copyright);
ret = pci_register_driver(&ixgbevf_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index ea393eb03f3..9d38a94a348 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -47,8 +47,8 @@
#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn)))
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 189200eeca2..5e4d5e5cdf3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -39,29 +39,29 @@
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
-#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
#define IXGBE_VTIVAR_MISC 0x00140
-#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
-#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
#define IXGBE_VFPSRTYPE 0x00300
-#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
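The extra parentheses around the macro arguments above matter once the argument is an expression rather than a plain variable. A tiny sketch with hypothetical register macros:

	#include <stdio.h>

	/* Unparenthesized argument: the multiply binds to only part of the expression. */
	#define REG_BAD(x)  (0x01000 + (0x40 * x))
	/* Parenthesized argument, as in the patch. */
	#define REG_GOOD(x) (0x01000 + (0x40 * (x)))

	int main(void)
	{
		int i = 2;
		/* REG_BAD(i + 1) expands to 0x01000 + (0x40 * i + 1): wrong offset. */
		printf("bad=0x%05X good=0x%05X\n", REG_BAD(i + 1), REG_GOOD(i + 1));
		return 0;
	}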
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index aa3682e8c47..21533e30036 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -108,7 +108,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
- memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
@@ -211,7 +211,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
- memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
return 0;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7becff1f387..27d651a80f3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
}
static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+ phy_data);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+ u32 ctrl1000, phy_data;
+
+ jme_phy_off(jme);
+ jme_phy_on(jme);
+ /* Enable PHY test mode 1 */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ ctrl1000 |= PHY_GAD_TEST_MODE_1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+ phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+ JM_PHY_EXT_COMM_2_CALI_ENABLE;
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+ msleep(20);
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+ JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+ JM_PHY_EXT_COMM_2_CALI_LATCH);
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+ /* Disable PHY test mode */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+ return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+ u32 phy_comm0 = 0, phy_comm1 = 0;
+ u8 nic_ctrl;
+
+ pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+ if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+ return 0;
+
+ switch (jme->pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMC250:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ break;
+ case PCI_DEVICE_ID_JMICRON_JMC260:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+ phy_comm0 = 0x608A;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+ phy_comm0 = 0x408A;
+ break;
+ default:
+ return -ENODEV;
+ }
+ if (phy_comm0)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+ if (phy_comm1)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+ return 0;
+}
+
+static int
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_reset_link(jme);
return 0;
@@ -1883,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
struct page *page,
u32 page_offset,
u32 len,
- u8 hidma)
+ bool hidma)
{
dma_addr_t dmaaddr;
@@ -1917,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc, *ctxdesc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
- u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+ bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
@@ -2292,9 +2399,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(jme->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
@@ -2620,8 +2727,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
jme->msg_enable = value;
}
-static u32
-jme_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
{
if (netdev->mtu > 1900)
features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
@@ -2629,7 +2736,7 @@ jme_fix_features(struct net_device *netdev, u32 features)
}
static int
-jme_set_features(struct net_device *netdev, u32 features)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb..4304072bd3c 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
RXMCS_CHECKSUM,
};
+/* External PHY common registers */
+
+#define PHY_GAD_TEST_MODE_1 0x00002000
+#define PHY_GAD_TEST_MODE_MSK 0x0000E000
+#define JM_PHY_SPEC_REG_READ 0x00004000
+#define JM_PHY_SPEC_REG_WRITE 0x00008000
+#define PHY_CALIBRATION_DELAY 20
+#define JM_PHY_SPEC_ADDR_REG 0x1E
+#define JM_PHY_SPEC_DATA_REG 0x1F
+
+#define JM_PHY_EXT_COMM_0_REG 0x30
+#define JM_PHY_EXT_COMM_1_REG 0x31
+#define JM_PHY_EXT_COMM_2_REG 0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
+#define PCI_PRIV_SHARE_NICCTRL 0xF5
+#define JME_FLAG_PHYEA_ENABLE 0x2
+
/*
* Wakeup Frame setup interface registers
*/
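The jme.h constants above describe an indirect access scheme: the extended registers JM_PHY_EXT_COMM_0/1/2 are reached by writing the target register number, OR'ed with a read or write opcode, into JM_PHY_SPEC_ADDR_REG (0x1E), while data moves through JM_PHY_SPEC_DATA_REG (0x1F). The jme_phy_setEA() hunk earlier in this patch drives that scheme through helpers named jme_phy_specreg_read()/jme_phy_specreg_write(). A minimal sketch of such a helper pair, assuming the driver's existing jme_mdio_read()/jme_mdio_write() accessors (illustrative, not copied verbatim from the patch):

/* Sketch of the indirect "spec register" access used by jme_phy_setEA() */
static int jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
		       JM_PHY_SPEC_ADDR_REG, JM_PHY_SPEC_REG_READ | specreg);
	return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
			     JM_PHY_SPEC_DATA_REG);
}

static void jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
{
	/* write the data word first, then the address/opcode word that latches it */
	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
		       JM_PHY_SPEC_DATA_REG, phy_data);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id,
		       JM_PHY_SPEC_ADDR_REG, JM_PHY_SPEC_REG_WRITE | ext_reg);
}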
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d8430f487b8..6ad094f176f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1230,18 +1230,7 @@ static struct platform_driver korina_driver = {
.remove = korina_remove,
};
-static int __init korina_init_module(void)
-{
- return platform_driver_register(&korina_driver);
-}
-
-static void korina_cleanup_module(void)
-{
- return platform_driver_unregister(&korina_driver);
-}
-
-module_init(korina_init_module);
-module_exit(korina_cleanup_module);
+module_platform_driver(korina_driver);
MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6bb2b9506ca..0b3567ab812 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -34,6 +34,8 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <asm/checksum.h>
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 194a0311380..e87847e32dd 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1502,10 +1502,12 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
- strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "platform", 32);
+ strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
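strncpy() into the fixed 32-byte ethtool_drvinfo fields hard-codes the field width and does not guarantee NUL termination when the source fills the buffer; strlcpy() bounds the copy by the destination size and always terminates. The same conversion is applied to jme, skge and sky2 elsewhere in this patch. The pattern, with a hypothetical driver name and version:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	/* size the copy from the destination, not from a hard-coded 32 */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}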
@@ -1578,10 +1580,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
static int
-mv643xx_eth_set_features(struct net_device *dev, u32 features)
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- u32 rx_csum = features & NETIF_F_RXCSUM;
+ bool rx_csum = features & NETIF_F_RXCSUM;
wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
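netdev_features_t is 64 bits wide, so storing a masked test such as features & NETIF_F_RXCSUM in a u32 (or u8) silently truncates once feature bits move above bit 31; hence the systematic switch to netdev_features_t for feature masks and to bool for simple on/off results in jme, mv643xx_eth and sky2. A minimal sketch of an ndo_fix_features/ndo_set_features pair using the new type (the 1900-byte MTU limit mirrors jme; example_hw_set_rx_csum() is a hypothetical helper):

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* e.g. the hardware cannot checksum or TSO oversized frames */
	if (dev->mtu > 1900)
		features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
	return features;
}

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	bool rx_csum = features & NETIF_F_RXCSUM;	/* bool, never u32 */

	example_hw_set_rx_csum(netdev_priv(dev), rx_csum);
	return 0;
}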
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d17d0624c5e..5ec409e3da0 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1645,18 +1645,7 @@ static struct platform_driver pxa168_eth_driver = {
},
};
-static int __init pxa168_init_module(void)
-{
- return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
- platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
+module_platform_driver(pxa168_eth_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c7b60839ac9..18a87a57fc0 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(skge->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct skge_stat {
@@ -2606,6 +2606,9 @@ static int skge_up(struct net_device *dev)
spin_unlock_irq(&hw->hw_lock);
napi_enable(&skge->napi);
+
+ skge_set_multicast(dev);
+
return 0;
free_tx_ring:
@@ -4039,7 +4042,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -4101,7 +4104,7 @@ static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
#else
#define SKGE_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
static void skge_shutdown(struct pci_dev *pdev)
{
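CONFIG_PM is also enabled for runtime-PM-only configurations, so guarding system sleep callbacks with it can leave the suspend/resume handlers compiled but never wired up (SIMPLE_DEV_PM_OPS only installs them under CONFIG_PM_SLEEP), producing defined-but-unused warnings; CONFIG_PM_SLEEP is the accurate guard, as the skge change above applies. The conventional shape, with hypothetical names:

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* quiesce the device, save state */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore state, restart the device */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS	(&example_pm_ops)
#else
#define EXAMPLE_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */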
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index fdc6c394c68..760c2b17dfd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -50,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.29"
+#define DRV_VERSION "1.30"
/*
* The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -68,7 +68,7 @@
#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
#define TX_MAX_PENDING 1024
-#define TX_DEF_PENDING 127
+#define TX_DEF_PENDING 63
#define TX_WATCHDOG (5 * HZ)
#define NAPI_WEIGHT 64
@@ -869,6 +869,7 @@ static void sky2_wol_init(struct sky2_port *sky2)
/* block receiver */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+ sky2_read32(hw, B0_CTST);
}
static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -1109,6 +1110,7 @@ static void tx_init(struct sky2_port *sky2)
sky2->tx_prod = sky2->tx_cons = 0;
sky2->tx_tcpsum = 0;
sky2->tx_last_mss = 0;
+ netdev_reset_queue(sky2->netdev);
le = get_tx_le(sky2, &sky2->tx_prod);
le->addr = 0;
@@ -1274,8 +1276,16 @@ static void rx_set_checksum(struct sky2_port *sky2)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
+/*
+ * Fixed initial key as seed to RSS.
+ */
+static const uint32_t rss_init_key[10] = {
+ 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43,
+ 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30
+};
+
/* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev, u32 features)
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1289,12 +1299,9 @@ static void rx_set_rss(struct net_device *dev, u32 features)
/* Program RSS initial values */
if (features & NETIF_F_RXHASH) {
- u32 key[nkeys];
-
- get_random_bytes(key, nkeys * sizeof(u32));
for (i = 0; i < nkeys; i++)
sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
- key[i]);
+ rss_init_key[i]);
/* Need to turn on (undocumented) flag to make hashing work */
sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
@@ -1396,7 +1403,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
-static void sky2_vlan_mode(struct net_device *dev, u32 features)
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1717,6 +1724,8 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
if (err)
dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
else {
+ hw->flags |= SKY2_HW_IRQ_SETUP;
+
napi_enable(&hw->napi);
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
sky2_read32(hw, B0_IMSK);
@@ -1727,7 +1736,7 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name)
/* Bring up network interface. */
-static int sky2_up(struct net_device *dev)
+static int sky2_open(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1747,6 +1756,11 @@ static int sky2_up(struct net_device *dev)
sky2_hw_up(sky2);
+ if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+ hw->chip_id == CHIP_ID_YUKON_PRM ||
+ hw->chip_id == CHIP_ID_YUKON_OP_2)
+ imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
+
/* Enable interrupts from phy/mac for port */
imask = sky2_read32(hw, B0_IMSK);
imask |= portirq_msk[port];
@@ -1958,6 +1972,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (tx_avail(sky2) <= MAX_SKB_TX_LE)
netif_stop_queue(dev);
+ netdev_sent_queue(dev, skb->len);
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
return NETDEV_TX_OK;
@@ -1989,7 +2004,8 @@ mapping_error:
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
struct net_device *dev = sky2->netdev;
- unsigned idx;
+ u16 idx;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
BUG_ON(done >= sky2->tx_ring_size);
@@ -2004,10 +2020,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
netif_printk(sky2, tx_done, KERN_DEBUG, dev,
"tx done %u\n", idx);
- u64_stats_update_begin(&sky2->tx_stats.syncp);
- ++sky2->tx_stats.packets;
- sky2->tx_stats.bytes += skb->len;
- u64_stats_update_end(&sky2->tx_stats.syncp);
+ pkts_compl++;
+ bytes_compl += skb->len;
re->skb = NULL;
dev_kfree_skb_any(skb);
@@ -2018,6 +2032,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ u64_stats_update_begin(&sky2->tx_stats.syncp);
+ sky2->tx_stats.packets += pkts_compl;
+ sky2->tx_stats.bytes += bytes_compl;
+ u64_stats_update_end(&sky2->tx_stats.syncp);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
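The netdev_reset_queue()/netdev_sent_queue()/netdev_completed_queue() calls added to sky2 in this patch hook the driver into Byte Queue Limits (BQL): the transmit path reports bytes as they are queued, the completion path reports them as the hardware finishes, and the counter is cleared whenever the ring is reinitialised so both sides stay in sync. The pattern reduced to its essentials (everything except the netdev_* helpers is a placeholder):

/* transmit path: account the bytes once the descriptors are posted */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and post descriptors to the hardware ... */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

/* TX completion: report what the hardware has finished */
static void example_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... for each completed skb: pkts_compl++; bytes_compl += skb->len; ... */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

/* ring (re)initialisation: BQL state must match an empty ring */
static void example_tx_init(struct net_device *dev)
{
	netdev_reset_queue(dev);
}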
@@ -2040,6 +2061,8 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+
+ sky2_read32(hw, B0_CTST);
}
static void sky2_hw_down(struct sky2_port *sky2)
@@ -2090,7 +2113,7 @@ static void sky2_hw_down(struct sky2_port *sky2)
}
/* Network shutdown */
-static int sky2_down(struct net_device *dev)
+static int sky2_close(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -2101,15 +2124,22 @@ static int sky2_down(struct net_device *dev)
netif_info(sky2, ifdown, dev, "disabling interface\n");
- /* Disable port IRQ */
- sky2_write32(hw, B0_IMSK,
- sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
- sky2_read32(hw, B0_IMSK);
-
if (hw->ports == 1) {
+ sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
+
napi_disable(&hw->napi);
free_irq(hw->pdev->irq, hw);
+ hw->flags &= ~SKY2_HW_IRQ_SETUP;
} else {
+ u32 imask;
+
+ /* Disable port IRQ */
+ imask = sky2_read32(hw, B0_IMSK);
+ imask &= ~portirq_msk[sky2->port];
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+
synchronize_irq(hw->pdev->irq);
napi_synchronize(&hw->napi);
}
@@ -2587,7 +2617,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
if (netif_running(dev)) {
sky2_tx_complete(sky2, last);
- /* Wake unless it's detached, and called e.g. from sky2_down() */
+ /* Wake unless it's detached, and called e.g. from sky2_close() */
if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
netif_wake_queue(dev);
}
@@ -3258,7 +3288,6 @@ static void sky2_reset(struct sky2_hw *hw)
hw->chip_id == CHIP_ID_YUKON_PRM ||
hw->chip_id == CHIP_ID_YUKON_OP_2) {
u16 reg;
- u32 msk;
if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
@@ -3281,11 +3310,6 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
- /* enable PHY Quick Link */
- msk = sky2_read32(hw, B0_IMSK);
- msk |= Y2_IS_PHY_QLNK;
- sky2_write32(hw, B0_IMSK, msk);
-
/* check if PSMv2 was running before */
reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
if (reg & PCI_EXP_LNKCTL_ASPMC)
@@ -3383,7 +3407,7 @@ static void sky2_detach(struct net_device *dev)
netif_tx_lock(dev);
netif_device_detach(dev); /* stop txq */
netif_tx_unlock(dev);
- sky2_down(dev);
+ sky2_close(dev);
}
}
@@ -3393,7 +3417,7 @@ static int sky2_reattach(struct net_device *dev)
int err = 0;
if (netif_running(dev)) {
- err = sky2_up(dev);
+ err = sky2_open(dev);
if (err) {
netdev_info(dev, "could not restart %d\n", err);
dev_close(dev);
@@ -3410,10 +3434,13 @@ static void sky2_all_down(struct sky2_hw *hw)
{
int i;
- sky2_read32(hw, B0_IMSK);
- sky2_write32(hw, B0_IMSK, 0);
- synchronize_irq(hw->pdev->irq);
- napi_disable(&hw->napi);
+ if (hw->flags & SKY2_HW_IRQ_SETUP) {
+ sky2_read32(hw, B0_IMSK);
+ sky2_write32(hw, B0_IMSK, 0);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_disable(&hw->napi);
+ }
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
@@ -3446,11 +3473,12 @@ static void sky2_all_up(struct sky2_hw *hw)
netif_wake_queue(dev);
}
- sky2_write32(hw, B0_IMSK, imask);
- sky2_read32(hw, B0_IMSK);
-
- sky2_read32(hw, B0_Y2_SP_LISR);
- napi_enable(&hw->napi);
+ if (hw->flags & SKY2_HW_IRQ_SETUP) {
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+ }
}
static void sky2_restart(struct work_struct *work)
@@ -3623,10 +3651,10 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct sky2_stat {
@@ -4071,6 +4099,16 @@ static int sky2_set_coalesce(struct net_device *dev,
return 0;
}
+/*
+ * Hardware is limited to min of 128 and max of 2048 for ring size
+ * and rounded up to next power of two
+ * to avoid division in the modulus calculation
+ */
+static unsigned long roundup_ring_size(unsigned long pending)
+{
+ return max(128ul, roundup_pow_of_two(pending+1));
+}
+
static void sky2_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
@@ -4098,7 +4136,7 @@ static int sky2_set_ringparam(struct net_device *dev,
sky2->rx_pending = ering->rx_pending;
sky2->tx_pending = ering->tx_pending;
- sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1);
+ sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
return sky2_reattach(dev);
}
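A quick check of roundup_ring_size() with the constants used in this patch:

/*
 * Worked examples for roundup_ring_size():
 *   pending = 63   (new TX_DEF_PENDING): roundup_pow_of_two(64)   = 64   -> max(128, 64)   = 128
 *   pending = 127  (old default):        roundup_pow_of_two(128)  = 128  -> max(128, 128)  = 128
 *   pending = 1024 (TX_MAX_PENDING):     roundup_pow_of_two(1025) = 2048 -> max(128, 2048) = 2048
 */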
@@ -4281,7 +4319,8 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
-static u32 sky2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
const struct sky2_port *sky2 = netdev_priv(dev);
const struct sky2_hw *hw = sky2->hw;
@@ -4305,13 +4344,13 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int sky2_set_features(struct net_device *dev, u32 features)
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_RXCSUM) {
- u32 on = features & NETIF_F_RXCSUM;
+ bool on = features & NETIF_F_RXCSUM;
sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
@@ -4556,7 +4595,7 @@ static int sky2_device_event(struct notifier_block *unused,
struct net_device *dev = ptr;
struct sky2_port *sky2 = netdev_priv(dev);
- if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
+ if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug)
return NOTIFY_DONE;
switch (event) {
@@ -4621,8 +4660,8 @@ static __exit void sky2_debug_cleanup(void)
not allowing netpoll on second port */
static const struct net_device_ops sky2_netdev_ops[2] = {
{
- .ndo_open = sky2_up,
- .ndo_stop = sky2_down,
+ .ndo_open = sky2_open,
+ .ndo_stop = sky2_close,
.ndo_start_xmit = sky2_xmit_frame,
.ndo_do_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -4638,8 +4677,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
#endif
},
{
- .ndo_open = sky2_up,
- .ndo_stop = sky2_down,
+ .ndo_open = sky2_open,
+ .ndo_stop = sky2_close,
.ndo_start_xmit = sky2_xmit_frame,
.ndo_do_ioctl = sky2_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -4692,7 +4731,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
spin_lock_init(&sky2->phy_lock);
sky2->tx_pending = TX_DEF_PENDING;
- sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1);
+ sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING);
sky2->rx_pending = RX_DEF_PENDING;
hw->dev[port] = dev;
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 0af31b8b5f1..ff6f58bf822 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2287,6 +2287,7 @@ struct sky2_hw {
#define SKY2_HW_RSS_BROKEN 0x00000100
#define SKY2_HW_VLAN_BROKEN 0x00000200
#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
+#define SKY2_HW_IRQ_SETUP 0x00000800
u8 chip_id;
u8 chip_rev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a1585..4a40ab967ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+ mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 45aea9c3ae2..915e947b422 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
- "Reset device on internal errors if non-zero (default 1)");
+ "Reset device on internal errors if non-zero"
+ " (default 1, in SRIOV mode default is 0)");
static void dump_err_buf(struct mlx4_dev *dev)
{
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
phys_addr_t addr;
+ /* If we are in SRIOV, the default of the module param must be 0 */
+ if (mlx4_is_mfunc(dev))
+ internal_err_reset = 0;
+
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78f5a1a0b8c..978f593094c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -39,12 +39,18 @@
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
#include <asm/io.h>
#include "mlx4.h"
+#include "fw.h"
#define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK 0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
enum {
/* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
int next;
u64 out_param;
u16 token;
+ u8 fw_status;
};
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr);
+
static int mlx4_status_to_errno(u8 status)
{
static const int trans_table[] = {
@@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status)
return trans_table[status];
}
+static u8 mlx4_errno_to_status(int errno)
+{
+ switch (errno) {
+ case -EPERM:
+ return CMD_STAT_BAD_OP;
+ case -EINVAL:
+ return CMD_STAT_BAD_PARAM;
+ case -ENXIO:
+ return CMD_STAT_BAD_SYS_STATE;
+ case -EBUSY:
+ return CMD_STAT_RESOURCE_BUSY;
+ case -ENOMEM:
+ return CMD_STAT_EXCEED_LIM;
+ case -ENFILE:
+ return CMD_STAT_ICM_ERROR;
+ default:
+ return CMD_STAT_INTERNAL_ERR;
+ }
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 status = readl(&priv->mfunc.comm->slave_read);
+
+ return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 val;
+
+ priv->cmd.comm_toggle ^= 1;
+ val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+ __raw_writel((__force u32) cpu_to_be32(val),
+ &priv->mfunc.comm->slave_write);
+ mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned long end;
+ int err = 0;
+ int ret_from_pending = 0;
+
+ /* First, verify that the master reports correct status */
+ if (comm_pending(dev)) {
+ mlx4_warn(dev, "Communication channel is not idle."
+ "my toggle is %d (cmd:0x%x)\n",
+ priv->cmd.comm_toggle, cmd);
+ return -EAGAIN;
+ }
+
+ /* Write command */
+ down(&priv->cmd.poll_sem);
+ mlx4_comm_cmd_post(dev, cmd, param);
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (comm_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+ ret_from_pending = comm_pending(dev);
+ if (ret_from_pending) {
+ /* check if the slave is trying to boot in the middle of
+ * FLR process. The only non-zero result in the RESET command
+ * is MLX4_DELAY_RESET_SLAVE */
+ if ((MLX4_COMM_CMD_RESET == cmd)) {
+ mlx4_warn(dev, "Got slave FLRed from Communication"
+ " channel (ret:0x%x)\n", ret_from_pending);
+ err = MLX4_DELAY_RESET_SLAVE;
+ } else {
+ mlx4_warn(dev, "Communication channel timed out\n");
+ err = -ETIMEDOUT;
+ }
+ }
+
+ up(&priv->cmd.poll_sem);
+ return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+ u16 param, unsigned long timeout)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_context *context;
+ int err = 0;
+
+ down(&cmd->event_sem);
+
+ spin_lock(&cmd->context_lock);
+ BUG_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ mlx4_comm_cmd_post(dev, op, param);
+
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = context->result;
+ if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
+ goto out;
+ }
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ up(&cmd->event_sem);
+ return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+ return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
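/*
 * Sketch (not part of the patch) of one slave-to-master exchange over the
 * communication channel set up above.  Bit 31 of both doorbell words is the
 * toggle, bits 16..23 carry the command and bits 0..15 the parameter:
 * comm_pending() compares the toggle the master last echoed into slave_read
 * with the locally tracked one, and mlx4_comm_cmd_post() flips the local
 * toggle and writes the new word to slave_write.  The helper below restates
 * mlx4_comm_cmd_poll() in miniature, purely for illustration.
 */
static int example_comm_xchg(struct mlx4_dev *dev, u8 cmd, u16 param,
			     unsigned long timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	if (comm_pending(dev))			/* master has not consumed the last command */
		return -EAGAIN;

	mlx4_comm_cmd_post(dev, cmd, param);	/* flip toggle, ring the doorbell */

	while (comm_pending(dev)) {		/* wait for the master to echo the toggle */
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
		cond_resched();
	}
	return 0;
}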
static int cmd_pending(struct mlx4_dev *dev)
{
u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
- if (time_after_eq(jiffies, end))
+ if (time_after_eq(jiffies, end)) {
+ mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
goto out;
+ }
cond_resched();
}
@@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
(cmd->toggle << HCR_T_BIT) |
(event ? (1 << HCR_E_BIT) : 0) |
(op_modifier << HCR_OPMOD_SHIFT) |
- op), hcr + 6);
+ op), hcr + 6);
/*
* Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@ out:
return ret;
}
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+ int ret;
+
+ down(&priv->cmd.slave_sem);
+ vhcr->in_param = cpu_to_be64(in_param);
+ vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+ vhcr->in_modifier = cpu_to_be32(in_modifier);
+ vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+ vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+ vhcr->status = 0;
+ vhcr->flags = !!(priv->cmd.use_events) << 6;
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ }
+ } else {
+ ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+ MLX4_COMM_TIME + timeout);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ } else
+ mlx4_err(dev, "failed execution of VHCR_POST command"
+ "opcode 0x%x\n", op);
+ }
+ up(&priv->cmd.slave_sem);
+ return ret;
+}
+
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
void __iomem *hcr = priv->cmd.hcr;
int err = 0;
unsigned long end;
+ u32 stat;
down(&priv->cmd.poll_sem);
@@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
- err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
- __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+ stat = be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+ err = mlx4_status_to_errno(stat);
+ if (err)
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, stat);
out:
up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
if (token != context->token)
return;
+ context->fw_status = status;
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
@@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
- if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
err = -EBUSY;
goto out;
}
err = context->result;
- if (err)
+ if (err) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
goto out;
+ }
if (out_is_imm)
*out_param = context->out_param;
@@ -311,17 +521,1046 @@ out:
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
- u16 op, unsigned long timeout)
+ u16 op, unsigned long timeout, int native)
{
- if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
- else
- return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ else
+ return mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ }
+ return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
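/*
 * Call-site view of the new 'native' argument to __mlx4_cmd(): callers pick
 * MLX4_CMD_NATIVE to go straight to the HCR (master or non-multifunction
 * devices) or MLX4_CMD_WRAPPED to let the command be policed by the wrappers
 * in the cmd_info table below.  The MLX4_CMD_EXAMPLE opcode here is
 * hypothetical; the selectors and the mlx4_cmd()/mlx4_cmd_box()/mlx4_cmd_imm()
 * wrappers are the real interfaces used throughout this patch:
 *
 *   before: mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_EXAMPLE, MLX4_CMD_TIME_CLASS_A);
 *   after : mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_EXAMPLE, MLX4_CMD_TIME_CLASS_A,
 *                    MLX4_CMD_WRAPPED);
 */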
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+ int slave, u64 slave_addr,
+ int size, int is_read)
+{
+ u64 in_param;
+ u64 out_param;
+
+ if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+ (slave & ~0x7f) | (size & 0xff)) {
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+ "master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
+ return -EINVAL;
+ }
+
+ if (is_read) {
+ in_param = (u64) slave | slave_addr;
+ out_param = (u64) dev->caps.function | master_addr;
+ } else {
+ in_param = (u64) dev->caps.function | master_addr;
+ out_param = (u64) slave | slave_addr;
+ }
+
+ return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+ MLX4_CMD_ACCESS_MEM,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+ if (cmd->encode_slave_id) {
+ in_param &= 0xffffffffffffff00ll;
+ in_param |= slave;
+ }
+
+ err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+ vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm)
+ vhcr->out_param = out_param;
+
+ return err;
+}
+
+static struct mlx4_cmd_info cmd_info[] = {
+ {
+ .opcode = MLX4_CMD_QUERY_FW,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_HCA,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_DEV_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_ADAPTER,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_INIT_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_CLOSE_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CLOSE_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_PORT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_PORT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MAP_EQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MAP_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_EQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_NOP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_ALLOC_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ALLOC_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_FREE_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FREE_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_MPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_MPT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_MPT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_READ_MTT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_WRITE_MTT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_WRITE_MTT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SYNC_TPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_CQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_CQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MODIFY_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MODIFY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_SRQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_SRQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_ARM_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ARM_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RST2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_RST2INIT_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2RTR_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT2RTR_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQERR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2ERR_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2SQD_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2SQD_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2RST_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_2RST_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_QP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_UNSUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_IF_STAT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_IF_STAT_wrapper
+ },
+ /* Native multicast commands are not available for guests */
+ {
+ .opcode = MLX4_CMD_QP_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_PROMISC,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_PROMISC_wrapper
+ },
+ /* Ethernet specific commands */
+ {
+ .opcode = MLX4_CMD_SET_VLAN_FLTR,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_MCAST_FLTR,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_DUMP_ETH_STATS,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INFORM_FLR_DONE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+};
+
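/*
 * Sketch (not from the patch) of the dispatch idiom the cmd_info[] table
 * above enables: mlx4_master_process_vhcr() below does a linear scan keyed
 * on the opcode, then uses has_inbox/has_outbox/out_is_imm to marshal the
 * mailboxes and the verify/wrapper hooks to police what a slave may do.
 */
static const struct mlx4_cmd_info *example_find_cmd(u16 op)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i)
		if (cmd_info[i].opcode == op)
			return &cmd_info[i];
	return NULL;	/* unknown command: caller reports CMD_STAT_BAD_PARAM */
}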
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_info *cmd = NULL;
+ struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+ struct mlx4_vhcr *vhcr;
+ struct mlx4_cmd_mailbox *inbox = NULL;
+ struct mlx4_cmd_mailbox *outbox = NULL;
+ u64 in_param;
+ u64 out_param;
+ int ret = 0;
+ int i;
+ int err = 0;
+
+ /* Create sw representation of Virtual HCR */
+ vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+ if (!vhcr)
+ return -ENOMEM;
+
+ /* DMA in the vHCR */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr_cmd),
+ MLX4_ACCESS_MEM_ALIGN), 1);
+ if (ret) {
+ mlx4_err(dev, "%s:Failed reading vhcr"
+ "ret: 0x%x\n", __func__, ret);
+ kfree(vhcr);
+ return ret;
+ }
+ }
+
+ /* Fill SW VHCR fields */
+ vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+ vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+ vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+ vhcr->token = be16_to_cpu(vhcr_cmd->token);
+ vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+ vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+ vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+ /* Lookup command */
+ for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+ if (vhcr->op == cmd_info[i].opcode) {
+ cmd = &cmd_info[i];
+ break;
+ }
+ }
+ if (!cmd) {
+ mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+ vhcr->op, slave);
+ vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+ goto out_status;
+ }
+
+ /* Read inbox */
+ if (cmd->has_inbox) {
+ vhcr->in_param &= INBOX_MASK;
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ inbox = NULL;
+ goto out_status;
+ }
+
+ if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+ vhcr->in_param,
+ MLX4_MAILBOX_SIZE, 1)) {
+ mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+ __func__, cmd->opcode);
+ vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+ goto out_status;
+ }
+ }
+
+ /* Apply permission and bound checks if applicable */
+ if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+ "checks for resource_id:%d\n", vhcr->op, slave,
+ vhcr->in_modifier);
+ vhcr_cmd->status = CMD_STAT_BAD_OP;
+ goto out_status;
+ }
+
+ /* Allocate outbox */
+ if (cmd->has_outbox) {
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ outbox = NULL;
+ goto out_status;
+ }
+ }
+
+ /* Execute the command! */
+ if (cmd->wrapper) {
+ err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+ cmd);
+ if (cmd->out_is_imm)
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ } else {
+ in_param = cmd->has_inbox ? (u64) inbox->dma :
+ vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma :
+ vhcr->out_param;
+ err = __mlx4_cmd(dev, in_param, &out_param,
+ cmd->out_is_imm, vhcr->in_modifier,
+ vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm) {
+ vhcr->out_param = out_param;
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ }
+ }
+
+ if (err) {
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+ " error:%d, status %d\n",
+ vhcr->op, slave, vhcr->errno, err);
+ vhcr_cmd->status = mlx4_errno_to_status(err);
+ goto out_status;
+ }
+
+
+ /* Write outbox if command completed successfully */
+ if (cmd->has_outbox && !vhcr_cmd->status) {
+ ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+ vhcr->out_param,
+ MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+ if (ret) {
+ /* If we failed to write back the outbox after the
+ * command was successfully executed, we must fail this
+ * slave, as it is now in undefined state */
+ mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+ goto out;
+ }
+ }
+
+out_status:
+ /* DMA back vhcr result */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr),
+ MLX4_ACCESS_MEM_ALIGN),
+ MLX4_CMD_WRAPPED);
+ if (ret)
+ mlx4_err(dev, "%s:Failed writing vhcr result\n",
+ __func__);
+ else if (vhcr->e_bit &&
+ mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+ mlx4_warn(dev, "Failed to generate command completion "
+ "eqe for slave %d\n", slave);
+ }
+
+out:
+ kfree(vhcr);
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+ u16 param, u8 toggle)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ u32 reply;
+ u32 slave_status = 0;
+ u8 is_going_down = 0;
+
+ slave_state[slave].comm_toggle ^= 1;
+ reply = (u32) slave_state[slave].comm_toggle << 31;
+ if (toggle != slave_state[slave].comm_toggle) {
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
+ "STATE COMPROMISIED ***\n", toggle, slave);
+ goto reset_slave;
+ }
+ if (cmd == MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+ slave_state[slave].active = false;
+ /* check if we are in the middle of FLR process,
+ if so return "retry" status to the slave */
+ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ slave_status = MLX4_DELAY_RESET_SLAVE;
+ goto inform_slave_state;
+ }
+
+ /* write the version in the event field */
+ reply |= mlx4_comm_get_version();
+
+ goto reset_slave;
+ }
+ /*command from slave in the middle of FLR*/
+ if (cmd != MLX4_COMM_CMD_RESET &&
+ MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
+ "in the middle of FLR\n", slave, cmd);
+ return;
+ }
+
+ switch (cmd) {
+ case MLX4_COMM_CMD_VHCR0:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma = ((u64) param) << 48;
+ priv->mfunc.master.slave_state[slave].cookie = 0;
+ mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ break;
+ case MLX4_COMM_CMD_VHCR1:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+ break;
+ case MLX4_COMM_CMD_VHCR2:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+ break;
+ case MLX4_COMM_CMD_VHCR_EN:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= param;
+ slave_state[slave].active = true;
+ break;
+ case MLX4_COMM_CMD_VHCR_POST:
+ if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+ (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+ goto reset_slave;
+ down(&priv->cmd.slave_sem);
+ if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+ mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+ " reseting slave.\n", slave);
+ up(&priv->cmd.slave_sem);
+ goto reset_slave;
+ }
+ up(&priv->cmd.slave_sem);
+ break;
+ default:
+ mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+ goto reset_slave;
+ }
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = cmd;
+ else
+ is_going_down = 1;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ if (is_going_down) {
+ mlx4_warn(dev, "Slave is going down aborting command(%d)"
+ " executing from slave:%d\n",
+ cmd, slave);
+ return;
+ }
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ mmiowb();
+
+ return;
+
+reset_slave:
+ /* cleanup any slave resources */
+ mlx4_delete_all_resources_for_slave(dev, slave);
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /* with slave in the middle of FLR, no need to clean resources again. */
+inform_slave_state:
+ memset(&slave_state[slave].event_eq, 0,
+ sizeof(struct mlx4_slave_event_eq_info));
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ wmb();
+}
+
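/*
 * Sketch (not from the patch) of the slave-side counterpart to the
 * VHCR0..VHCR_EN sequence handled in mlx4_master_do_cmd() above: the 64-bit
 * vhcr_dma address is handed to the master 16 bits at a time, high word
 * first, and the final VHCR_EN marks the slave active.  mlx4_comm_cmd() and
 * the MLX4_COMM_CMD_* opcodes are the real interfaces; the surrounding
 * function and its error handling are illustrative only.
 */
static int example_post_vhcr_addr(struct mlx4_dev *dev, u64 vhcr_dma,
				  unsigned long timeout)
{
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, vhcr_dma >> 48, timeout) ||
	    mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, vhcr_dma >> 32, timeout) ||
	    mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, vhcr_dma >> 16, timeout) ||
	    mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, vhcr_dma, timeout))
		return -EIO;
	return 0;
}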
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work,
+ struct mlx4_mfunc_master_ctx,
+ comm_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ __be32 *bit_vec;
+ u32 comm_cmd;
+ u32 vec;
+ int i, j, slave;
+ int toggle;
+ int served = 0;
+ int reported = 0;
+ u32 slt;
+
+ bit_vec = master->comm_arm_bit_vector;
+ for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+ vec = be32_to_cpu(bit_vec[i]);
+ for (j = 0; j < 32; j++) {
+ if (!(vec & (1 << j)))
+ continue;
+ ++reported;
+ slave = (i * 32) + j;
+ comm_cmd = swab32(readl(
+ &mfunc->comm[slave].slave_write));
+ slt = swab32(readl(&mfunc->comm[slave].slave_read))
+ >> 31;
+ toggle = comm_cmd >> 31;
+ if (toggle != slt) {
+ if (master->slave_state[slave].comm_toggle
+ != slt) {
+ printk(KERN_INFO "slave %d out of sync."
+ " read toggle %d, state toggle %d. "
+ "Resynching.\n", slave, slt,
+ master->slave_state[slave].comm_toggle);
+ master->slave_state[slave].comm_toggle =
+ slt;
+ }
+ mlx4_master_do_cmd(dev, slave,
+ comm_cmd >> 16 & 0xff,
+ comm_cmd & 0xffff, toggle);
+ ++served;
+ }
+ }
+ }
+
+ if (reported && reported != served)
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+ " but %d were served\n",
+ reported, served);
+
+ if (mlx4_ARM_COMM_CHANNEL(dev))
+ mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
+static int sync_toggles(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int wr_toggle;
+ int rd_toggle;
+ unsigned long end;
+
+ wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+ end = jiffies + msecs_to_jiffies(5000);
+
+ while (time_before(jiffies, end)) {
+ rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+ if (rd_toggle == wr_toggle) {
+ priv->cmd.comm_toggle = rd_toggle;
+ return 0;
+ }
+
+ cond_resched();
+ }
+
+ /*
+ * we could reach here if for example the previous VM using this
+ * function misbehaved and left the channel with unsynced state. We
+ * should fix this here and give this VM a chance to use a properly
+ * synced channel
+ */
+ mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+ priv->cmd.comm_toggle = 0;
+
+ return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i, err, port;
+
+ priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ &priv->mfunc.vhcr_dma,
+ GFP_KERNEL);
+ if (!priv->mfunc.vhcr) {
+ mlx4_err(dev, "Couldn't allocate vhcr.\n");
+ return -ENOMEM;
+ }
+
+ if (mlx4_is_master(dev))
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+ priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+ else
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, 2) +
+ MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+ if (!priv->mfunc.comm) {
+ mlx4_err(dev, "Couldn't map communication vector.\n");
+ goto err_vhcr;
+ }
+
+ if (mlx4_is_master(dev)) {
+ priv->mfunc.master.slave_state =
+ kzalloc(dev->num_slaves *
+ sizeof(struct mlx4_slave_state), GFP_KERNEL);
+ if (!priv->mfunc.master.slave_state)
+ goto err_comm;
+
+ for (i = 0; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_write);
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_read);
+ mmiowb();
+ for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ s_state->vlan_filter[port] =
+ kzalloc(sizeof(struct mlx4_vlan_fltr),
+ GFP_KERNEL);
+ if (!s_state->vlan_filter[port]) {
+ if (--port)
+ kfree(s_state->vlan_filter[port]);
+ goto err_slaves;
+ }
+ INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+ }
+ spin_lock_init(&s_state->lock);
+ }
+
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+ INIT_WORK(&priv->mfunc.master.comm_work,
+ mlx4_master_comm_channel);
+ INIT_WORK(&priv->mfunc.master.slave_event_work,
+ mlx4_gen_slave_eqe);
+ INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+ mlx4_master_handle_slave_flr);
+ spin_lock_init(&priv->mfunc.master.slave_state_lock);
+ priv->mfunc.master.comm_wq =
+ create_singlethread_workqueue("mlx4_comm");
+ if (!priv->mfunc.master.comm_wq)
+ goto err_slaves;
+
+ if (mlx4_init_resource_tracker(dev))
+ goto err_thread;
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ err = mlx4_ARM_COMM_CHANNEL(dev);
+ if (err) {
+ mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+ err);
+ goto err_resource;
+ }
+
+ } else {
+ err = sync_toggles(dev);
+ if (err) {
+ mlx4_err(dev, "Couldn't sync toggles\n");
+ goto err_comm;
+ }
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ }
+ return 0;
+
+err_resource:
+ mlx4_free_resource_tracker(dev);
+err_thread:
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+ while (--i) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+err_comm:
+ iounmap(priv->mfunc.comm);
+err_vhcr:
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ return -ENOMEM;
+}
+
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1570,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
- priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
- MLX4_HCR_SIZE);
- if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.");
- return -ENOMEM;
+ priv->cmd.hcr = NULL;
+ priv->mfunc.vhcr = NULL;
+
+ if (!mlx4_is_slave(dev)) {
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+ MLX4_HCR_BASE, MLX4_HCR_SIZE);
+ if (!priv->cmd.hcr) {
+ mlx4_err(dev, "Couldn't map command register.\n");
+ return -ENOMEM;
+ }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
- if (!priv->cmd.pool) {
- iounmap(priv->cmd.hcr);
- return -ENOMEM;
- }
+ if (!priv->cmd.pool)
+ goto err_hcr;
return 0;
+
+err_hcr:
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
+ return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, port;
+
+ if (mlx4_is_master(dev)) {
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+ for (i = 0; i < dev->num_slaves; i++) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+ iounmap(priv->mfunc.comm);
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ }
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1622,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
pci_pool_destroy(priv->cmd.pool);
- iounmap(priv->cmd.hcr);
+
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
}
/*
@@ -365,6 +1635,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ int err = 0;
priv->cmd.context = kmalloc(priv->cmd.max_cmds *
sizeof (struct mlx4_cmd_context),
@@ -389,11 +1660,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
; /* nothing */
--priv->cmd.token_mask;
- priv->cmd.use_events = 1;
-
down(&priv->cmd.poll_sem);
+ priv->cmd.use_events = 1;
- return 0;
+ return err;
}
/*
@@ -433,7 +1703,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
@@ -442,3 +1713,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+ return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
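
A minimal standalone sketch of how the comm-channel version word returned above is packed; the CMD_CHAN_* values below are assumptions for illustration, not the driver's real constants.

/* not part of the patch: illustrative only, CMD_CHAN_* values assumed */
#include <stdint.h>
#include <stdio.h>

#define CMD_CHAN_VER	1	/* assumed */
#define CMD_CHAN_IF_REV	1	/* assumed */

static uint32_t comm_get_version(void)
{
	/* interface revision in bits 15:8, channel version in bits 7:0 */
	return ((uint32_t)CMD_CHAN_IF_REV << 8) | (uint32_t)CMD_CHAN_VER;
}

int main(void)
{
	printf("comm channel version word: 0x%04x\n",
	       (unsigned)comm_get_version());
	return 0;
}
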
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 499a5168892..475f9d6af95 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,9 +34,9 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
-#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
@@ -44,27 +44,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_cq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- __be32 logsize_usrpage;
- __be16 cq_period;
- __be16 cq_max_count;
- u8 reserved2[3];
- u8 comp_eqn;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved4[2];
- __be64 db_rec_addr;
-};
-
#define MLX4_CQ_STATUS_OK ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
if (!cq) {
- mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+ MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u32 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+ cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ int err;
+
+ *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+ if (*cqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+ if (err)
+ goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+ else {
+ *cqn = get_param_l(&out_param);
+ return 0;
+ }
+ }
+ return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+ mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+ mlx4_table_put(dev, &cq_table->table, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cqn);
+ err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+ } else
+ __mlx4_cq_free_icm(dev, cqn);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
cq->vector = vector;
- cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
- if (cq->cqn == -1)
- return -ENOMEM;
-
- err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
- if (err)
- goto err_out;
-
- err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+ err = mlx4_cq_alloc_icm(dev, &cq->cqn);
if (err)
- goto err_put;
+ return err;
spin_lock_irq(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+ mlx4_cq_free_icm(dev, cq->cqn);
return err;
}
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
complete(&cq->free);
wait_for_completion(&cq->free);
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+ mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
/* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 227997d775e..00b81272e31 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- if (mode == RX)
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
- else
- cq->buf_size = sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
cq->ring = ring;
cq->is_tx = mode;
@@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->size = priv->rx_ring[cq->ring].actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+ cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
if (err)
return err;
@@ -147,6 +144,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (priv->mdev->dev->caps.comp_pool && cq->vector)
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 74e2a2a8a02..7dbc6a23077 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
- sprintf(drvinfo->fw_version, "%d.%d.%d",
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
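
A standalone sketch of the firmware-version formatting used in the snprintf above: the 64-bit fw_ver word is split into three 16-bit fields, printed as major.minor.subminor. Illustrative only, not part of the patch.

#include <stdint.h>
#include <stdio.h>

static void fw_ver_string(uint64_t fw_ver, char *buf, size_t len)
{
	/* each field occupies 16 bits of the 64-bit version word */
	snprintf(buf, len, "%u.%u.%u",
		 (unsigned)(fw_ver >> 32) & 0xffff,
		 (unsigned)(fw_ver >> 16) & 0xffff,
		 (unsigned)fw_ver & 0xffff);
}

int main(void)
{
	char buf[32];

	fw_ver_string(((uint64_t)2 << 32) | (9 << 16) | 1000, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints 2.9.1000 */
	return 0;
}
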
@@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
int err = 0;
u64 config = 0;
+ u64 mask;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+ if ((priv->port < 1) || (priv->port > 2)) {
+ en_err(priv, "Failed to get WoL information\n");
+ return;
+ }
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+ if (!(priv->mdev->dev->caps.flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
u64 config = 0;
int err = 0;
+ u64 mask;
+
+ if ((priv->port < 1) || (priv->port > 2))
+ return -EOPNOTSUPP;
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+ if (!(priv->mdev->dev->caps.flags & mask))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
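
A standalone sketch of the per-port WoL capability check introduced above: ports 1 and 2 carry separate capability bits and any other port number is rejected. The flag bit positions here are assumptions, not the real MLX4_DEV_CAP_FLAG_* values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEV_CAP_FLAG_WOL_PORT1	(1ULL << 41)	/* assumed bit position */
#define DEV_CAP_FLAG_WOL_PORT2	(1ULL << 42)	/* assumed bit position */

static bool port_supports_wol(uint64_t caps, int port)
{
	uint64_t mask;

	if (port < 1 || port > 2)
		return false;

	mask = (port == 1) ? DEV_CAP_FLAG_WOL_PORT1 : DEV_CAP_FLAG_WOL_PORT2;
	return (caps & mask) != 0;
}

int main(void)
{
	uint64_t caps = DEV_CAP_FLAG_WOL_PORT1;

	printf("port 1: %d, port 2: %d, port 3: %d\n",
	       port_supports_wol(caps, 1), port_supports_wol(caps, 2),
	       port_supports_wol(caps, 3));
	return 0;
}
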
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 78d776bc355..72fa807b69c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,7 +45,7 @@
#include "mlx4_en.h"
#include "en_port.h"
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+ return 0;
}
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
+
+ return 0;
}
u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac, 0);
+ priv->base_qpn, priv->mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
@@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
kfree(priv->mc_addrs);
+ priv->mc_addrs = NULL;
priv->mc_addrs_cnt = 0;
}
@@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ mlx4_en_clear_list(dev);
priv->mc_addrs = mc_addrs;
priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
goto out;
}
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
/*
	 * Promiscuous mode: disable all filters
*/
@@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev)
++rx_index;
}
- /* Set port mac number */
- en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
- err = mlx4_register_mac(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn, 0);
+ /* Set qp number */
+ en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+ err = mlx4_get_eth_qp(mdev->dev, priv->port,
+ priv->mac, &priv->base_qpn);
if (err) {
- en_err(priv, "Failed setting port mac\n");
+ en_err(priv, "Failed getting eth qp\n");
goto cq_err;
}
mdev->mac_removed[priv->port] = 0;
@@ -699,7 +714,7 @@ tx_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +760,6 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
- /* Unregister Mac address for the port */
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
-
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +773,10 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
+ /* Unregister Mac address for the port */
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mdev->mac_removed[priv->port] = 1;
+
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -974,6 +989,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int mlx4_en_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (features & NETIF_F_LOOPBACK)
+ priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ else
+ priv->ctrl_flags &=
+ cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+ return 0;
+
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -990,6 +1020,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
#endif
+ .ndo_set_features = mlx4_en_set_features,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1053,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
+ priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
priv->mac_index = -1;
@@ -1088,6 +1121,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ dev->hw_features |= NETIF_F_LOOPBACK;
mdev->pndev[port] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 03c84cd78cd..331791467a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -41,13 +41,6 @@
#include "mlx4_en.h"
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
- u64 mac, u64 clear, u8 mode)
-{
- return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
- MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
filter->entry[i] = cpu_to_be32(entry);
}
err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
- MLX4_CMD_TIME_CLASS_B);
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
- u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_general_context *context;
- int err;
- u32 in_mod;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->flags = SET_PORT_GEN_ALL_VALID;
- context->mtu = cpu_to_be16(mtu);
- context->pptx = (pptx * (!pfctx)) << 7;
- context->pfctx = pfctx;
- context->pprx = (pprx * (!pfcrx)) << 7;
- context->pfcrx = pfcrx;
-
- in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
- u8 promisc)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_rqp_calc_context *context;
- int err;
- u32 in_mod;
- u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
- MCAST_DIRECT : MCAST_DEFAULT;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
- return 0;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = dev->caps.log_num_macs;
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
- base_qpn);
- context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
- base_qpn);
- context->intra_no_vlan = 0;
- context->no_vlan = MLX4_NO_VLAN_IDX;
- context->intra_vlan_miss = 0;
- context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
- in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
- MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 19eb244f516..6934fd7e66e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,49 +39,6 @@
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
-enum {
- MLX4_CMD_SET_VLAN_FLTR = 0x47,
- MLX4_CMD_SET_MCAST_FLTR = 0x48,
- MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
- MCAST_DIRECT_ONLY = 0,
- MCAST_DIRECT = 1,
- MCAST_DEFAULT = 2
-};
-
-struct mlx4_set_port_general_context {
- u8 reserved[3];
- u8 flags;
- u16 reserved2;
- __be16 mtu;
- u8 pptx;
- u8 pfctx;
- u16 reserved3;
- u8 pprx;
- u8 pfcrx;
- u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
- __be32 base_qpn;
- u8 rererved;
- u8 n_mac;
- u8 n_vlan;
- u8 n_prio;
- u8 reserved2[3];
- u8 mac_miss;
- u8 intra_no_vlan;
- u8 no_vlan;
- u8 intra_vlan_miss;
- u8 vlan_miss;
- u8 reserved3[3];
- u8 no_vlan_prio;
- __be32 promisc;
- __be32 mcast;
-};
-
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 0dfb4ec8a9d..bcbc54c1694 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
struct mlx4_en_dev *mdev = priv->mdev;
memset(context, 0, sizeof *context);
- context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b89c36dbf5b..e8d6ad2dce0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -541,6 +541,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
+ struct ethhdr *ethh;
+ u64 s_mac;
if (!priv->port_up)
return 0;
@@ -577,10 +579,24 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+		/* Get a pointer to the first fragment, since we don't have an
+		 * skb yet, and cast it to an ethhdr struct */
+ ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+ skb_frags[0].offset);
+ s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+		/* If the source MAC is our own MAC and we are not running the
+		 * selftest or forced loopback (flb) is disabled, drop the packet */
+ if (s_mac == priv->mac &&
+ (!(dev->features & NETIF_F_LOOPBACK) ||
+ !priv->validate_loopback))
+ goto next;
+
/*
* Packet is OK - process it.
*/
length = be32_to_cpu(cqe->byte_cnt);
+ length -= ring->fcs_del;
ring->bytes += length;
ring->packets++;
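
The loopback filter above compares the source MAC against the port's own MAC as a single 64-bit value. A standalone sketch of that packing follows, assuming the helper stores the address most-significant byte first (the helper's body is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* illustrative only: pack a 6-byte address into a u64, MSB first (assumed) */
static uint64_t mac_to_u64(const uint8_t *addr)
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mac = (mac << 8) | addr[i];
	return mac;
}

int main(void)
{
	const uint8_t addr[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0x01, 0x02, 0x03 };

	printf("0x%012llx\n", (unsigned long long)mac_to_u64(addr));
	return 0;
}
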
@@ -813,8 +829,11 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
/* Cancel FCS removal if FW allows */
- if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
context->param3 |= cpu_to_be32(1 << 29);
+ ring->fcs_del = ETH_FCS_LEN;
+ } else
+ ring->fcs_del = 0;
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
if (err) {
@@ -833,9 +852,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
- struct mlx4_en_rss_context *rss_context;
+ struct mlx4_rss_context *rss_context;
void *ptr;
- u8 rss_mask = 0x3f;
+ u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+ MLX4_RSS_TCP_IPV6);
int i, qpn;
int err = 0;
int good_qps = 0;
@@ -873,18 +893,21 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
- ptr = ((void *) &context) + 0x3c;
+ ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ if (priv->mdev->profile.udp_rss) {
+ rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ }
rss_context->flags = rss_mask;
- rss_context->hash_fn = 1;
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
rss_context->rss_key[i] = rsskey[i];
- if (priv->mdev->profile.udp_rss)
- rss_context->base_qpn_udp = rss_context->default_qpn;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
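
A standalone sketch of how the RSS hash-type mask above is composed: IPv4/IPv6 and TCP hashing are always enabled, and the UDP bits are added only when the profile requests udp_rss. The bit values below are placeholders, not the real MLX4_RSS_* encoding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum {	/* placeholder bit positions */
	RSS_IPV4     = 1 << 0,
	RSS_TCP_IPV4 = 1 << 1,
	RSS_IPV6     = 1 << 2,
	RSS_TCP_IPV6 = 1 << 3,
	RSS_UDP_IPV4 = 1 << 4,
	RSS_UDP_IPV6 = 1 << 5,
};

static uint8_t build_rss_mask(bool udp_rss)
{
	uint8_t mask = RSS_IPV4 | RSS_TCP_IPV4 | RSS_IPV6 | RSS_TCP_IPV6;

	if (udp_rss)
		mask |= RSS_UDP_IPV4 | RSS_UDP_IPV6;
	return mask;
}

int main(void)
{
	printf("rss mask without udp: 0x%02x\n", build_rss_mask(false));
	printf("rss mask with udp:    0x%02x\n", build_rss_mask(true));
	return 0;
}
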
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 9fdbcecd499..bf2e5d3f177 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -43,7 +43,7 @@
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d901b426753..9ef9038d062 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
- struct mlx4_cqe *cqe = cq->buf;
+ struct mlx4_cqe *cqe;
u16 index;
- u16 new_index;
+ u16 new_index, ring_index;
u32 txbbs_skipped = 0;
- u32 cq_last_sav;
-
- /* index always points to the first TXBB of the last polled descriptor */
- index = ring->cons & ring->size_mask;
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- if (index == new_index)
- return;
+ u32 cons_index = mcq->cons_index;
+ int size = cq->size;
+ u32 size_mask = ring->size_mask;
+ struct mlx4_cqe *buf = cq->buf;
if (!priv->port_up)
return;
- /*
- * We use a two-stage loop:
- * - the first samples the HW-updated CQE
- * - the second frees TXBBs until the last sample
- * This lets us amortize CQE cache misses, while still polling the CQ
- * until is quiescent.
- */
- cq_last_sav = mcq->cons_index;
- do {
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ ring_index = ring->cons & size_mask;
+
+ /* Process all completed CQEs */
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cons_index & size)) {
+ /*
+ * make sure we read the CQE after we read the
+ * ownership bit
+ */
+ rmb();
+
+ /* Skip over last polled CQE */
+ new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
do {
- /* Skip over last polled CQE */
- index = (index + ring->last_nr_txbb) & ring->size_mask;
txbbs_skipped += ring->last_nr_txbb;
-
- /* Poll next CQE */
+ ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+ /* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
- priv, ring, index,
- !!((ring->cons + txbbs_skipped) &
- ring->size));
- ++mcq->cons_index;
-
- } while (index != new_index);
+ priv, ring, ring_index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ } while (ring_index != new_index);
+
+ ++cons_index;
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ }
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- } while (index != new_index);
- AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
- (u32) (mcq->cons_index - cq_last_sav));
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
*/
+ mcq->cons_index = cons_index;
mlx4_cq_set_ci(mcq);
wmb();
ring->cons += txbbs_skipped;
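
A standalone sketch of the ownership test that drives the new polling loop above: a CQE is treated as completed when its owner bit matches the "lap parity" of the consumer index, i.e. XNOR(owner, cons_index & size).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQE_OWNER_MASK	0x80
#define XNOR(x, y)	(!(x) == !(y))

/* illustrative only, mirrors the while-condition in the hunk above */
static bool cqe_completed(uint8_t owner_sr_opcode, uint32_t cons_index,
			  uint32_t size)
{
	return XNOR(owner_sr_opcode & CQE_OWNER_MASK, cons_index & size);
}

int main(void)
{
	/* 256-entry CQ: the meaning of the owner bit flips on every lap */
	printf("%d\n", cqe_completed(0x00, 5, 256));	   /* 1: first lap  */
	printf("%d\n", cqe_completed(0x80, 5, 256));	   /* 0: not ours   */
	printf("%d\n", cqe_completed(0x80, 256 + 5, 256)); /* 1: second lap */
	return 0;
}
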
@@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ (!!vlan_tx_tag_present(skb));
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
@@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
	/* Prepare ctrl segment apart from opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ !!vlan_tx_tag_present(skb);
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
- tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
- MLX4_WQE_CTRL_SOLICITED);
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
ring->tx_csum++;
}
- if (unlikely(priv->validate_loopback)) {
- /* Copy dst mac address to wqe */
- skb_reset_mac_header(skb);
- ethh = eth_hdr(skb);
- if (ethh && ethh->h_dest) {
- mac = mlx4_en_mac_to_u64(ethh->h_dest);
- mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
- mac_l = (u32) (mac & 0xffffffff);
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
- tx_desc->ctrl.imm = cpu_to_be32(mac_l);
- }
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
}
/* Handle LSO (TSO) packets */
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 24ee9677599..1e9b55eb721 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- u8 log_eq_size;
- u8 reserved2[4];
- u8 eq_period;
- u8 reserved3;
- u8 eq_max_count;
- u8 reserved4[3];
- u8 intr;
- u8 log_page_size;
- u8 reserved5[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- u32 reserved6[2];
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved7[4];
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
- (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- } event;
- u8 reserved3[3];
- u8 owner;
-} __packed;
+ (1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
+ (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+ struct mlx4_eqe *eqe =
+ &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+ return (!!(eqe->owner & 0x80) ^
+ !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+ eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+ struct mlx4_eqe *eqe;
+ u8 slave;
+ int i;
+
+ for (eqe = next_slave_event_eqe(slave_eq); eqe;
+ eqe = next_slave_event_eqe(slave_eq)) {
+ slave = eqe->slave_id;
+
+ /* All active slaves need to receive the event */
+ if (slave == ALL_SLAVES) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i != dev->caps.function &&
+ master->slave_state[i].active)
+ if (mlx4_GEN_EQE(dev, i, eqe))
+ mlx4_warn(dev, "Failed to "
+ " generate event "
+ "for slave %d\n", i);
+ }
+ } else {
+ if (mlx4_GEN_EQE(dev, slave, eqe))
+ mlx4_warn(dev, "Failed to generate event "
+ "for slave %d\n", slave);
+ }
+ ++slave_eq->cons;
+ }
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+ struct mlx4_eqe *s_eqe =
+ &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+ if ((!!(s_eqe->owner & 0x80)) ^
+ (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+ "No free EQE on slave events queue\n", slave);
+ return;
+ }
+
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ s_eqe->slave_id = slave;
+	/* ensure all information is written before setting the ownership bit */
+ wmb();
+ s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+ ++slave_eq->prod;
+
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+ struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave =
+ &priv->mfunc.master.slave_state[slave];
+
+ if (!s_slave->active) {
+ /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+ return;
+ }
+
+ slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_flr_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int i;
+ int err;
+
+ mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+
+ if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+ "clean slave: %d\n", i);
+
+ mlx4_delete_all_resources_for_slave(dev, i);
+			/* return the slave to running mode */
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+ slave_state[i].is_slave_going_down = 0;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+			/* notify the FW */
+ err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to notify FW on "
+ "FLR done (slave:%d)\n", i);
+ }
+ }
+}
+
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
+ int slave = 0;
+ int ret;
+ u32 flr_slave;
+ u8 update_slave_state;
+ int i;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
- mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
- eqe->type);
+ mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the QP */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_QP,
+ be32_to_cpu(eqe->event.qp.qpn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "QP event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+
+ }
+ mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+ __func__);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
- mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
- eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the SRQ */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_SRQ,
+ be32_to_cpu(eqe->event.srq.srqn)
+ & 0xffffff,
+ &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_warn(dev, "SRQ event %02x(%02x) "
+ "on EQ %d at index %u: could"
+ " not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+ " event: %02x(%02x)\n", __func__,
+ slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_warn(dev, "%s: sending event "
+ "%02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+ if (mlx4_is_master(dev))
+				/* change the state of all slaves' ports
+				 * to down */
+ for (i = 0; i < dev->num_slaves; i++) {
+ mlx4_dbg(dev, "%s: Sending "
+ "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ " to slave: %d, port:%d\n",
+ __func__, i, port);
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
} else {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_UP,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+ if (mlx4_is_master(dev)) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
+ }
}
break;
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
- mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_CQ,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "CQ event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_cq_event(dev,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff,
eqe->type);
break;
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_COMM_CHANNEL:
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Received comm channel event "
+ "for non master device\n");
+ break;
+ }
+ memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+ eqe->event.comm_channel_arm.bit_vec,
+ sizeof eqe->event.comm_channel_arm.bit_vec);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.comm_work);
+ break;
+
+ case MLX4_EVENT_TYPE_FLR_EVENT:
+ flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Non-master function received"
+ "FLR event\n");
+ break;
+ }
+
+ mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+ if (flr_slave > dev->num_slaves) {
+ mlx4_warn(dev,
+ "Got FLR for unknown function: %d\n",
+ flr_slave);
+ update_slave_state = 0;
+ } else
+ update_slave_state = 1;
+
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (update_slave_state) {
+ priv->mfunc.master.slave_state[flr_slave].active = false;
+ priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+ priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+ }
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_flr_event_work);
+ break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
- eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+ "index %u. owner=%x, nent=0x%x, slave=%x, "
+ "ownership=%s\n",
+ eqe->type, eqe->subtype, eq->eqn,
+ eq->cons_index, eqe->owner, eq->nent,
+ eqe->slave_id,
+ !!(eqe->owner & 0x80) ^
+ !!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
- }
+ };
++eq->cons_index;
eqes_found = 1;
@@ -290,25 +505,58 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq =
+ &priv->mfunc.master.slave_state[slave].event_eq;
+ u32 in_modifier = vhcr->in_modifier;
+ u32 eqn = in_modifier & 0x1FF;
+ u64 in_param = vhcr->in_param;
+ int err = 0;
+
+ if (slave == dev->caps.function)
+ err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ if (!err) {
+ if (in_modifier >> 31) {
+ /* unmap */
+ event_eq->event_type &= ~in_param;
+ } else {
+ event_eq->eqn = eqn;
+ event_eq->event_type = in_param;
+ }
+ }
+ return err;
+}
+
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
- 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+ MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+ 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -585,14 +833,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
- err = mlx4_map_clr_int(dev);
- if (err)
- goto err_out_bitmap;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_map_clr_int(dev);
+ if (err)
+ goto err_out_bitmap;
- priv->eq_table.clr_mask =
- swab32(1 << (priv->eq_table.inta_pin & 31));
- priv->eq_table.clr_int = priv->clr_base +
- (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ priv->eq_table.clr_mask =
+ swab32(1 << (priv->eq_table.inta_pin & 31));
+ priv->eq_table.clr_int = priv->clr_base +
+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ }
priv->eq_table.irq_names =
kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +950,8 @@ err_out_unmap:
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
--i;
}
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
mlx4_free_irqs(dev);
err_out_bitmap:
@@ -725,7 +976,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
@@ -748,7 +1000,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
err = mlx4_NOP(dev);
/* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X))
+ if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
return err;
/* A loop over all completion vectors, for each vector we will check
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 435ca6e4973..a424a19280c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -32,6 +32,7 @@
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
@@ -48,7 +49,7 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static int enable_qos;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
@@ -139,12 +140,185 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u8 field;
+ u32 size;
+ int err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
+#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+
+ if (vhcr->op_modifier == 1) {
+ field = vhcr->in_modifier;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+		field = 0; /* ensure the force-vlan (fvl) bit is not set */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ } else if (vhcr->op_modifier == 0) {
+ field = 1 << 7; /* enable only ethernet interface */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+ field = slave;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+
+ field = dev->caps.num_ports;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+		size = 0; /* no PF behaviour is set for now */
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+ size = dev->caps.reserved_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mgms + dev->caps.num_amgms;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+ } else
+ err = -EINVAL;
+
+ return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u32 size;
+ int i;
+ int err = 0;
+
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ outbox = mailbox->buf;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+ if (!(field & (1 << 7))) {
+ mlx4_err(dev, "The host doesn't support eth interface\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+ func_cap->function = field;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+ func_cap->num_ports = field;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ func_cap->pf_context_behaviour = size;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ func_cap->max_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ func_cap->reserved_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ for (i = 1; i <= func_cap->num_ports; ++i) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+ MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & (1 << 7)) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ if (field & (1 << 6)) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ func_cap->physical_port[i] = field;
+ }
+
+ /* All other resources are allocated by the master, but we still report
+ * 'num' and 'reserved' capabilities as follows:
+ * - num remains the maximum resource index
+ * - 'num - reserved' is the total available objects of a resource, but
+ * resource indices may be less than 'reserved'
+ * TODO: set per-resource quotas */
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
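
Each quota dword parsed above keeps only its low 24 bits. A minimal standalone sketch of that masking, for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint32_t func_cap_quota(uint32_t dword)
{
	return dword & 0xFFFFFF;	/* low 24 bits hold the quota */
}

int main(void)
{
	/* the top byte (0xAB here) is other/reserved data and is dropped */
	printf("quota = %u\n", (unsigned)func_cap_quota(0xAB000400));
	return 0;
}
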
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +403,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
if (err)
goto out;
@@ -396,12 +570,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
dev_cap->supported_port_types[i] = field & 3;
+ dev_cap->suggested_type[i] = (field >> 3) & 1;
+ dev_cap->default_sense[i] = (field >> 4) & 1;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +647,61 @@ out:
return err;
}
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 def_mac;
+ u8 port_type;
+ int err;
+
+#define MLX4_PORT_SUPPORT_IB (1 << 0)
+#define MLX4_PORT_SUGGEST_TYPE (1 << 3)
+#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
+#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
+ ~MLX4_PORT_SUGGEST_TYPE & \
+ ~MLX4_PORT_DEFAULT_SENSE)
+
+ err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ if (!err && dev->caps.function != slave) {
+ /* set slave default_mac address */
+ MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+ def_mac += slave << 8;
+ MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+ /* get port type - currently only eth is enabled */
+ MLX4_GET(port_type, outbox->buf,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+ /* Allow only Eth port, no link sensing allowed */
+ port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
+
+ /* check eth is enabled for this port */
+ if (!(port_type & 2))
+ mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+ MLX4_PUT(outbox->buf, port_type,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ }
+
+ return err;
+}
+
+static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+ struct mlx4_cmd_mailbox *outbox = ptr;
+
+ return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +751,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
if (++nent == MLX4_MAILBOX_SIZE / 16) {
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
goto out;
nent = 0;
@@ -528,7 +761,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
}
if (nent)
- err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -557,13 +791,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +815,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
+#define QUERY_FW_PPF_ID 0x09
#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
#define QUERY_FW_MAX_CMD_OFFSET 0x0f
#define QUERY_FW_ERR_START_OFFSET 0x30
@@ -589,13 +826,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
+#define QUERY_FW_COMM_BASE_OFFSET 0x40
+#define QUERY_FW_COMM_BAR_OFFSET 0x48
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -608,6 +848,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+ dev->caps.function = lg;
+
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +892,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+ MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+ MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
+ fw->comm_bar = (fw->comm_bar >> 6) * 2;
+ mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+ fw->comm_bar, fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
/*
@@ -711,7 +959,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -743,6 +991,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
+#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1080,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* UAR attributes */
- MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
- err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+ MLX4_CMD_NATIVE);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1093,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return err;
}
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ struct mlx4_init_hca_param *param)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
+ int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_QUERY_HCA,
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
+ if (err)
+ goto out;
+
+ MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+ MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
+ MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+ /* multicast attributes */
+
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+ /* TPT attributes */
+
+ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
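The uar_page_sz field recovered by mlx4_QUERY_HCA above (and programmed by INIT_HCA further down) encodes the UAR page size as the log2 of the size in 4 KB units. A minimal standalone sketch of the encoding and decoding, assuming a 4 KB kernel page for illustration (not driver code):

#include <stdio.h>

/* Standalone sketch: the uar_page_sz field is the log2 of the UAR page
 * size expressed in 4 KB units, so the PF writes PAGE_SHIFT - 12 into
 * INIT_HCA and a VF recovers the byte size from QUERY_HCA with
 * 1 << (uar_page_sz + 12). */
int main(void)
{
	int page_shift = 12;			/* assume 4 KB kernel pages */
	int uar_page_sz = page_shift - 12;	/* value the PF programs: 0 */
	int uar_page_size = 1 << (uar_page_sz + 12);

	printf("uar_page_sz=%d -> %d bytes\n", uar_page_sz, uar_page_size);
	return 0;
}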
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+
+ /* Enable port only if it was previously disabled */
+ if (!priv->mfunc.master.init_port_ref[port]) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ priv->mfunc.master.slave_state[slave].init_port_mask |=
+ (1 << port);
+ }
+ ++priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
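The INIT_PORT wrapper above keeps one reference count per port plus a per-slave bitmask, so the real INIT_PORT firmware command is only issued when the first function brings a port up (and, in the CLOSE_PORT wrapper below, CLOSE_PORT only when the last one takes it down). A simplified toy model of that bookkeeping, not the driver code itself:

#include <stdio.h>

/* Toy model: each slave sets its bit in init_port_mask, but the real
 * INIT_PORT/CLOSE_PORT firmware commands fire only on the 0 -> 1 and
 * 1 -> 0 transitions of the shared per-port reference count. */
static int init_port_ref[3];		/* indexed by port number, 1..2 used */
static unsigned int init_port_mask[4];	/* one mask per slave */

static void init_port(int slave, int port)
{
	if (init_port_mask[slave] & (1u << port))
		return;			/* this slave already opened the port */
	if (!init_port_ref[port])
		printf("firmware INIT_PORT on port %d\n", port);
	init_port_mask[slave] |= 1u << port;
	++init_port_ref[port];
}

static void close_port(int slave, int port)
{
	if (!(init_port_mask[slave] & (1u << port)))
		return;
	if (init_port_ref[port] == 1)
		printf("firmware CLOSE_PORT on port %d\n", port);
	init_port_mask[slave] &= ~(1u << port);
	--init_port_ref[port];
}

int main(void)
{
	init_port(0, 1); init_port(1, 1);	/* only the first one prints */
	close_port(0, 1); close_port(1, 1);	/* only the last one prints */
	return 0;
}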
+
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1231,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
} else
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+ (1 << port)))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+ if (priv->mfunc.master.init_port_ref[port] == 1) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ }
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+ --priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
- return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
- return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+ MLX4_CMD_NATIVE);
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
MLX4_CMD_SET_ICM_SIZE,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (ret)
return ret;
@@ -929,7 +1303,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
- return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1312,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
- MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
@@ -947,6 +1322,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index bf5ec228652..119e0cc9fab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -111,11 +111,30 @@ struct mlx4_dev_cap {
u64 max_icm_sz;
int max_gso_sz;
u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 suggested_type[MLX4_MAX_PORTS + 1];
+ u8 default_sense[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
u32 max_counters;
};
+struct mlx4_func_cap {
+ u8 function;
+ u8 num_ports;
+ u8 flags;
+ u32 pf_context_behaviour;
+ int qp_quota;
+ int cq_quota;
+ int srq_quota;
+ int mpt_quota;
+ int mtt_quota;
+ int max_eq;
+ int reserved_eq;
+ int mcg_quota;
+ u8 physical_port[MLX4_MAX_PORTS + 1];
+ u8 port_flags[MLX4_MAX_PORTS + 1];
+};
+
struct mlx4_adapter {
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
@@ -133,6 +152,7 @@ struct mlx4_init_hca_param {
u64 dmpt_base;
u64 cmpt_base;
u64 mtt_base;
+ u64 global_caps;
u16 log_mc_entry_sz;
u16 log_mc_hash_sz;
u8 log_num_qps;
@@ -143,6 +163,7 @@ struct mlx4_init_hca_param {
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
+ u8 uar_page_sz; /* log pg sz in 4k chunks */
};
struct mlx4_init_ib_param {
@@ -167,12 +188,19 @@ struct mlx4_set_ib_param {
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
int mlx4_QUERY_FW(struct mlx4_dev *dev);
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 02393fdf44c..a9ade1c3cad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index ca6feb55bd9..b4e9f6f5cc0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
- mlx4_start_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
- mlx4_stop_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 94bbc85a532..6bb62c580e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -40,6 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/delay.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the num"
+			   " of qp per mcg, for example:"
+			   " 10 gives 248. Range: 9 <="
+			   " log_num_mgm_entry_size <= 12");
+
+#define MLX4_VF (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK 0
+#define PF_CONTEXT_BEHAVIOUR_MASK 0
+
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
- .num_qp = 1 << 17,
+ .num_qp = 1 << 18,
.num_srq = 1 << 16,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
- .num_mpt = 1 << 17,
+ .num_mpt = 1 << 19,
.num_mtt = 1 << 20,
};
-static int log_num_mac = 2;
+static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
@@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
-static int use_prio;
+static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)");
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
+				"1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+ struct list_head list;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+ return dev->caps.reserved_eqs +
+ MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
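mlx4_master_get_num_eqs() above sizes event-queue context for the master plus every potential slave; later, mlx4_init_cmpt_table() and mlx4_init_icm() round the result up to a power of two before carving the ICM tables. A standalone sketch of that arithmetic, with MLX4_MFUNC_EQ_NUM and the firmware's reserved_eqs replaced by assumed illustrative values:

#include <stdio.h>

/* Illustration only: hypothetical numbers, not values read from hardware. */
static unsigned int roundup_pow_of_two_u(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	int reserved_eqs = 4;		/* assumed firmware value */
	int mfunc_eq_num = 4;		/* assumed MLX4_MFUNC_EQ_NUM */
	int num_slaves = 63;		/* the +1 below accounts for the master */
	int num_eqs = reserved_eqs + mfunc_eq_num * (num_slaves + 1);

	printf("num_eqs=%d, EQ context table size=%u entries\n",
	       num_eqs, roundup_pow_of_two_u(num_eqs));
	return 0;
}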
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
int i;
- dev->caps.port_mask = 0;
for (i = 1; i <= dev->caps.num_ports; ++i)
- if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
- dev->caps.port_mask |= 1 << (i - 1);
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
dev->caps.def_mac[i] = dev_cap->def_mac[i];
dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+ dev->caps.default_sense[i] = dev_cap->default_sense[i];
dev->caps.trans_type[i] = dev_cap->trans_type[i];
dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
dev->caps.wavelength[i] = dev_cap->wavelength[i];
dev->caps.trans_code[i] = dev_cap->trans_code[i];
}
+ dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
dev->caps.bf_reg_size = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
- dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
- dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
- dev->caps.mtts_per_seg);
+ dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
- dev->caps.reserved_uars = dev_cap->reserved_uars;
+
+ /* The first 128 UARs are used for EQ doorbells */
+ dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->reserved_xrcds : 0;
dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->max_xrcds : 0;
- dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
+
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
@@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+ /* Sense port always allowed on supported devices for ConnectX1 and 2 */
+ if (dev->pdev->device != 0x1003)
+ dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
- if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
- dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
- else
- dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
- dev->caps.possible_type[i] = dev->caps.port_type[i];
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+ if (dev->caps.supported_type[i]) {
+ /* if only ETH is supported - assign ETH */
+ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ /* if only IB is supported,
+ * assign IB only if SRIOV is off*/
+ else if (dev->caps.supported_type[i] ==
+ MLX4_PORT_TYPE_IB) {
+ if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_NONE;
+ else
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_IB;
+ /* if IB and ETH are supported,
+ * first of all check if SRIOV is on */
+ } else if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ else {
+ /* In non-SRIOV mode, we set the port type
+ * according to user selection of port type,
+				 * if user selected none, take the FW hint */
+ if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+ MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = port_type_array[i-1];
+ }
+ }
+ /*
+ * Link sensing is allowed on the port if 3 conditions are true:
+ * 1. Both protocols are supported on the port.
+ * 2. Different types are supported on the port
+ * 3. FW declared that it supports link sensing
+ */
mlx4_priv(dev)->sense.sense_allowed[i] =
- dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+ ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+ /*
+ * If "default_sense" bit is set, we move the port to "AUTO" mode
+ * and perform sense_port FW command to try and set the correct
+ * port type from beginning
+ */
+ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+ enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+ dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+ mlx4_SENSE_PORT(dev, i, &sensed_port);
+ if (sensed_port != MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = sensed_port;
+ } else {
+ dev->caps.possible_type[i] = dev->caps.port_type[i];
+ }
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- mlx4_set_port_mask(dev);
-
dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +374,149 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
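The per-port type selection inside mlx4_dev_cap() above boils down to a small decision chain: Ethernet-only ports become ETH, IB-only ports become IB unless SR-IOV is active, and dual-protocol ports follow SR-IOV first, then the port_type_array module parameter, then the firmware's suggested type. A compact restatement of that chain; the enum and helper below are local to this sketch, not the driver's definitions:

#include <stdbool.h>
#include <stdio.h>

enum port_type { TYPE_NONE, TYPE_IB, TYPE_ETH, TYPE_AUTO };

static enum port_type pick_port_type(enum port_type supported, bool sriov,
				     enum port_type module_param,
				     bool fw_suggests_eth)
{
	if (supported == TYPE_ETH)
		return TYPE_ETH;
	if (supported == TYPE_IB)
		return sriov ? TYPE_NONE : TYPE_IB;
	/* both protocols supported on this port */
	if (sriov)
		return TYPE_ETH;
	if (module_param != TYPE_NONE)
		return module_param;
	return fw_suggests_eth ? TYPE_ETH : TYPE_IB;
}

int main(void)
{
	/* dual-protocol port, no SR-IOV, no module parameter, FW hints ETH */
	printf("%d\n", pick_port_type(TYPE_AUTO, false, TYPE_NONE, true));
	return 0;
}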
+/* The function checks whether there are live VFs and returns how many there are */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i;
+ int ret = 0;
+
+ for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ if (s_state->active && s_state->last_cmd !=
+ MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "%s: slave: %d is still active\n",
+ __func__, i);
+ ret++;
+ }
+ }
+ return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave;
+
+ if (!mlx4_is_master(dev))
+ return 0;
+
+ s_slave = &priv->mfunc.master.slave_state[slave];
+ return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+ int err;
+ u32 page_size;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_func_cap func_cap;
+ struct mlx4_init_hca_param hca_param;
+ int i;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ if (err) {
+ mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ return err;
+ }
+
+ /*fail if the hca has an unknown capability */
+ if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+ HCA_GLOBAL_CAP_MASK) {
+ mlx4_err(dev, "Unknown hca global capabilities\n");
+ return -ENOSYS;
+ }
+
+ mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ page_size = ~dev->caps.page_size_cap + 1;
+ mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+ if (page_size > PAGE_SIZE) {
+ mlx4_err(dev, "HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ /* slave gets uar page size from QUERY_HCA fw command */
+ dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+ /* TODO: relax this assumption */
+ if (dev->caps.uar_page_size != PAGE_SIZE) {
+ mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+ dev->caps.uar_page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+ PF_CONTEXT_BEHAVIOUR_MASK) {
+ mlx4_err(dev, "Unknown pf context behaviour\n");
+ return -ENOSYS;
+ }
+
+ dev->caps.function = func_cap.function;
+ dev->caps.num_ports = func_cap.num_ports;
+ dev->caps.num_qps = func_cap.qp_quota;
+ dev->caps.num_srqs = func_cap.srq_quota;
+ dev->caps.num_cqs = func_cap.cq_quota;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
+ dev->caps.num_mpts = func_cap.mpt_quota;
+ dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->caps.num_pds = MLX4_NUM_PDS;
+ dev->caps.num_mgms = 0;
+ dev->caps.num_amgms = 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+ if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+ "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ return -ENODEV;
+ }
+
+ if (dev->caps.uar_page_size * (dev->caps.num_uars -
+ dev->caps.reserved_uars) >
+ pci_resource_len(dev->pdev, 2)) {
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ (unsigned long long) pci_resource_len(dev->pdev, 2));
+ return -ENODEV;
+ }
+
+#if 0
+ mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+ mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+ dev->caps.num_uars, dev->caps.reserved_uars,
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ pci_resource_len(dev->pdev, 2));
+ mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+ dev->caps.reserved_eqs);
+ mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+ dev->caps.num_pds, dev->caps.reserved_pds,
+ dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+ return 0;
+}
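The HCA minimum-page-size check in mlx4_slave_cap() above relies on page_size_cap being stored as ~(min_page_sz - 1) (as set in mlx4_dev_cap()), so ~page_size_cap + 1 recovers the device's minimum page size. A standalone illustration of that two's-complement round trip:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t min_page_sz = 4096;			/* hypothetical device minimum */
	uint32_t page_size_cap = ~(min_page_sz - 1);	/* as stored in dev->caps */
	uint32_t recovered = ~page_size_cap + 1;	/* what the slave computes */

	printf("cap=0x%08x -> min page size %u\n", page_size_cap, recovered);
	return 0;
}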
/*
* Change the port configuration of the device.
@@ -377,7 +612,8 @@ static ssize_t set_port_type(struct device *dev,
types[i] = mdev->caps.port_type[i+1];
}
- if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +687,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ int num_eqs;
err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
cmpt_base +
@@ -480,12 +717,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
- cmpt_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+ cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
if (err)
goto err_cq;
@@ -509,6 +748,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 aux_pages;
+ int num_eqs;
int err;
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +780,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
goto err_unmap_aux;
}
+
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs,
- 0, 0);
+ num_eqs, num_eqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
goto err_unmap_cmpt;
@@ -563,7 +806,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
init_hca->mtt_base,
dev->caps.mtt_entry_sz,
- dev->caps.num_mtt_segs,
+ dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +893,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
* and it's a lot easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
- init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+ init_hca->mc_base,
+ mlx4_get_mgm_entry_size(dev),
dev->caps.num_mgms + dev->caps.num_amgms,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
@@ -726,6 +970,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ down(&priv->cmd.slave_sem);
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+ mlx4_warn(dev, "Failed to close slave function.\n");
+ up(&priv->cmd.slave_sem);
+}
+
static int map_bf_area(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +987,10 @@ static int map_bf_area(struct mlx4_dev *dev)
resource_size_t bf_len;
int err = 0;
- bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
- bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+ bf_start = pci_resource_start(dev->pdev, 2) +
+ (dev->caps.num_uars << PAGE_SHIFT);
+ bf_len = pci_resource_len(dev->pdev, 2) -
+ (dev->caps.num_uars << PAGE_SHIFT);
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
if (!priv->bf_mapping)
err = -ENOMEM;
@@ -751,10 +1007,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
static void mlx4_close_hca(struct mlx4_dev *dev)
{
unmap_bf_area(dev);
- mlx4_CLOSE_HCA(dev, 0);
- mlx4_free_icms(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ if (mlx4_is_slave(dev))
+ mlx4_slave_exit(dev);
+ else {
+ mlx4_CLOSE_HCA(dev, 0);
+ mlx4_free_icms(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u64 dma = (u64) priv->mfunc.vhcr_dma;
+ int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+ int ret_from_reset = 0;
+ u32 slave_read;
+ u32 cmd_channel_ver;
+
+ down(&priv->cmd.slave_sem);
+ priv->cmd.max_cmds = 1;
+ mlx4_warn(dev, "Sending reset\n");
+ ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+ MLX4_COMM_TIME);
+	/* if we are in the middle of FLR, the slave will try
+	 * NUM_OF_RESET_RETRIES times before giving up. */
+ if (ret_from_reset) {
+ if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+ msleep(SLEEP_TIME_IN_RESET);
+ while (ret_from_reset && num_of_reset_retries) {
+				mlx4_warn(dev, "slave is currently in the "
+					  "middle of FLR. Retrying... "
+					  "(try num:%d)\n",
+ (NUM_OF_RESET_RETRIES -
+ num_of_reset_retries + 1));
+ ret_from_reset =
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+ 0, MLX4_COMM_TIME);
+ num_of_reset_retries = num_of_reset_retries - 1;
+ }
+ } else
+ goto err;
+ }
+
+ /* check the driver version - the slave I/F revision
+ * must match the master's */
+ slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+ cmd_channel_ver = mlx4_comm_get_version();
+
+ if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+ MLX4_COMM_GET_IF_REV(slave_read)) {
+ mlx4_err(dev, "slave driver version is not supported"
+ " by the master\n");
+ goto err;
+ }
+
+ mlx4_warn(dev, "Sending vhcr0\n");
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+ goto err;
+ up(&priv->cmd.slave_sem);
+ return 0;
+
+err:
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+ up(&priv->cmd.slave_sem);
+ return -EIO;
}
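The VHCR0/VHCR1/VHCR2/VHCR_EN sequence above pushes the 64-bit VHCR DMA address to the master 16 bits at a time through the communication-channel parameter. A minimal sketch of splitting and reassembling the address; the address value is hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x0000123456789000ULL;	/* hypothetical VHCR address */
	uint16_t chunks[4] = {			/* VHCR0, VHCR1, VHCR2, VHCR_EN */
		(uint16_t)(dma >> 48), (uint16_t)(dma >> 32),
		(uint16_t)(dma >> 16), (uint16_t)dma,
	};
	uint64_t rebuilt = ((uint64_t)chunks[0] << 48) |
			   ((uint64_t)chunks[1] << 32) |
			   ((uint64_t)chunks[2] << 16) | chunks[3];

	printf("rebuilt: 0x%016llx\n", (unsigned long long)rebuilt);
	return 0;
}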
static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1095,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
u64 icm_size;
int err;
- err = mlx4_QUERY_FW(dev);
- if (err) {
- if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
- else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
- return err;
- }
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ if (err == -EACCES)
+ mlx4_info(dev, "non-primary physical function, skipping.\n");
+ else
+ mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ goto unmap_bf;
+ }
- err = mlx4_load_fw(dev);
- if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
- return err;
- }
+ err = mlx4_load_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to start FW, aborting.\n");
+ goto unmap_bf;
+ }
- mlx4_cfg.log_pg_sz_m = 1;
- mlx4_cfg.log_pg_sz = 0;
- err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
- if (err)
- mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+ mlx4_cfg.log_pg_sz_m = 1;
+ mlx4_cfg.log_pg_sz = 0;
+ err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+ if (err)
+ mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
- err = mlx4_dev_cap(dev, &dev_cap);
- if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
- goto err_stop_fw;
- }
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_stop_fw;
+ }
- profile = default_profile;
+ profile = default_profile;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
- if ((long long) icm_size < 0) {
- err = icm_size;
- goto err_stop_fw;
- }
+ icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+ &init_hca);
+ if ((long long) icm_size < 0) {
+ err = icm_size;
+ goto err_stop_fw;
+ }
- if (map_bf_area(dev))
- mlx4_dbg(dev, "Failed to map blue flame area\n");
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca.uar_page_sz = PAGE_SHIFT - 12;
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ if (err)
+ goto err_stop_fw;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
- if (err)
- goto err_stop_fw;
+ err = mlx4_INIT_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ goto err_free_icm;
+ }
+ } else {
+ err = mlx4_init_slave(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize slave\n");
+ goto unmap_bf;
+ }
- err = mlx4_INIT_HCA(dev, &init_hca);
- if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
- goto err_free_icm;
+ err = mlx4_slave_cap(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to obtain slave caps\n");
+ goto err_close;
+ }
}
+ if (map_bf_area(dev))
+ mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+	/* Only the master sets the ports; all the rest get them from it. */
+ if (!mlx4_is_slave(dev))
+ mlx4_set_port_mask(dev);
+
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1177,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
err_close:
- mlx4_CLOSE_HCA(dev, 0);
+ mlx4_close_hca(dev);
err_free_icm:
- mlx4_free_icms(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_free_icms(dev);
err_stop_fw:
+ if (!mlx4_is_slave(dev)) {
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+ }
+unmap_bf:
unmap_bf_area(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
return err;
}
@@ -986,55 +1336,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "multicast group table, aborting.\n");
+ goto err_qp_table_free;
+ }
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_counters_table_free;
+ goto err_mcg_table_free;
}
- for (port = 1; port <= dev->caps.num_ports; port++) {
- enum mlx4_port_type port_type = 0;
- mlx4_SENSE_PORT(dev, port, &port_type);
- if (port_type)
- dev->caps.port_type[port] = port_type;
- ib_port_default_caps = 0;
- err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
- if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing with "
- "caps = 0\n", port, err);
- dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
- err = mlx4_check_ext_port_caps(dev, port);
- if (err)
- mlx4_warn(dev, "failed to get port %d extended "
- "port capabilities support info (%d)."
- " Assuming not supported\n", port, err);
+ if (!mlx4_is_slave(dev)) {
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ ib_port_default_caps = 0;
+ err = mlx4_get_port_ib_caps(dev, port,
+ &ib_port_default_caps);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d default "
+ "ib capabilities (%d). Continuing "
+ "with caps = 0\n", port, err);
+ dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+ err = mlx4_check_ext_port_caps(dev, port);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d extended "
+ "port capabilities support info (%d)."
+ " Assuming not supported\n",
+ port, err);
- err = mlx4_SET_PORT(dev, port);
- if (err) {
- mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
- goto err_mcg_table_free;
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_counters_table_free;
+ }
}
}
- mlx4_set_port_mask(dev);
return 0;
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1081,8 +1432,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
- nreq);
+		/* In multifunction mode each function gets 2 MSI-X vectors:
+		 * one for data path completions and the other for async events
+		 * or command completions */
+ if (mlx4_is_mfunc(dev)) {
+ nreq = 2;
+ } else {
+ nreq = min_t(int, dev->caps.num_eqs -
+ dev->caps.reserved_eqs, nreq);
+ }
+
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -1138,16 +1497,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
- mlx4_init_mac_table(dev, &info->mac_table);
- mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ if (!mlx4_is_slave(dev)) {
+ INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn =
+ dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << log_num_mac);
+ }
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
- info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev))
+ info->port_attr.attr.mode = S_IRUGO;
+ else {
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.store = set_port_type;
+ }
info->port_attr.show = show_port_type;
- info->port_attr.store = set_port_type;
sysfs_attr_init(&info->port_attr.attr);
err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1587,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
kfree(priv->steer);
}
+static int extended_func_num(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
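extended_func_num() above recombines the PCI slot and function numbers; since PCI_SLOT(devfn) is devfn >> 3 and PCI_FUNC(devfn) is devfn & 7, slot * 8 + func simply flattens the pair back into a single index that can be compared against the probe_vf module parameter. A trivial standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int devfn = 0x0a;	/* hypothetical: slot 1, function 2 */
	unsigned int slot = devfn >> 3;	/* PCI_SLOT(devfn) */
	unsigned int func = devfn & 7;	/* PCI_FUNC(devfn) */

	printf("extended_func_num = %u\n", slot * 8 + func);
	return 0;
}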
+
+#define MLX4_OWNER_BASE 0x8069c
+#define MLX4_OWNER_SIZE 4
+
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+ u32 ret;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return -ENOMEM;
+ }
+
+ ret = readl(owner);
+ iounmap(owner);
+ return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return;
+ }
+ writel(0, owner);
+ msleep(1000);
+ iounmap(owner);
+}
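mlx4_get_ownership()/mlx4_free_ownership() above implement a single-word ownership handshake in BAR 0: a non-zero read means another physical function already holds the device (the read itself is assumed here to claim ownership when it returns zero), and writing zero releases it. A toy model of that try-acquire/release pattern, with a plain variable standing in for the ioremapped register:

#include <stdio.h>

/* Toy model only: the real code ioremaps a register at MLX4_OWNER_BASE;
 * here a plain variable stands in for it, and the read-to-claim
 * semantics are an assumption of this sketch. */
static unsigned int owner_word;

static int try_get_ownership(void)
{
	return owner_word == 0;		/* non-zero read: somebody else owns it */
}

static void free_ownership(void)
{
	owner_word = 0;			/* writing zero releases the device */
}

int main(void)
{
	printf("first claim ok: %d\n", try_get_ownership());
	owner_word = 1;			/* pretend another PF took it */
	printf("second claim ok: %d\n", try_get_ownership());
	free_ownership();
	return 0;
}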
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
@@ -1235,13 +1642,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
"aborting.\n");
return err;
}
-
+ if (num_vfs > MLX4_MAX_NUM_VF) {
+		printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF);
+ return -EINVAL;
+ }
/*
- * Check for BARs. We expect 0: 1MB
+ * Check for BARs.
*/
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) != 1 << 20) {
- dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+ if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+ !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing DCS, aborting."
+ "(id == 0X%p, id->driver_data: 0x%lx,"
+ " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+ id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
@@ -1305,42 +1719,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ /* Detect if this device is a virtual function */
+ if (id && id->driver_data & MLX4_VF) {
+ /* When acting as pf, we normally skip vfs unless explicitly
+ * requested to probe them. */
+ if (num_vfs && extended_func_num(pdev) > probe_vf) {
+ mlx4_warn(dev, "Skipping virtual function:%d\n",
+ extended_func_num(pdev));
+ err = -ENODEV;
+ goto err_free_dev;
+ }
+ mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+ dev->flags |= MLX4_FLAG_SLAVE;
+ } else {
+ /* We reset the device and enable SRIOV only for physical
+ * devices. Try to claim ownership on the device;
+ * if already taken, skip -- do not allow multiple PFs */
+ err = mlx4_get_ownership(dev);
+ if (err) {
+ if (err < 0)
+ goto err_free_dev;
+ else {
+ mlx4_warn(dev, "Multiple PFs not yet supported."
+ " Skipping PF.\n");
+ err = -EINVAL;
+ goto err_free_dev;
+ }
+ }
- /*
- * Now reset the HCA before we touch the PCI capabilities or
- * attempt a firmware command, since a boot ROM may have left
- * the HCA in an undefined state.
- */
- err = mlx4_reset(dev);
- if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
- goto err_free_dev;
+ if (num_vfs) {
+ mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+				mlx4_err(dev, "Failed to enable sriov, "
+ "continuing without sriov enabled"
+ " (err = %d).\n", err);
+ num_vfs = 0;
+ err = 0;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev->flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev->num_vfs = num_vfs;
+ }
+ }
+
+ /*
+ * Now reset the HCA before we touch the PCI capabilities or
+ * attempt a firmware command, since a boot ROM may have left
+ * the HCA in an undefined state.
+ */
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_rel_own;
+ }
}
+slave_start:
if (mlx4_cmd_init(dev)) {
mlx4_err(dev, "Failed to init command interface, aborting.\n");
- goto err_free_dev;
+ goto err_sriov;
+ }
+
+ /* In slave functions, the communication channel must be initialized
+ * before posting commands. Also, init num_slaves before calling
+ * mlx4_init_hca */
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_is_master(dev))
+ dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+ else {
+ dev->num_slaves = 0;
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init slave mfunc"
+ " interface, aborting.\n");
+ goto err_cmd;
+ }
+ }
}
err = mlx4_init_hca(dev);
- if (err)
- goto err_cmd;
+ if (err) {
+ if (err == -EACCES) {
+			/* Not the primary physical function;
+			 * running in slave mode */
+ mlx4_cmd_cleanup(dev);
+ dev->flags |= MLX4_FLAG_SLAVE;
+ dev->flags &= ~MLX4_FLAG_MASTER;
+ goto slave_start;
+ } else
+ goto err_mfunc;
+ }
+
+ /* In master functions, the communication channel must be initialized
+ * after obtaining its address from fw */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_multi_func_init(dev)) {
+			mlx4_err(dev, "Failed to init master mfunc "
+ "interface, aborting.\n");
+ goto err_close;
+ }
+ }
err = mlx4_alloc_eq_table(dev);
if (err)
- goto err_close;
+ goto err_master_mfunc;
priv->msix_ctl.pool_bm = 0;
spin_lock_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
-
- err = mlx4_init_steering(dev);
- if (err)
+ if ((mlx4_is_mfunc(dev)) &&
+ !(dev->flags & MLX4_FLAG_MSI_X)) {
+		mlx4_err(dev, "INTx is not supported in multi-function mode,"
+ " aborting.\n");
goto err_free_eq;
+ }
+
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_steering(dev);
+ if (err)
+ goto err_free_eq;
+ }
err = mlx4_setup_hca(dev);
- if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+ if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+ !mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
@@ -1383,20 +1887,37 @@ err_port:
mlx4_cleanup_uar_table(dev);
err_steer:
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
err_free_eq:
mlx4_free_eq_table(dev);
+err_master_mfunc:
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_close:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
mlx4_close_hca(dev);
+err_mfunc:
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_cmd:
mlx4_cmd_cleanup(dev);
+err_sriov:
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+ pci_disable_sriov(pdev);
+
+err_rel_own:
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
+
err_free_dev:
kfree(priv);
@@ -1424,6 +1945,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
int p;
if (dev) {
+		/* In SRIOV mode it is not allowed to unload the PF's
+		 * driver while there are live VFs */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_how_many_lives_vf(dev))
+				printk(KERN_ERR "Removing PF when there are assigned VFs!\n");
+ }
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -1443,17 +1970,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev);
+
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
mlx4_free_eq_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_cmd_cleanup(dev);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+ mlx4_warn(dev, "Disabling sriov\n");
+ pci_disable_sriov(pdev);
+ }
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
kfree(priv);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -1468,33 +2009,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
- { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+ /* MT25408 "Hermon" SDR */
+ { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+ /* MT25408 "Hermon" DDR */
+ { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+ /* MT25408 "Hermon" QDR */
+ { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+ /* MT25408 "Hermon" DDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+ /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+ /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+ /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+ /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+ { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+ /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+ { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+ /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+ /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+ /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
{ 0, }
};
@@ -1523,6 +2079,12 @@ static int __init mlx4_verify_params(void)
return -1;
}
+ /* Check if module param for ports type has legal combination */
+ if (port_type_array[0] == false && port_type_array[1] == true) {
+ printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ port_type_array[0] = true;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 978688c3104..0785d9b2a26 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -44,28 +44,47 @@
static const u8 zero_gid[16]; /* automatically initialized to 0 */
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+ return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+ return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
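The two helpers above derive both the MGM entry size and its QP capacity from the log_num_mgm_entry_size module parameter: each entry starts with a 32-byte header (next-GID index, member count, reserved words and the 16-byte GID of struct mlx4_mgm), and the remainder holds 4-byte QP numbers, hence 4 * (entry_size / 16 - 2). A standalone table over the supported range; MLX4_MAX_MGM_ENTRY_SIZE is assumed to be 0x1000 here:

#include <stdio.h>

#define MAX_MGM_ENTRY_SIZE 0x1000	/* assumed MLX4_MAX_MGM_ENTRY_SIZE */

int main(void)
{
	int log_sz;

	for (log_sz = 9; log_sz <= 12; log_sz++) {
		int entry = 1 << log_sz;

		if (entry > MAX_MGM_ENTRY_SIZE)
			entry = MAX_MGM_ENTRY_SIZE;
		/* 32-byte header = two 16-byte units, four QPs per unit */
		printf("log=%d entry=%d bytes qp_per_mgm=%d\n",
		       log_sz, entry, 4 * (entry / 16 - 2));
	}
	return 0;
}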
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
struct mlx4_cmd_mailbox *mailbox)
{
u32 in_mod;
- in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+ in_mod = (u32) port << 16 | steer << 1;
return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
- MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int err;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
- MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
if (!err)
*hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
* Add new entry to steering data structure.
* All promisc QPs should be added as well
*/
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_promisc_qp *dqp = NULL;
u32 prot;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* If the given qpn is also a promisc qp,
* it should be inserted to duplicates list
*/
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (pqp) {
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* don't add already existing qpn */
if (pqp->qpn == qpn)
continue;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* out of space */
err = -ENOMEM;
goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
}
/* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (!pqp)
return 0; /* nothing to do */
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
* we need to add it as a duplicate to this entry
* for future references */
list_for_each_entry(dqp, &entry->duplicates, list) {
- if (qpn == dqp->qpn)
+ if (qpn == pqp->qpn)
return 0; /* qp is already duplicated */
}
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* Check whether a qpn is a duplicate on steering entry
* If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
/* if qp is not promisc, it cannot be duplicated */
- if (!get_promisc_qp(dev, pf_num, steer, qpn))
+ if (!get_promisc_qp(dev, 0, steer, qpn))
return false;
/* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
/* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
{
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
u32 members_count;
bool ret = false;
int i;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; i++) {
qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
- if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+ if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
/* the qp is not promisc, the entry can't be removed */
goto out;
}
@@ -332,7 +344,7 @@ out:
return ret;
}
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool found;
int last_index;
int err;
- u8 pf_num;
struct mlx4_priv *priv = mlx4_priv(dev);
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ s_steer = &mlx4_priv(dev)->steer[0];
mutex_lock(&priv->mcg_table.mutex);
- if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+ if (get_promisc_qp(dev, 0, steer, qpn)) {
		err = 0; /* Nothing to do, already exists */
goto out_mutex;
}
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
if (!found) {
/* Need to add the qpn to mgm */
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_list;
@@ -439,7 +450,7 @@ out_mutex:
return err;
}
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool back_to_list = false;
int loc, i;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[0];
mutex_lock(&priv->mcg_table.mutex);
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (unlikely(!pqp)) {
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
/* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
goto out_list;
}
mgm = mailbox->buf;
+ memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_mailbox;
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
index += dev->caps.num_mgms;
+ new_entry = 1;
memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid, 16);
}
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
goto out;
@@ -696,9 +707,9 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ new_steering_entry(dev, port, steer, index, qp->qpn);
else
- existing_steering_entry(dev, 0, port, steer,
+ existing_steering_entry(dev, port, steer,
index, qp->qpn);
}
if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
/* if this pq is also a promisc qp, it shouldn't be removed */
if (prot == MLX4_PROT_ETH &&
- check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+ check_duplicate_entry(dev, port, steer, index, qp->qpn))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mgm->qp[i - 1] = 0;
if (prot == MLX4_PROT_ETH)
- removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ removed_entry = can_remove_steering_entry(dev, port, steer,
+ index, qp->qpn);
if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
goto out;
@@ -828,6 +840,34 @@ out:
return err;
}
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], u8 attach, u8 block_loopback,
+ enum mlx4_protocol prot)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err = 0;
+ int qpn;
+
+ if (!mlx4_is_mfunc(dev))
+ return -EBADF;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, gid, 16);
+ qpn = qp->qpn;
+ qpn |= (prot << 28);
+ if (attach && block_loopback)
+ qpn |= (1 << 31);
+
+ err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+ MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- return mlx4_qp_attach_common(dev, qp, gid,
- block_mcast_loopback, prot,
- steer);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- }
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+ struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+ u8 port = vhcr->in_param >> 62;
+ enum mlx4_steer_type steer = vhcr->in_modifier;
+
+ /* Promiscuous unicast is not allowed in mfunc */
+ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+ return 0;
+
+ if (vhcr->op_modifier)
+ return add_promisc_qp(dev, port, steer, qpn);
+ else
+ return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+ enum mlx4_steer_type steer, u8 add, u8 port)
+{
+ return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+ MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+}
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
@@ -884,8 +995,10 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
- return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
@@ -894,8 +1007,10 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
@@ -904,7 +1019,10 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+ return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
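The mlx4_QP_ATTACH and mlx4_PROMISC helpers introduced above both fold several fields into the scalar arguments of mlx4_cmd(): mlx4_QP_ATTACH carries the protocol in bits 28-30 of the QP-number vector and the block-loopback flag in bit 31, while mlx4_PROMISC puts the QP number in the low 32 bits of in_param and the port in the top two bits, which mlx4_PROMISC_wrapper() unpacks with a mask and a shift by 62. The stand-alone sketch below (plain user-space C, not code from this patch) mirrors the PROMISC layout so the encode and decode sides can be compared side by side.

#include <stdint.h>
#include <assert.h>

/* Pack a QP number and port the way mlx4_PROMISC() builds its in_param:
 * the low 32 bits hold the QPN, the top two bits hold the port. */
static uint64_t pack_promisc_param(uint32_t qpn, uint8_t port)
{
	return (uint64_t)qpn | ((uint64_t)port << 62);
}

/* Unpack on the command-wrapper side, mirroring mlx4_PROMISC_wrapper(). */
static void unpack_promisc_param(uint64_t in_param, uint32_t *qpn, uint8_t *port)
{
	*qpn = (uint32_t)(in_param & 0xffffffff);
	*port = (uint8_t)(in_param >> 62);
}

int main(void)
{
	uint32_t qpn;
	uint8_t port;

	unpack_promisc_param(pack_promisc_param(0x123456, 2), &qpn, &port);
	assert(qpn == 0x123456 && port == 2);
	return 0;
}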
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 5dfa68ffc11..a80121a2b51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,21 +46,25 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "July 14, 2011"
+#define PFX DRV_NAME ": "
+#define DRV_VERSION "1.1"
+#define DRV_RELDATE "Dec, 2011"
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
- MLX4_CLR_INT_SIZE = 0x00008
+ MLX4_CLR_INT_SIZE = 0x00008,
+ MLX4_SLAVE_COMM_BASE = 0x0,
+ MLX4_COMM_PAGESIZE = 0x1000
};
enum {
- MLX4_MGM_ENTRY_SIZE = 0x100,
- MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8
+ MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+ MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
@@ -80,6 +84,94 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
+enum mlx4_mr_state {
+ MLX4_MR_DISABLED = 0,
+ MLX4_MR_EN_HW,
+ MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME 10000
+enum {
+ MLX4_COMM_CMD_RESET,
+ MLX4_COMM_CMD_VHCR0,
+ MLX4_COMM_CMD_VHCR1,
+ MLX4_COMM_CMD_VHCR2,
+ MLX4_COMM_CMD_VHCR_EN,
+ MLX4_COMM_CMD_VHCR_POST,
+ MLX4_COMM_CMD_FLR = 254
+};
+
+/* The flag indicates that the slave should delay the RESET cmd */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* Indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES 10
+#define SLEEP_TIME_IN_RESET (2 * 1000)
+enum mlx4_resource {
+ RES_QP,
+ RES_CQ,
+ RES_SRQ,
+ RES_XRCD,
+ RES_MPT,
+ RES_MTT,
+ RES_MAC,
+ RES_VLAN,
+ RES_EQ,
+ RES_COUNTER,
+ MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+ RES_OP_RESERVE,
+ RES_OP_RESERVE_AND_MAP,
+ RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine endianness
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to FW to go through communication channel.
+ * It is big endian, and has the same structure as the physical HCR
+ * used by command interface
+ */
+struct mlx4_vhcr {
+ u64 in_param;
+ u64 out_param;
+ u32 in_modifier;
+ u32 errno;
+ u16 op;
+ u16 token;
+ u8 op_modifier;
+ u8 e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+ __be64 in_param;
+ __be32 in_modifier;
+ __be64 out_param;
+ __be16 token;
+ u16 reserved;
+ u8 status;
+ u8 flags;
+ __be16 opcode;
+};
+
+struct mlx4_cmd_info {
+ u16 opcode;
+ bool has_inbox;
+ bool has_outbox;
+ bool out_is_imm;
+ bool encode_slave_id;
+ int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox);
+ int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+};
+
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do { \
#define mlx4_warn(mdev, format, arg...) \
dev_warn(&mdev->pdev->dev, format, ##arg)
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
struct mlx4_bitmap {
u32 last;
u32 top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
+/*
+ * Must be packed because mtt_addr is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+ __be32 flags;
+ __be32 qpn;
+ __be32 key;
+ __be32 pd_flags;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 win_cnt;
+ u8 reserved1[3];
+ u8 mtt_rep;
+ __be64 mtt_addr;
+ __be32 mtt_sz;
+ __be32 entity_size;
+ __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ u8 log_eq_size;
+ u8 reserved2[4];
+ u8 eq_period;
+ u8 reserved3;
+ u8 eq_max_count;
+ u8 reserved4[3];
+ u8 intr;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved7[4];
+};
+
+struct mlx4_cq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ __be32 logsize_usrpage;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ u8 reserved2[3];
+ u8 comp_eqn;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved4[2];
+ __be64 db_rec_addr;
+};
+
+struct mlx4_srq_context {
+ __be32 state_logsize_srqn;
+ u8 logstride;
+ u8 reserved1;
+ __be16 xrcd;
+ __be32 pg_offset_cqn;
+ u32 reserved2;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved4;
+ __be16 wqe_counter;
+ u32 reserved5;
+ __be64 db_rec_addr;
+};
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ struct {
+ #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
+ u32 reserved;
+ u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ } __packed comm_channel_arm;
+ struct {
+ u8 port;
+ u8 reserved[3];
+ __be64 mac;
+ } __packed mac_update;
+ struct {
+ u8 port;
+ } __packed sw_event;
+ struct {
+ __be32 slave_id;
+ } __packed flr_event;
+ } event;
+ u8 slave_id;
+ u8 reserved3[2];
+ u8 owner;
+} __packed;
+
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -142,6 +381,18 @@ struct mlx4_eq {
struct mlx4_mtt mtt;
};
+struct mlx4_slave_eqe {
+ u8 type;
+ u8 port;
+ u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+ u32 eqn;
+ u16 token;
+ u64 event_type;
+};
+
struct mlx4_profile {
int num_qp;
int rdmarc_per_qp;
@@ -155,16 +406,37 @@ struct mlx4_profile {
struct mlx4_fw {
u64 clr_int_base;
u64 catas_offset;
+ u64 comm_base;
struct mlx4_icm *fw_icm;
struct mlx4_icm *aux_icm;
u32 catas_size;
u16 fw_pages;
u8 clr_int_bar;
u8 catas_bar;
+ u8 comm_bar;
};
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
+struct mlx4_comm {
+ u32 slave_write;
+ u32 slave_read;
+};
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+ struct list_head list;
+ u64 addr;
+};
struct mlx4_promisc_qp {
struct list_head list;
@@ -177,19 +449,87 @@ struct mlx4_steer_index {
struct list_head duplicates;
};
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_QP_PER_MGM];
+struct mlx4_slave_state {
+ u8 comm_toggle;
+ u8 last_cmd;
+ u8 init_port_mask;
+ bool active;
+ u8 function;
+ dma_addr_t vhcr_dma;
+ u16 mtu[MLX4_MAX_PORTS + 1];
+ __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+ struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+ struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_event_eq_info event_eq;
+ u16 eq_pi;
+ u16 eq_ci;
+ spinlock_t lock;
+ /* initialized via kzalloc */
+ u8 is_slave_going_down;
+ u32 cookie;
+};
+
+struct slave_list {
+ struct mutex mutex;
+ struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+ spinlock_t lock;
+ /* tree for each resources */
+ struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ /* num_of_slave's lists, one per slave */
+ struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE 128
+struct mlx4_slave_event_eq {
+ u32 eqn;
+ u32 cons;
+ u32 prod;
+ struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+ int proxy_qp0_active;
+ int qp0_active;
+ int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+ int init_port_ref[MLX4_MAX_PORTS + 1];
+ u16 max_mtu[MLX4_MAX_PORTS + 1];
+ int disable_mcast_ref[MLX4_MAX_PORTS + 1];
+ struct mlx4_resource_tracker res_tracker;
+ struct workqueue_struct *comm_wq;
+ struct work_struct comm_work;
+ struct work_struct slave_event_work;
+ struct work_struct slave_flr_event_work;
+ spinlock_t slave_state_lock;
+ __be32 comm_arm_bit_vector[4];
+ struct mlx4_eqe cmd_eqe;
+ struct mlx4_slave_event_eq slave_eq;
+ struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+ struct mlx4_comm __iomem *comm;
+ struct mlx4_vhcr_cmd *vhcr;
+ dma_addr_t vhcr_dma;
+
+ struct mlx4_mfunc_master_ctx master;
};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct semaphore slave_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -197,6 +537,7 @@ struct mlx4_cmd {
u16 token_mask;
u8 use_events;
u8 toggle;
+ u8 comm_toggle;
};
struct mlx4_uar_table {
@@ -287,6 +628,48 @@ struct mlx4_vlan_table {
int max;
};
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT 30
+
+enum {
+ MCAST_DIRECT_ONLY = 0,
+ MCAST_DIRECT = 1,
+ MCAST_DEFAULT = 2
+};
+
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ u8 rererved;
+ u8 n_mac;
+ u8 n_vlan;
+ u8 n_prio;
+ u8 reserved2[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved3[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
struct mlx4_mac_entry {
u64 mac;
};
@@ -333,6 +716,7 @@ struct mlx4_priv {
struct mlx4_fw fw;
struct mlx4_cmd cmd;
+ struct mlx4_mfunc mfunc;
struct mlx4_bitmap pd_bitmap;
struct mlx4_bitmap xrcd_bitmap;
@@ -359,6 +743,7 @@ struct mlx4_priv {
struct list_head bf_list;
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
+ int reserved_mtts;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +788,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +860,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout);
+
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
@@ -452,12 +993,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions*/
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource resource_type,
+ int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+ int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+ struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
+static inline void set_param_l(u64 *arg, u32 val)
+{
+ *((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+ *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+ return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+ return (u32)(*arg >> 32);
+}
+
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+ return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
+#define NOT_MASKED_PD_BITS 17
+
#endif /* MLX4_H */
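The get/set_param_{l,h} inline helpers added at the bottom of mlx4.h treat a 64-bit command argument as two independent 32-bit halves; for example, mlx4_free_mtt_range() stores the MTT offset in the low half and the allocation order in the high half before issuing MLX4_CMD_FREE_RES. Note that set_param_l() as written stores through a (u32 *) cast, which addresses the low half only on little-endian hosts; the stand-alone sketch below (not part of the patch) expresses the same packing with shift/mask arithmetic, which is endianness-neutral.

#include <stdint.h>
#include <assert.h>

/* Endianness-neutral equivalents of get/set_param_{l,h}: one u64
 * command argument carries two independent 32-bit values. */
static void set_low32(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | val;
}

static void set_high32(uint64_t *arg, uint32_t val)
{
	*arg = (*arg & 0x00000000ffffffffULL) | ((uint64_t)val << 32);
}

static uint32_t get_low32(uint64_t arg)  { return (uint32_t)(arg & 0xffffffff); }
static uint32_t get_high32(uint64_t arg) { return (uint32_t)(arg >> 32); }

int main(void)
{
	uint64_t arg = 0;

	set_low32(&arg, 0x1000);	/* e.g. an MTT offset */
	set_high32(&arg, 7);		/* e.g. an allocation order */
	assert(get_low32(arg) == 0x1000 && get_high32(arg) == 7);
	return 0;
}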
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8fda331c65d..f2a8e65f5f8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.4.2"
-#define DRV_RELDATE "October 2011"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "Dec 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -272,6 +272,7 @@ struct mlx4_en_rx_ring {
u32 prod;
u32 cons;
u32 buf_size;
+ u8 fcs_del;
void *buf;
void *rx_info;
unsigned long bytes;
@@ -365,16 +366,6 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state;
};
-struct mlx4_en_rss_context {
- __be32 base_qpn;
- __be32 default_qpn;
- u16 reserved;
- u8 hash_fn;
- u8 flags;
- __be32 rss_key[10];
- __be32 base_qpn_udp;
-};
-
struct mlx4_en_port_state {
int link_state;
int link_speed;
@@ -462,6 +453,7 @@ struct mlx4_en_priv {
int base_qpn;
struct mlx4_en_rss_map rss_map;
+ u32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
@@ -494,9 +486,9 @@ struct mlx4_en_priv {
enum mlx4_en_wol {
MLX4_EN_WOL_MAGIC = (1ULL << 61),
MLX4_EN_WOL_ENABLED = (1ULL << 62),
- MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
};
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index efa3e77355e..01df5567e16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,35 +32,17 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
- __be32 flags;
- __be32 qpn;
- __be32 key;
- __be32 pd_flags;
- __be64 start;
- __be64 length;
- __be32 lkey;
- __be32 win_cnt;
- u8 reserved1[3];
- u8 mtt_rep;
- __be64 mtt_seg;
- __be32 mtt_sz;
- __be32 entity_size;
- __be32 first_byte_offset;
-} __packed;
-
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
kfree(buddy->num_free);
}
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg;
+ int seg_order;
+ u32 offset;
- seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+ seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
if (seg == -1)
return -1;
- if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
- seg + (1 << order) - 1)) {
- mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+ offset = seg * (1 << log_mtts_per_seg);
+
+ if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1)) {
+ mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
return -1;
}
- return seg;
+ return offset;
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, order);
+ err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_alloc_mtt_range(dev, order);
}
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
} else
mtt->page_shift = page_shift;
- for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+ for (mtt->order = 0, i = 1; i < npages; i <<= 1)
++mtt->order;
- mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
- if (mtt->first_seg == -1)
+ mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+ if (mtt->offset == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
+ u32 first_seg;
+ int seg_order;
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+ first_seg = offset / (1 << log_mtts_per_seg);
+
+ mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+ mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, offset);
+ set_param_h(&in_param, order);
+ err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to free mtt range at:"
+ "%d order:%d\n", offset, order);
+ return;
+ }
+ __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
if (mtt->order < 0)
return;
- mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
- mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
+ mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
- return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+ return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
- return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
- MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, mpt_index,
+ 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
- !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+ !mailbox, MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
- int npages, int page_shift, struct mlx4_mr *mr)
+static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ u32 *base_mridx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u32 index;
- int err;
+ u32 mridx;
- index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
- if (index == -1)
+ mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+ if (mridx == -1)
return -ENOMEM;
+ *base_mridx = mridx;
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ u64 iova, u64 size, u32 access, int npages,
+ int page_shift, struct mlx4_mr *mr)
+{
mr->iova = iova;
mr->size = size;
mr->pd = pd;
mr->access = access;
- mr->enabled = 0;
- mr->key = hw_index_to_key(index);
+ mr->enabled = MLX4_MR_DISABLED;
+ mr->key = hw_index_to_key(mridx);
+
+ return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ int num_entries)
+{
+ return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+}
- err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ u64 out_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to release mr index:%d\n",
+ index);
+ return;
+ }
+ __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, index);
+ return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+ index);
+ return;
+ }
+ return __mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr)
+{
+ u32 index;
+ int err;
+
+ index = mlx4_mr_reserve(dev);
+ if (index == -1)
+ return -ENOMEM;
+
+ err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+ access, npages, page_shift, mr);
if (err)
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_mr_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- if (mr->enabled) {
+ if (mr->enabled == MLX4_MR_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err)
- mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
- }
+ mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+ mr->enabled = MLX4_MR_EN_SW;
+ }
mlx4_mtt_cleanup(dev, &mr->mtt);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ mlx4_mr_free_reserved(dev, mr);
+ if (mr->enabled)
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
- mpt_entry->mtt_seg = 0;
+ mpt_entry->mtt_addr = 0;
} else {
- mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+ &mr->mtt));
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
- mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
- dev->caps.mtts_per_seg);
+ mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
-
- mr->enabled = 1;
+ mr->enabled = MLX4_MR_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -373,7 +546,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
- mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
__be64 *mtts;
dma_addr_t dma_handle;
int i;
- int s = start_index * sizeof (u64);
- /* All MTTs must fit in the same page */
- if (start_index / (PAGE_SIZE / sizeof (u64)) !=
- (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
- return -EINVAL;
-
- if (start_index & (dev->caps.mtts_per_seg - 1))
- return -EINVAL;
+ mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+ start_index, &dma_handle);
- mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
- s / dev->caps.mtt_entry_sz, &dma_handle);
if (!mtts)
return -ENOMEM;
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
return 0;
}
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
{
+ int err = 0;
int chunk;
- int err;
+ int mtts_per_page;
+ int max_mtts_first_page;
- if (mtt->order < 0)
- return -EINVAL;
+ /* compute how many mtts fit in the first page */
+ mtts_per_page = PAGE_SIZE / sizeof(u64);
+ max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+ % mtts_per_page;
+
+ chunk = min_t(int, max_mtts_first_page, npages);
while (npages > 0) {
- chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err)
return err;
-
npages -= chunk;
start_index += chunk;
page_list += chunk;
+
+ chunk = min_t(int, mtts_per_page, npages);
}
+ return err;
+}
- return 0;
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ __be64 *inbox = NULL;
+ int chunk;
+ int err = 0;
+ int i;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ if (mlx4_is_mfunc(dev)) {
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ while (npages > 0) {
+ chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+ npages);
+ inbox[0] = cpu_to_be64(mtt->offset + start_index);
+ inbox[1] = 0;
+ for (i = 0; i < chunk; ++i)
+ inbox[i + 2] = cpu_to_be64(page_list[i] |
+ MLX4_MTT_FLAG_PRESENT);
+ err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+ if (err) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
+ /* Nothing to do for slaves - all MR handling is forwarded
+ * to the master */
+ if (mlx4_is_slave(dev))
+ return 0;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
- ilog2(dev->caps.num_mtt_segs));
+ ilog2(dev->caps.num_mtts /
+ (1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
if (dev->caps.reserved_mtts) {
- if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+ priv->reserved_mtts =
+ mlx4_alloc_mtt_range(dev,
+ fls(dev->caps.reserved_mtts - 1));
+ if (priv->reserved_mtts < 0) {
mlx4_warn(dev, "MTT table of order %d is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
+ if (mlx4_is_slave(dev))
+ return;
+ if (priv->reserved_mtts >= 0)
+ mlx4_free_mtt_range(dev, priv->reserved_mtts,
+ fls(dev->caps.reserved_mtts - 1));
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u64 mtt_seg;
+ u64 mtt_offset;
int err = -ENOMEM;
if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
if (err)
return err;
- mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+ mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.first_seg,
+ fmr->mr.mtt.offset,
&fmr->dma_handle);
+
if (!fmr->mtts) {
err = -ENOMEM;
goto err_free;
@@ -619,6 +852,46 @@ err_free:
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+ u32 pd, u32 access, int max_pages,
+ int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = -ENOMEM;
+
+ if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+ return -EINVAL;
+
+ /* All MTTs must fit in the same page */
+ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+ return -EINVAL;
+
+ fmr->page_shift = page_shift;
+ fmr->max_pages = max_pages;
+ fmr->max_maps = max_maps;
+ fmr->maps = 0;
+
+ err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+ page_shift, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+ fmr->mr.mtt.offset,
+ &fmr->dma_handle);
+ if (!fmr->mtts) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
if (!fmr->maps)
return;
fmr->maps = 0;
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+ " failed (%d)\n", err);
+ return;
+ }
+
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(fmr->mr.key) &
+ (dev->caps.num_mpts - 1));
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err) {
+ printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+ err);
+ return;
+ }
+ fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
- fmr->mr.enabled = 0;
mlx4_mr_free(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+ if (fmr->maps)
+ return -EBUSY;
+
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
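The MTT rework in mr.c above replaces the old per-segment bookkeeping (first_seg) with raw MTT-entry offsets: __mlx4_alloc_mtt_range() converts an entry order into a buddy segment order with seg_order = max(order - log_mtts_per_seg, 0) and turns the allocated segment back into an entry offset with offset = seg * (1 << log_mtts_per_seg). The sketch below walks through that arithmetic in isolation; the value of LOG_MTTS_PER_SEG is an assumed example, in the driver it comes from the log_mtts_per_seg module parameter.

#include <assert.h>

#define LOG_MTTS_PER_SEG 3	/* assumed example; 2^3 = 8 MTT entries per buddy segment */

/* Buddy order of segments needed to hold (1 << order) MTT entries. */
static int seg_order_for(int order)
{
	int seg_order = order - LOG_MTTS_PER_SEG;
	return seg_order > 0 ? seg_order : 0;
}

/* First MTT entry covered by buddy segment 'seg'. */
static unsigned int seg_to_offset(unsigned int seg)
{
	return seg * (1u << LOG_MTTS_PER_SEG);
}

int main(void)
{
	/* 2^5 = 32 MTT entries need 2^2 = 4 segments of 8 entries each. */
	assert(seg_order_for(5) == 2);
	/* A request smaller than one segment still consumes a whole segment. */
	assert(seg_order_for(1) == 0);
	/* Segment 6 starts at MTT entry 48. */
	assert(seg_to_offset(6) == 48);
	return 0;
}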
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 260ed259ce9..5c9a54df17a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -51,7 +52,8 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
if (*pdn == -1)
return -ENOMEM;
-
+ if (mlx4_is_mfunc(dev))
+ *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
@@ -85,7 +87,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
- (1 << 24) - 1, dev->caps.reserved_pds, 0);
+ (1 << NOT_MASKED_PD_BITS) - 1,
+ dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +111,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
+ int offset;
+
uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
if (uar->index == -1)
return -ENOMEM;
- uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+ if (mlx4_is_slave(dev))
+ offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+ dev->caps.uar_page_size);
+ else
+ offset = uar->index;
+ uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
uar->map = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +241,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
- max(128, dev->caps.reserved_uars), 0);
+ dev->caps.reserved_uars, 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
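The pd.c change above partitions the PD number space between functions: the bitmap is now limited to the low NOT_MASKED_PD_BITS (17) bits, and on multi-function devices mlx4_pd_alloc() ORs (function + 1) into the bits above, so two functions can never hand out the same PD number. A minimal sketch of that layout follows (illustration only, not the driver's code).

#include <stdint.h>
#include <assert.h>

#define NOT_MASKED_PD_BITS 17

/* Compose a PD number the way the multi-function path does: the low
 * 17 bits come from the per-function bitmap, the bits above encode
 * (function + 1) so PDs from different functions cannot collide. */
static uint32_t make_pdn(uint32_t bitmap_index, uint32_t function)
{
	return bitmap_index | ((function + 1) << NOT_MASKED_PD_BITS);
}

int main(void)
{
	/* The same bitmap index on function 0 and function 1 yields distinct PDs. */
	assert(make_pdn(5, 0) != make_pdn(5, 1));
	/* Masking with (1 << 17) - 1 recovers the bitmap index. */
	assert((make_pdn(5, 1) & ((1u << NOT_MASKED_PD_BITS) - 1)) == 5);
	return 0;
}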
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index d942aea4927..88b52e54752 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -70,41 +70,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
- __be64 *entries)
-{
- struct mlx4_cmd_mailbox *mailbox;
- u32 in_mod;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
- in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
int err;
- if (reserve) {
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- return err;
- }
- }
qp.qpn = *qpn;
mac &= 0xffffffffffffULL;
@@ -113,16 +84,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- err = mlx4_qp_attach_common(dev, &qp, gid, 0,
- MLX4_PROT_ETH, MLX4_UC_STEER);
- if (err && reserve)
- mlx4_qp_release_range(dev, *qpn, 1);
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ if (err)
+ mlx4_warn(dev, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u8 free)
+ u64 mac, int qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
@@ -134,60 +104,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
- if (free)
- mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, int index)
+{
+ int err = 0;
+
+ if (index < 0 || index >= table->max || !table->entries[index]) {
+ mlx4_warn(dev, "No valid Mac entry for the given index\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, u64 mac)
+{
+ int i;
+
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if ((mac & MLX4_MAC_MASK) ==
+ (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+ return i;
+ }
+ /* Mac not found */
+ return -EINVAL;
}
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
struct mlx4_mac_entry *entry;
- int i, err = 0;
- int free = -1;
+ int index = 0;
+ int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
- if (err)
- return err;
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+ (unsigned long long) mac);
+ index = mlx4_register_mac(dev, port, mac);
+ if (index < 0) {
+ err = index;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
+ return err;
+ }
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return -ENOMEM;
- }
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ *qpn = info->base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+ if (err) {
+ mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ if (err)
+ goto steer_err;
- entry->mac = mac;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err) {
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err)
+ goto insert_err;
+ return 0;
+
+insert_err:
+ kfree(entry);
+
+alloc_err:
+ mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, port, mac);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_entry *entry;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
+ (unsigned long long) mac);
+ mlx4_unregister_mac(dev, port, mac);
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (entry) {
+ mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+ " qpn %d\n", port,
+ (unsigned long long) mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_qp_release_range(dev, qpn, 1);
+ radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return err;
}
}
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
- mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i, err = 0;
+ int free = -1;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+ (unsigned long long) mac, port);
mutex_lock(&table->mutex);
- for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
- if (free < 0 && !table->refs[i]) {
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (free < 0 && !table->entries[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- /* MAC already registered, increase references count */
- ++table->refs[i];
+			/* MAC already registered; must not have duplicates */
+ err = -EEXIST;
goto out;
}
}
- if (free < 0) {
- err = -ENOMEM;
- goto out;
- }
-
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -197,103 +271,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
}
/* Register new MAC */
- table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
- table->refs[free] = 0;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
table->entries[free] = 0;
goto out;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- *qpn = info->base_qpn + free;
+ err = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
-static int validate_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
- int err = 0;
+ u64 out_param;
+ int err;
- if (index < 0 || index >= table->max || !table->entries[index]) {
- mlx4_warn(dev, "No valid Mac entry for the given index\n");
- err = -EINVAL;
- }
- return err;
-}
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
-static int find_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, u64 mac)
-{
- int i;
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
- return i;
+ return get_param_l(&out_param);
}
- /* Mac not found */
- return -EINVAL;
+ return __mlx4_register_mac(dev, port, mac);
}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
- struct mlx4_mac_entry *entry;
+ int index;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- index = find_index(dev, table, entry->mac);
- kfree(entry);
- }
- }
+ index = find_index(dev, table, mac);
mutex_lock(&table->mutex);
if (validate_index(dev, table, index))
goto out;
- /* Check whether this address has reference count */
- if (!(--table->refs[index])) {
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
- }
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
out:
mutex_unlock(&table->mutex);
}
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ return;
+ }
+ __mlx4_unregister_mac(dev, port, mac);
+ return;
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
struct mlx4_mac_entry *entry;
- int err;
+ int index = qpn - info->base_qpn;
+ int err = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- index = find_index(dev, table, entry->mac);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
- if (err || index < 0)
- return err;
+ mlx4_register_mac(dev, port, new_mac);
+ err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ return err;
}
+ /* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
err = validate_index(dev, table, index);
@@ -304,7 +378,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) new_mac);
table->entries[index] = 0;
}
out:
@@ -312,6 +387,7 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
@@ -326,7 +402,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -352,7 +428,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+ int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
@@ -387,7 +464,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
goto out;
}
- /* Register new MAC */
+ /* Register new VLAN */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
@@ -405,9 +482,27 @@ out:
mutex_unlock(&table->mutex);
return err;
}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *index = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_register_vlan(dev, port, vlan, index);
+}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -432,6 +527,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
out:
mutex_unlock(&table->mutex);
}
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, port);
+ err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+				  index);
+
+ return;
+ }
+ __mlx4_unregister_vlan(dev, port, index);
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +576,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
*caps = *(__be32 *) (outbuf + 84);
mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +614,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
@@ -512,6 +628,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
return err;
}
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+ u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_port_info *port_info;
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ struct mlx4_set_port_rqp_calc_context *qpn_context;
+ struct mlx4_set_port_general_context *gen_context;
+ int reset_qkey_viols;
+ int port;
+ int is_eth;
+ u32 in_modifier;
+ u32 promisc;
+ u16 mtu, prev_mtu;
+ int err;
+ int i;
+ __be32 agg_cap_mask;
+ __be32 slave_cap_mask;
+ __be32 new_cap_mask;
+
+ port = in_mod & 0xff;
+ in_modifier = in_mod >> 8;
+ is_eth = op_mod;
+ port_info = &priv->port[port];
+
+ /* Slaves cannot perform SET_PORT operations except changing MTU */
+ if (is_eth) {
+ if (slave != dev->caps.function &&
+ in_modifier != MLX4_SET_PORT_GENERAL) {
+ mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+ slave);
+ return -EINVAL;
+ }
+ switch (in_modifier) {
+ case MLX4_SET_PORT_RQP_CALC:
+ qpn_context = inbox->buf;
+ qpn_context->base_qpn =
+ cpu_to_be32(port_info->base_qpn);
+ qpn_context->n_mac = 0x7;
+ promisc = be32_to_cpu(qpn_context->promisc) >>
+ SET_PORT_PROMISC_SHIFT;
+ qpn_context->promisc = cpu_to_be32(
+ promisc << SET_PORT_PROMISC_SHIFT |
+ port_info->base_qpn);
+ promisc = be32_to_cpu(qpn_context->mcast) >>
+ SET_PORT_MC_PROMISC_SHIFT;
+ qpn_context->mcast = cpu_to_be32(
+ promisc << SET_PORT_MC_PROMISC_SHIFT |
+ port_info->base_qpn);
+ break;
+ case MLX4_SET_PORT_GENERAL:
+ gen_context = inbox->buf;
+			/* MTU is configured as the max MTU among all
+			 * the functions on the port. */
+ mtu = be16_to_cpu(gen_context->mtu);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+ prev_mtu = slave_st->mtu[port];
+ slave_st->mtu[port] = mtu;
+ if (mtu > master->max_mtu[port])
+ master->max_mtu[port] = mtu;
+ if (mtu < prev_mtu && prev_mtu ==
+ master->max_mtu[port]) {
+ slave_st->mtu[port] = mtu;
+ master->max_mtu[port] = mtu;
+ for (i = 0; i < dev->num_slaves; i++) {
+ master->max_mtu[port] =
+ max(master->max_mtu[port],
+ master->slave_state[i].mtu[port]);
+ }
+ }
+
+ gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ break;
+ }
+ return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ }
+
+ /* For IB, we only consider:
+ * - The capability mask, which is set to the aggregate of all
+ * slave function capabilities
+	 * - The QKey violation counter, which is reset according to each request.
+ */
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+ new_cap_mask = ((__be32 *) inbox->buf)[2];
+ } else {
+ reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+ new_cap_mask = ((__be32 *) inbox->buf)[1];
+ }
+
+ agg_cap_mask = 0;
+ slave_cap_mask =
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+ for (i = 0; i < dev->num_slaves; i++)
+ agg_cap_mask |=
+ priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+	/* Only clear the mailbox for guests; the master may be setting
+	 * the MTU or PKEY table size.
+	 */
+ if (slave != dev->caps.function)
+ memset(inbox->buf, 0, 256);
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ ((__be32 *) inbox->buf)[2] = agg_cap_mask;
+ } else {
+ ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((__be32 *) inbox->buf)[1] = agg_cap_mask;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+ slave_cap_mask;
+ return err;
+}
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+ vhcr->op_modifier, inbox);
+}
+
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +777,127 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->n_mac = dev->caps.log_num_macs;
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+ base_qpn);
+ context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+ base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+ u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+ return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_dump_eth_stats(dev, slave,
+ vhcr->in_modifier, outbox);
+}
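
The port.c changes above all follow the same SR-IOV dispatch shape: on a multi-function device (mlx4_is_mfunc()), a function does not touch the port tables itself but issues a wrapped ALLOC_RES/FREE_RES command, and only the native path calls the __mlx4_*() helper that programs the firmware table. Below is a condensed restatement of that pattern; my_register_mac() is a hypothetical name, everything it calls is taken from the code above, and it assumes the driver's internal mlx4 headers.

/* Sketch only: the wrapped-vs-native split used by mlx4_register_mac()
 * and mlx4_qp_reserve_range() above; my_register_mac() is a hypothetical
 * name, the calls inside come from the patch itself. */
static int my_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		/* Multi-function: forward the request as a wrapped command;
		 * the master's resource tracker does the real work. */
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;
		/* The master returns the MAC table index it chose. */
		return get_param_l(&out_param);
	}
	/* Native path: program the MAC table directly. */
	return __mlx4_register_mac(dev, port, mac);
}
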
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b967647d0c7..66f91ca7a7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
- profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
- profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
+ profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
profile[MLX4_RES_QP].num = request->num_qp;
profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->cmpt_base = profile[i].start;
break;
case MLX4_RES_MTT:
- dev->caps.num_mtt_segs = profile[i].num;
+ dev->caps.num_mtts = profile[i].num;
priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
dev->caps.num_mgms = profile[i].num >> 1;
dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
- init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+ init_hca->log_mc_entry_sz =
+ ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
init_hca->log_mc_hash_sz = profile[i].log_num - 1;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 15f870cb259..6b03ac8b900 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,6 +35,8 @@
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
spin_unlock(&qp_table->lock);
if (!qp) {
- mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn);
return;
}
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
complete(&qp->free);
}
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
- struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
- int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ return qp->qpn >= dev->caps.sqp_start &&
+ qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp, int native)
{
static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
[MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
};
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
+ u8 port;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
- if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
- return mlx4_cmd(dev, 0, qp->qpn, 2,
- MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+ if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+ ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+ MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+ if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+ cur_state != MLX4_QP_STATE_RST &&
+ is_qp0(dev, qp)) {
+ port = (qp->qpn & 1) + 1;
+ priv->mfunc.master.qp0_state[port].qp0_active = 0;
+ }
+ return ret;
+ }
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
+ port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+ if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ context->pri_path.sched_queue = (context->pri_path.sched_queue &
+ 0xc3);
+
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof *context);
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
- ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+ ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+ qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
- op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+ op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp)
+{
+ return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+ optpar, sqd_event, qp, 0);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- int qpn;
- qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
- if (qpn == -1)
+ *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (*base == -1)
return -ENOMEM;
- *base = qpn;
return 0;
}
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cnt);
+ set_param_h(&in_param, align);
+ err = mlx4_cmd_imm(dev, in_param, &out_param,
+ RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+
+ *base = get_param_l(&out_param);
+ return 0;
+ }
+ return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- if (base_qpn < dev->caps.sqp_start + 8)
- return;
+ if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+ return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, base_qpn);
+ set_param_h(&in_param, cnt);
+ err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err) {
+ mlx4_warn(dev, "Failed to release qp range"
+ " base:%d cnt:%d\n", base_qpn, cnt);
+ }
+ } else
+ __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (!qpn)
- return -EINVAL;
-
- qp->qpn = qpn;
-
- err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
- spin_lock_irq(&qp_table->lock);
- err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
- spin_unlock_irq(&qp_table->lock);
- if (err)
- goto err_put_cmpt;
-
- atomic_set(&qp->refcount, 1);
- init_completion(&qp->free);
-
return 0;
-err_put_cmpt:
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
err_put_rdmarc:
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
err_put_altc:
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
err_put_auxc:
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
err_put_qp:
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
err_out:
return err;
}
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, qpn);
+ return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+ mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, qpn);
+ if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+ } else
+ __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int err;
+
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
+
+ err = mlx4_qp_alloc_icm(dev, qpn);
+ if (err)
+ return err;
+
+ spin_lock_irq(&qp_table->lock);
+ err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+ (dev->caps.num_qps - 1), qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (err)
+ goto err_icm;
+
+ atomic_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ return 0;
+
+err_icm:
+ mlx4_qp_free_icm(dev, qpn);
+ return err;
+}
+
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
/*
* We reserve 2 extra QPs per port for the special QPs. The
@@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
+
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
@@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
- MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
if (!err)
memcpy(context, mailbox->buf + 8, sizeof *context);
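
The qp.c reserve/release paths above and the resource-tracker wrappers that follow (qp_alloc_res() and qp_free_res() in resource_tracker.c) exchange their arguments through the single 64-bit immediate of the wrapped command: one side packs two 32-bit values with set_param_l()/set_param_h(), the other unpacks them with get_param_l()/get_param_h(). A minimal sketch of that round trip, assuming the helpers address the low and high 32 bits respectively (which is how both sides use them here); pack_qp_range()/unpack_qp_range() are illustrative names only.

/* Sketch only: how a (cnt, align) pair rides in the wrapped ALLOC_RES
 * immediate, mirroring mlx4_qp_reserve_range() on the requesting side
 * and qp_alloc_res() on the master side. */
static u64 pack_qp_range(int cnt, int align)
{
	u64 in_param = 0;

	set_param_l(&in_param, cnt);	/* low 32 bits: number of QPs */
	set_param_h(&in_param, align);	/* high 32 bits: alignment    */
	return in_param;
}

static void unpack_qp_range(u64 in_param, int *cnt, int *align)
{
	*cnt = get_param_l(&in_param);
	*align = get_param_h(&in_param);
}
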
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644
index 00000000000..ed20751a057
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -0,0 +1,3104 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0x7fffffffffffffffULL
+#define ETH_ALEN 6
+
+struct mac_res {
+ struct list_head list;
+ u64 mac;
+ u8 port;
+};
+
+struct res_common {
+ struct list_head list;
+ u32 res_id;
+ int owner;
+ int state;
+ int from_state;
+ int to_state;
+ int removing;
+};
+
+enum {
+ RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+ struct list_head list;
+ u8 gid[16];
+ enum mlx4_protocol prot;
+};
+
+enum res_qp_states {
+ RES_QP_BUSY = RES_ANY_BUSY,
+
+ /* QP number was allocated */
+ RES_QP_RESERVED,
+
+ /* ICM memory for QP context was mapped */
+ RES_QP_MAPPED,
+
+ /* QP is in hw ownership */
+ RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+ switch (state) {
+ case RES_QP_BUSY: return "RES_QP_BUSY";
+ case RES_QP_RESERVED: return "RES_QP_RESERVED";
+ case RES_QP_MAPPED: return "RES_QP_MAPPED";
+ case RES_QP_HW: return "RES_QP_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_qp {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ struct res_srq *srq;
+ struct list_head mcg_list;
+ spinlock_t mcg_spl;
+ int local_qpn;
+};
+
+enum res_mtt_states {
+ RES_MTT_BUSY = RES_ANY_BUSY,
+ RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+ switch (state) {
+ case RES_MTT_BUSY: return "RES_MTT_BUSY";
+ case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_mtt {
+ struct res_common com;
+ int order;
+ atomic_t ref_count;
+};
+
+enum res_mpt_states {
+ RES_MPT_BUSY = RES_ANY_BUSY,
+ RES_MPT_RESERVED,
+ RES_MPT_MAPPED,
+ RES_MPT_HW,
+};
+
+struct res_mpt {
+ struct res_common com;
+ struct res_mtt *mtt;
+ int key;
+};
+
+enum res_eq_states {
+ RES_EQ_BUSY = RES_ANY_BUSY,
+ RES_EQ_RESERVED,
+ RES_EQ_HW,
+};
+
+struct res_eq {
+ struct res_common com;
+ struct res_mtt *mtt;
+};
+
+enum res_cq_states {
+ RES_CQ_BUSY = RES_ANY_BUSY,
+ RES_CQ_ALLOCATED,
+ RES_CQ_HW,
+};
+
+struct res_cq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ atomic_t ref_count;
+};
+
+enum res_srq_states {
+ RES_SRQ_BUSY = RES_ANY_BUSY,
+ RES_SRQ_ALLOCATED,
+ RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+ switch (state) {
+ case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+ case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+ case RES_SRQ_HW: return "RES_SRQ_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_srq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *cq;
+ atomic_t ref_count;
+};
+
+enum res_counter_states {
+ RES_COUNTER_BUSY = RES_ANY_BUSY,
+ RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+ switch (state) {
+ case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+ case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_counter {
+ struct res_common com;
+ int port;
+};
+
+/* For Debug uses */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+ switch (rt) {
+ case RES_QP: return "RES_QP";
+ case RES_CQ: return "RES_CQ";
+ case RES_SRQ: return "RES_SRQ";
+ case RES_MPT: return "RES_MPT";
+ case RES_MTT: return "RES_MTT";
+ case RES_MAC: return "RES_MAC";
+ case RES_EQ: return "RES_EQ";
+ case RES_COUNTER: return "RES_COUNTER";
+ default: return "Unknown resource type !!!";
+ };
+}
+
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int t;
+
+ priv->mfunc.master.res_tracker.slave_list =
+ kzalloc(dev->num_slaves * sizeof(struct slave_list),
+ GFP_KERNEL);
+ if (!priv->mfunc.master.res_tracker.slave_list)
+ return -ENOMEM;
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+ for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+ INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+ slave_list[i].res_list[t]);
+ mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
+
+ mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+ dev->num_slaves);
+ for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+ INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+ GFP_ATOMIC|__GFP_NOWARN);
+
+ spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+ return 0 ;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ if (priv->mfunc.master.res_tracker.slave_list) {
+ for (i = 0 ; i < dev->num_slaves; i++)
+ mlx4_delete_all_resources_for_slave(dev, i);
+
+ kfree(priv->mfunc.master.res_tracker.slave_list);
+ }
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+ u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+ if (MLX4_QP_ST_UD == ts)
+ qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+ mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
+ slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+ return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+ enum mlx4_resource type)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
+}
+
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type,
+ void *res)
+{
+ struct res_common *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (!r) {
+ err = -ENONET;
+ goto exit;
+ }
+
+ if (r->state == RES_ANY_BUSY) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto exit;
+ }
+
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ ResourceType(type), r->res_id);
+
+ if (res)
+ *((struct res_common **)res) = r;
+
+exit:
+ spin_unlock_irq(mlx4_tlock(dev));
+ return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource type,
+ int res_id, int *slave)
+{
+
+ struct res_common *r;
+ int err = -ENOENT;
+ int id = res_id;
+
+ if (type == RES_QP)
+ id &= 0x7fffff;
+ spin_lock(mlx4_tlock(dev));
+
+ r = find_res(dev, id, type);
+ if (r) {
+ *slave = r->owner;
+ err = 0;
+ }
+ spin_unlock(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type)
+{
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (r)
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+ struct res_qp *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_QP_RESERVED;
+ INIT_LIST_HEAD(&ret->mcg_list);
+ spin_lock_init(&ret->mcg_spl);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+ struct res_mtt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->order = order;
+ ret->com.state = RES_MTT_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+ struct res_mpt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_MPT_RESERVED;
+ ret->key = key;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+ struct res_eq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_EQ_RESERVED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+ struct res_cq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_CQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+ struct res_srq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_SRQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+ struct res_counter *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_COUNTER_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+ int extra)
+{
+ struct res_common *ret;
+
+ switch (type) {
+ case RES_QP:
+ ret = alloc_qp_tr(id);
+ break;
+ case RES_MPT:
+ ret = alloc_mpt_tr(id, extra);
+ break;
+ case RES_MTT:
+ ret = alloc_mtt_tr(id, extra);
+ break;
+ case RES_EQ:
+ ret = alloc_eq_tr(id);
+ break;
+ case RES_CQ:
+ ret = alloc_cq_tr(id);
+ break;
+ case RES_SRQ:
+ ret = alloc_srq_tr(id);
+ break;
+ case RES_MAC:
+ printk(KERN_ERR "implementation missing\n");
+ return NULL;
+ case RES_COUNTER:
+ ret = alloc_counter_tr(id);
+ break;
+
+ default:
+ return NULL;
+ }
+ if (ret)
+ ret->owner = slave;
+
+ return ret;
+}
+
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct res_common **res_arr;
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct radix_tree_root *root = &tracker->res_tree[type];
+
+ res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+ if (!res_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < count; ++i) {
+ res_arr[i] = alloc_tr(base + i, type, slave, extra);
+ if (!res_arr[i]) {
+ for (--i; i >= 0; --i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = 0; i < count; ++i) {
+ if (find_res(dev, base + i, type)) {
+ err = -EEXIST;
+ goto undo;
+ }
+ err = radix_tree_insert(root, base + i, res_arr[i]);
+ if (err)
+ goto undo;
+ list_add_tail(&res_arr[i]->list,
+ &tracker->slave_list[slave].res_list[type]);
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(res_arr);
+
+ return 0;
+
+undo:
+	for (--i; i >= 0; --i) {
+		radix_tree_delete(&tracker->res_tree[type], base + i);
+		list_del(&res_arr[i]->list);
+	}
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ for (i = 0; i < count; ++i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+
+ return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+ if (res->com.state == RES_QP_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_QP_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+ if (res->com.state == RES_MTT_BUSY ||
+ atomic_read(&res->ref_count)) {
+ printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
+ return -EBUSY;
+ } else if (res->com.state != RES_MTT_ALLOCATED)
+ return -EPERM;
+ else if (res->order != order)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+ if (res->com.state == RES_MPT_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_MPT_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+	if (res->com.state == RES_EQ_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_EQ_RESERVED)
+		return -EPERM;
+
+ return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+ if (res->com.state == RES_COUNTER_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_COUNTER_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+ if (res->com.state == RES_CQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_CQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+ if (res->com.state == RES_SRQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_SRQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+ switch (type) {
+ case RES_QP:
+ return remove_qp_ok((struct res_qp *)res);
+ case RES_CQ:
+ return remove_cq_ok((struct res_cq *)res);
+ case RES_SRQ:
+ return remove_srq_ok((struct res_srq *)res);
+ case RES_MPT:
+ return remove_mpt_ok((struct res_mpt *)res);
+ case RES_MTT:
+ return remove_mtt_ok((struct res_mtt *)res, extra);
+ case RES_MAC:
+ return -ENOSYS;
+ case RES_EQ:
+ return remove_eq_ok((struct res_eq *)res);
+ case RES_COUNTER:
+ return remove_counter_ok((struct res_counter *)res);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ if (!r) {
+ err = -ENOENT;
+ goto out;
+ }
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto out;
+ }
+ err = remove_ok(r, type, extra);
+ if (err)
+ goto out;
+ }
+
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ radix_tree_delete(&tracker->res_tree[type], i);
+ list_del(&r->list);
+ kfree(r);
+ }
+ err = 0;
+
+out:
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+ enum res_qp_states state, struct res_qp **qp,
+ int alloc)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_qp *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_QP_BUSY:
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ __func__, r->com.res_id);
+ err = -EBUSY;
+ break;
+
+ case RES_QP_RESERVED:
+ if (r->com.state == RES_QP_MAPPED && !alloc)
+ break;
+
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ err = -EINVAL;
+ break;
+
+ case RES_QP_MAPPED:
+ if ((r->com.state == RES_QP_RESERVED && alloc) ||
+ r->com.state == RES_QP_HW)
+ break;
+ else {
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ r->com.res_id);
+ err = -EINVAL;
+ }
+
+ break;
+
+ case RES_QP_HW:
+ if (r->com.state != RES_QP_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_QP_BUSY;
+ if (qp)
+ *qp = (struct res_qp *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_mpt_states state, struct res_mpt **mpt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mpt *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_MPT_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_RESERVED:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_MAPPED:
+ if (r->com.state != RES_MPT_RESERVED &&
+ r->com.state != RES_MPT_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_HW:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_MPT_BUSY;
+ if (mpt)
+ *mpt = (struct res_mpt *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_eq_states state, struct res_eq **eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_eq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_EQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_RESERVED:
+ if (r->com.state != RES_EQ_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_HW:
+ if (r->com.state != RES_EQ_RESERVED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_EQ_BUSY;
+ if (eq)
+ *eq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+ enum res_cq_states state, struct res_cq **cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_cq *r;
+ int err;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_CQ_BUSY:
+ err = -EBUSY;
+ break;
+
+ case RES_CQ_ALLOCATED:
+ if (r->com.state != RES_CQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ break;
+
+ case RES_CQ_HW:
+ if (r->com.state != RES_CQ_ALLOCATED)
+ err = -EINVAL;
+ else
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_cq_states state, struct res_srq **srq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_srq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_SRQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_SRQ_ALLOCATED:
+ if (r->com.state != RES_SRQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ break;
+
+ case RES_SRQ_HW:
+ if (r->com.state != RES_SRQ_ALLOCATED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->to_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+ return mlx4_is_qp_reserved(dev, qpn);
+}
+
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err;
+ int count;
+ int align;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ count = get_param_l(&in_param);
+ align = get_param_h(&in_param);
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err) {
+ __mlx4_qp_release_range(dev, base, count);
+ return err;
+ }
+ set_param_l(out_param, base);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ if (valid_reserved(dev, slave, qpn)) {
+ err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ if (err)
+ return err;
+ }
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+ NULL, 1);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn)) {
+ err = __mlx4_qp_alloc_icm(dev, qpn);
+ if (err) {
+ res_abort_move(dev, slave, RES_QP, qpn);
+ return err;
+ }
+ }
+
+ res_end_move(dev, slave, RES_QP, qpn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ order = get_param_l(&in_param);
+ base = __mlx4_alloc_mtt_range(dev, order);
+ if (base == -1)
+ return -ENOMEM;
+
+ err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (err)
+ __mlx4_free_mtt_range(dev, base, order);
+ else
+ set_param_l(out_param, base);
+
+ return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = __mlx4_mr_reserve(dev);
+ if (index == -1)
+ break;
+ id = index & mpt_mask(dev);
+
+ err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+ if (err) {
+ __mlx4_mr_release(dev, index);
+ break;
+ }
+ set_param_l(out_param, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = __mlx4_mr_alloc_icm(dev, mpt->key);
+ if (err) {
+ res_abort_move(dev, slave, RES_MPT, id);
+ return err;
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ break;
+ }
+ return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err) {
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+ }
+
+ set_param_l(out_param, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err) {
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+ }
+
+ set_param_l(out_param, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct mac_res *res;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->mac = mac;
+ res->port = (u8) port;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_MAC]);
+ return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+ list_del(&res->list);
+ kfree(res);
+ break;
+ }
+ }
+}
+
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ list_del(&res->list);
+ __mlx4_unregister_mac(dev, res->port, res->mac);
+ kfree(res);
+ }
+}
+
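+/* ALLOC_RES for a MAC: register the MAC on the port given in out_param,
+ * place the value returned by __mlx4_register_mac() back in out_param,
+ * and remember the MAC in the slave's list so it can be released later.
+ */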
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int port;
+ u64 mac;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ port = get_param_l(out_param);
+ mac = in_param;
+
+ err = __mlx4_register_mac(dev, port, mac);
+ if (err >= 0) {
+ set_param_l(out_param, err);
+ err = 0;
+ }
+
+ if (!err) {
+ err = mac_add_to_slave(dev, slave, mac, port);
+ if (err)
+ __mlx4_unregister_mac(dev, port, mac);
+ }
+ return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_CQ:
+ err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
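+/* FREE_RES for QPs: RES_OP_RESERVE releases a previously reserved QPN
+ * range; RES_OP_MAP_ICM moves the QP back to the reserved state, freeing
+ * its ICM mapping, or drops the tracker entry for a reserved QPN.
+ */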
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err;
+ int count;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ base = get_param_l(&in_param) & 0x7fffff;
+ count = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err)
+ break;
+ __mlx4_qp_release_range(dev, base, count);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+ NULL, 0);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ if (valid_reserved(dev, slave, qpn))
+ err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ base = get_param_l(&in_param);
+ order = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (!err)
+ __mlx4_free_mtt_range(dev, base, order);
+ return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ break;
+ index = mpt->key;
+ put_res(dev, slave, id, RES_MPT);
+
+ err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+ if (err)
+ break;
+ __mlx4_mr_release(dev, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
+ return err;
+
+ __mlx4_mr_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
+ return err;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ cqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err)
+ break;
+
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ srqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err)
+ break;
+
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int port;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ port = get_param_l(out_param);
+ mac_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_mac(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = -EINVAL;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_CQ:
+ err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ break;
+ }
+ return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+ return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+ return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & 0xffffff;
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
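+/* Number of MTT pages a QP's work queue buffer spans: SQ plus RQ size
+ * (the RQ is absent for SRQ/RSS/XRC QPs), adjusted for the page offset
+ * and rounded up to a power of two.
+ */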
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+ int page_shift = (qpc->log_page_size & 0x3f) + 12;
+ int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+ int log_sq_stride = qpc->sq_size_stride & 7;
+ int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+ int log_rq_stride = qpc->rq_size_stride & 7;
+ int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+ int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+ int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ int sq_size;
+ int rq_size;
+ int total_pages;
+ int total_mem;
+ int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+ sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+ rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+ total_mem = sq_size + rq_size;
+ total_pages =
+ roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+ page_shift);
+
+ return total_pages;
+}
+
+static int qp_get_pdn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->pd) & 0xffffff;
+}
+
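+/* PD numbers encode the owning function in the bits above
+ * NOT_MASKED_PD_BITS; recover that slave index so PD ownership can be
+ * verified in the command wrappers below.
+ */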
+static int pdn2slave(int pdn)
+{
+ return (pdn >> NOT_MASKED_PD_BITS) - 1;
+}
+
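+/* Check that the MTT segment [start, start + size) falls entirely inside
+ * the range reserved by this res_mtt entry.
+ */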
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+ int size, struct res_mtt *mtt)
+{
+ int res_start = mtt->com.res_id;
+ int res_size = (1 << mtt->order);
+
+ if (start < res_start || start + size > res_start + res_size)
+ return -EPERM;
+ return 0;
+}
+
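+/* SW2HW_MPT: move the slave's MPT to hardware ownership. For non-physical
+ * MRs the MTT range referenced by the mailbox is validated and its
+ * reference count bumped; the PD must belong to the calling slave.
+ */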
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_mpt *mpt;
+ int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+ int phys;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+ if (err)
+ return err;
+
+ phys = mr_phys_mpt(inbox->buf);
+ if (!phys) {
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base,
+ mr_get_mtt_size(inbox->buf), mtt);
+ if (err)
+ goto ex_put;
+
+ mpt->mtt = mtt;
+ }
+
+ if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
+ err = -EPERM;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ if (!phys) {
+ atomic_inc(&mtt->ref_count);
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_put:
+ if (!phys)
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ return err;
+
+ if (mpt->com.from_state != RES_MPT_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+ put_res(dev, slave, id, RES_MPT);
+ return err;
+}
+
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
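+/* RST2INIT: before forwarding the command, look up and take references on
+ * everything the QP context points at (its MTT range, receive and send
+ * CQs, and the SRQ if one is used) so they cannot be freed under the QP.
+ */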
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_mtt *mtt;
+ struct res_qp *qp;
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+ int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+ int mtt_size = qp_get_mtt_size(qpc);
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ int rcqn = qp_get_rcqn(qpc);
+ int scqn = qp_get_scqn(qpc);
+ u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+ int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+ struct res_srq *srq;
+ int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+ if (err)
+ return err;
+ qp->local_qpn = local_qpn;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ if (pdn2slave(qp_get_pdn(qpc)) != slave) {
+ err = -EPERM;
+ goto ex_put_mtt;
+ }
+
+ err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+ if (err)
+ goto ex_put_mtt;
+
+ if (scqn != rcqn) {
+ err = get_res(dev, slave, scqn, RES_CQ, &scq);
+ if (err)
+ goto ex_put_rcq;
+ } else
+ scq = rcq;
+
+ if (use_srq) {
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ goto ex_put_scq;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_srq;
+ atomic_inc(&mtt->ref_count);
+ qp->mtt = mtt;
+ atomic_inc(&rcq->ref_count);
+ qp->rcq = rcq;
+ atomic_inc(&scq->ref_count);
+ qp->scq = scq;
+
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+
+ if (use_srq) {
+ atomic_inc(&srq->ref_count);
+ put_res(dev, slave, srqn, RES_SRQ);
+ qp->srq = srq;
+ }
+ put_res(dev, slave, rcqn, RES_CQ);
+ put_res(dev, slave, mtt_base, RES_MTT);
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ return 0;
+
+ex_put_srq:
+ if (use_srq)
+ put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+ put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+ put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+ return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
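+/* Number of pages an EQ buffer occupies: 1 << log_eq_size entries of
+ * 32 bytes each (hence the "+ 5"), divided by the page size, with a
+ * minimum of one page.
+ */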
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+ int log_eq_size = eqc->log_eq_size & 0x1f;
+ int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+ if (log_eq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+ return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+ int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+ int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+ if (log_cq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_cq_size + 5 - page_shift);
+}
+
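+/* SW2HW_EQ: EQs are tracked under the composite id (slave << 8) | eqn,
+ * since EQ numbers alone are not unique across slaves. The EQ's MTT range
+ * is validated and pinned before the command is passed on.
+ */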
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int eqn = vhcr->in_modifier;
+ int res_id = (slave << 8) | eqn;
+ struct mlx4_eq_context *eqc = inbox->buf;
+ int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+ int mtt_size = eq_get_mtt_size(eqc);
+ struct res_eq *eq;
+ struct res_mtt *mtt;
+
+ err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ if (err)
+ return err;
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+ if (err)
+ goto out_add;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto out_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+
+ atomic_inc(&mtt->ref_count);
+ eq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ return err;
+}
+
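+/* Find the slave's MTT reservation that contains [start, start + len) and
+ * mark it busy; the caller releases it again with put_res().
+ */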
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+ int len, struct res_mtt **res)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mtt *mtt;
+ int err = -EINVAL;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+ com.list) {
+ if (!check_mtt_range(dev, slave, start, len, mtt)) {
+ *res = mtt;
+ mtt->com.from_state = mtt->com.state;
+ mtt->com.state = RES_MTT_BUSY;
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_mtt mtt;
+ __be64 *page_list = inbox->buf;
+ u64 *pg_list = (u64 *)page_list;
+ int i;
+ struct res_mtt *rmtt = NULL;
+ int start = be64_to_cpu(page_list[0]);
+ int npages = vhcr->in_modifier;
+ int err;
+
+ err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+ if (err)
+ return err;
+
+ /* Call the SW implementation of write_mtt:
+ * - Prepare a dummy mtt struct
+ * - Translate inbox contents to simple addresses in host endianness */
+ mtt.offset = 0; /* TBD: the offset is not handled yet, but it is not
+ actually used here */
+ mtt.order = 0;
+ mtt.page_shift = 0;
+ for (i = 0; i < npages; ++i)
+ pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+ err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+ ((u64 *)page_list + 2));
+
+ if (rmtt)
+ put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+ return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+ if (err)
+ return err;
+
+ err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+ if (err)
+ goto ex_abort;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ atomic_dec(&eq->mtt->ref_count);
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+ return 0;
+
+ex_put:
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+
+ return err;
+}
+
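+/* Post an event on a slave's event queue. The EQE is delivered only if the
+ * slave registered for this event type and its EQ is in hardware ownership;
+ * the entry is copied into a mailbox and sent with the GEN_EQE command.
+ */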
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_modifier = 0;
+ int err;
+ int res_id;
+ struct res_eq *req;
+
+ if (!priv->mfunc.master.slave_state)
+ return -EINVAL;
+
+ event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+
+ /* Create the event only if the slave is registered */
+ if ((event_eq->event_type & (1 << eqe->type)) == 0)
+ return 0;
+
+ mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ res_id = (slave << 8) | event_eq->eqn;
+ err = get_res(dev, slave, res_id, RES_EQ, &req);
+ if (err)
+ goto unlock;
+
+ if (req->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto put;
+ }
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto put;
+ }
+
+ if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+ ++event_eq->token;
+ eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+ }
+
+ memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+ in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+ err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+ MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ put_res(dev, slave, res_id, RES_EQ);
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+
+put:
+ put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = get_res(dev, slave, res_id, RES_EQ, &eq);
+ if (err)
+ return err;
+
+ if (eq->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+ put_res(dev, slave, res_id, RES_EQ);
+ return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+ struct res_cq *cq;
+ struct res_mtt *mtt;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto out_put;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_move;
+ atomic_dec(&cq->mtt->ref_count);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
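+/* CQ resize (MODIFY_CQ with op_modifier 0): validate the new MTT range
+ * described by the mailbox, then move the CQ's MTT reference from the old
+ * range to the new one.
+ */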
+static int handle_resize(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd,
+ struct res_cq *cq)
+{
+ int err;
+ struct res_mtt *orig_mtt;
+ struct res_mtt *mtt;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+ err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+ if (err)
+ return err;
+
+ if (orig_mtt != cq->mtt) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_put;
+
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto ex_put1;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put1;
+ atomic_dec(&orig_mtt->ref_count);
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ return 0;
+
+ex_put1:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+ return err;
+
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ if (vhcr->op_modifier == 0) {
+ err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+ if (err)
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
+static int srq_get_pdn(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->pd) & 0xffffff;
+}
+
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+ int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+ int log_rq_stride = srqc->logstride & 7;
+ int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
+ if (log_srq_size + log_rq_stride + 4 < page_shift)
+ return 1;
+
+ return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_srq *srq;
+ struct mlx4_srq_context *srqc = inbox->buf;
+ int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+ if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+ return -EINVAL;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+ err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+ mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ if (pdn2slave(srq_get_pdn(srqc)) != slave) {
+ err = -EPERM;
+ goto ex_put_mtt;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_mtt;
+
+ atomic_inc(&mtt->ref_count);
+ srq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+ return 0;
+
+ex_put_mtt:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = get_res(dev, slave, qpn, RES_QP, &qp);
+ if (err)
+ return err;
+ if (qp->com.from_state != RES_QP_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+ update_ud_gid(dev, qpc, (u8)slave);
+
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ atomic_dec(&qp->mtt->ref_count);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ res_end_move(dev, slave, RES_QP, qpn);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+ struct res_qp *rqp, u8 *gid)
+{
+ struct res_gid *res;
+
+ list_for_each_entry(res, &rqp->mcg_list, list) {
+ if (!memcmp(res->gid, gid, 16))
+ return res;
+ }
+ return NULL;
+}
+
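+/* Remember a multicast GID the QP was attached to, so the attachment can
+ * be undone when the slave is torn down (see detach_qp()).
+ */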
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot)
+{
+ struct res_gid *res;
+ int err;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ if (find_gid(dev, slave, rqp, gid)) {
+ kfree(res);
+ err = -EEXIST;
+ } else {
+ memcpy(res->gid, gid, 16);
+ res->prot = prot;
+ list_add_tail(&res->list, &rqp->mcg_list);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot)
+{
+ struct res_gid *res;
+ int err;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ res = find_gid(dev, slave, rqp, gid);
+ if (!res || res->prot != prot)
+ err = -EINVAL;
+ else {
+ list_del(&res->list);
+ kfree(res);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
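+/* Multicast attach/detach on behalf of a slave: op_modifier selects attach
+ * vs. detach. Attached GIDs are recorded on the QP first so a failed attach
+ * can be rolled back and so detach_qp() can clean up at slave shutdown.
+ */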
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+ u8 *gid = inbox->buf;
+ enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+ int err, err1;
+ int qpn;
+ struct res_qp *rqp;
+ int attach = vhcr->op_modifier;
+ int block_loopback = vhcr->in_modifier >> 31;
+ u8 steer_type_mask = 2;
+ enum mlx4_steer_type type = gid[7] & steer_type_mask;
+
+ qpn = vhcr->in_modifier & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err)
+ return err;
+
+ qp.qpn = qpn;
+ if (attach) {
+ err = add_mcg_res(dev, slave, rqp, gid, prot);
+ if (err)
+ goto ex_put;
+
+ err = mlx4_qp_attach_common(dev, &qp, gid,
+ block_loopback, prot, type);
+ if (err)
+ goto ex_rem;
+ } else {
+ err = rem_mcg_res(dev, slave, rqp, gid, prot);
+ if (err)
+ goto ex_put;
+ err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+ }
+
+ put_res(dev, slave, qpn, RES_QP);
+ return 0;
+
+ex_rem:
+ /* ignore error return below, already in error */
+ err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
+ex_put:
+ put_res(dev, slave, qpn, RES_QP);
+
+ return err;
+}
+
+enum {
+ BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier & 0xffff;
+
+ err = get_res(dev, slave, index, RES_COUNTER, NULL);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ put_res(dev, slave, index, RES_COUNTER);
+ return err;
+}
+
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+ struct res_gid *rgid;
+ struct res_gid *tmp;
+ int err;
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+ list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+ qp.qpn = rqp->local_qpn;
+ err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+ MLX4_MC_STEER);
+ list_del(&rgid->list);
+ kfree(rgid);
+ }
+}
+
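+/* Mark every resource of the given type owned by the slave as busy and flag
+ * it for removal. Returns the number of entries that were already busy
+ * (i.e. still held elsewhere).
+ */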
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int print)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+ struct res_common *r;
+ struct res_common *tmp;
+ int busy;
+
+ busy = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(r, tmp, rlist, list) {
+ if (r->owner == slave) {
+ if (!r->removing) {
+ if (r->state == RES_ANY_BUSY) {
+ if (print)
+ mlx4_dbg(dev,
+ "%s id 0x%x is busy\n",
+ ResourceType(type),
+ r->res_id);
+ ++busy;
+ } else {
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ r->removing = 1;
+ }
+ }
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return busy;
+}
+
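+/* Retry _move_all_busy() for up to five seconds, then make one last pass
+ * that logs whatever is still busy.
+ */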
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type)
+{
+ unsigned long begin;
+ int busy;
+
+ begin = jiffies;
+ do {
+ busy = _move_all_busy(dev, slave, type, 0);
+ if (time_after(jiffies, begin + 5 * HZ))
+ break;
+ if (busy)
+ cond_resched();
+ } while (busy);
+
+ if (busy)
+ busy = _move_all_busy(dev, slave, type, 1);
+
+ return busy;
+}
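+
+/* Tear down every QP still owned by the slave, walking each one back
+ * through its tracked states (HW -> MAPPED -> RESERVED) and dropping the
+ * references it holds on its CQs, SRQ and MTT range.
+ */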
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *qp_list =
+ &tracker->slave_list[slave].res_list[RES_QP];
+ struct res_qp *qp;
+ struct res_qp *tmp;
+ int state;
+ u64 in_param;
+ int qpn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_QP);
+ if (err)
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
+ "for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (qp->com.owner == slave) {
+ qpn = qp->com.res_id;
+ detach_qp(dev, slave, qp);
+ state = qp->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_QP_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_QP],
+ qp->com.res_id);
+ list_del(&qp->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(qp);
+ state = 0;
+ break;
+ case RES_QP_MAPPED:
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+ state = RES_QP_RESERVED;
+ break;
+ case RES_QP_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param,
+ qp->local_qpn, 2,
+ MLX4_CMD_2RST_QP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_qps: failed"
+ " to move slave %d qpn %d to"
+ " reset\n", slave,
+ qp->local_qpn);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ atomic_dec(&qp->mtt->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ state = RES_QP_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *srq_list =
+ &tracker->slave_list[slave].res_list[RES_SRQ];
+ struct res_srq *srq;
+ struct res_srq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int srqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_SRQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (srq->com.owner == slave) {
+ srqn = srq->com.res_id;
+ state = srq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_SRQ_ALLOCATED:
+ __mlx4_srq_free_icm(dev, srqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_SRQ],
+ srqn);
+ list_del(&srq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(srq);
+ state = 0;
+ break;
+
+ case RES_SRQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, srqn, 1,
+ MLX4_CMD_HW2SW_SRQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_srqs: failed"
+ " to move slave %d srq %d to"
+ " SW ownership\n",
+ slave, srqn);
+
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ state = RES_SRQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *cq_list =
+ &tracker->slave_list[slave].res_list[RES_CQ];
+ struct res_cq *cq;
+ struct res_cq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int cqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_CQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+ cqn = cq->com.res_id;
+ state = cq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_CQ_ALLOCATED:
+ __mlx4_cq_free_icm(dev, cqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_CQ],
+ cqn);
+ list_del(&cq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(cq);
+ state = 0;
+ break;
+
+ case RES_CQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, cqn, 1,
+ MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_cqs: failed"
+ " to move slave %d cq %d to"
+ " SW ownership\n",
+ slave, cqn);
+ atomic_dec(&cq->mtt->ref_count);
+ state = RES_CQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mpt_list =
+ &tracker->slave_list[slave].res_list[RES_MPT];
+ struct res_mpt *mpt;
+ struct res_mpt *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int mptn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MPT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mpt->com.owner == slave) {
+ mptn = mpt->com.res_id;
+ state = mpt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MPT_RESERVED:
+ __mlx4_mr_release(dev, mpt->key);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MPT],
+ mptn);
+ list_del(&mpt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mpt);
+ state = 0;
+ break;
+
+ case RES_MPT_MAPPED:
+ __mlx4_mr_free_icm(dev, mpt->key);
+ state = RES_MPT_RESERVED;
+ break;
+
+ case RES_MPT_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, mptn, 0,
+ MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_mrs: failed"
+ " to move slave %d mpt %d to"
+ " SW ownership\n",
+ slave, mptn);
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+ state = RES_MPT_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *mtt_list =
+ &tracker->slave_list[slave].res_list[RES_MTT];
+ struct res_mtt *mtt;
+ struct res_mtt *tmp;
+ int state;
+ LIST_HEAD(tlist);
+ int base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MTT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mtt->com.owner == slave) {
+ base = mtt->com.res_id;
+ state = mtt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MTT_ALLOCATED:
+ __mlx4_free_mtt_range(dev, base,
+ mtt->order);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MTT],
+ base);
+ list_del(&mtt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mtt);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *eq_list =
+ &tracker->slave_list[slave].res_list[RES_EQ];
+ struct res_eq *eq;
+ struct res_eq *tmp;
+ int err;
+ int state;
+ LIST_HEAD(tlist);
+ int eqn;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ err = move_all_busy(dev, slave, RES_EQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (eq->com.owner == slave) {
+ eqn = eq->com.res_id;
+ state = eq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_EQ_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_EQ],
+ eqn);
+ list_del(&eq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(eq);
+ state = 0;
+ break;
+
+ case RES_EQ_HW:
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ cond_resched();
+ continue;
+ }
+ err = mlx4_cmd_box(dev, slave, 0,
+ eqn & 0xff, 0,
+ MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ mlx4_dbg(dev, "rem_slave_eqs: failed"
+ " to move slave %d eqs %d to"
+ " SW ownership\n", slave, eqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (!err) {
+ atomic_dec(&eq->mtt->ref_count);
+ state = RES_EQ_RESERVED;
+ }
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
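+/* Release everything a slave still owns, under the slave's tracker mutex:
+ * MACs first, then QPs, SRQs, CQs, MRs and EQs, and finally the MTT ranges
+ * they reference.
+ */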
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+ /* VLAN */
+ rem_slave_macs(dev, slave);
+ rem_slave_qps(dev, slave);
+ rem_slave_srqs(dev, slave);
+ rem_slave_cqs(dev, slave);
+ rem_slave_mrs(dev, slave);
+ rem_slave_eqs(dev, slave);
+ rem_slave_mtts(dev, slave);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index e2337a7411d..80249829352 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
int err = 0;
err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
- MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err) {
mlx4_err(dev, "Sense command failed for port: %d\n", port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9cbf3fce014..2823fffc638 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -38,26 +40,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_srq_context {
- __be32 state_logsize_srqn;
- u8 logstride;
- u8 reserved1;
- __be16 xrcd;
- __be32 pg_offset_cqn;
- u32 reserved2;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 pd;
- __be16 limit_watermark;
- __be16 wqe_cnt;
- u16 reserved4;
- __be16 wqe_counter;
- u32 reserved5;
- __be64 db_rec_addr;
-};
-
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
- return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+ MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
- struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_srq_context *srq_context;
- u64 mtt_addr;
int err;
- srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
- if (srq->srqn == -1)
+
+ *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+ if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ return err;
+}
+
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *srqn = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+ mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+ mlx4_table_put(dev, &srq_table->table, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, srqn);
+ if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
+ return;
+ }
+ __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ u64 mtt_addr;
+ int err;
+
+ err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+ if (err)
+ return err;
spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+ mlx4_srq_free_icm(dev, srq->srqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
complete(&srq->free);
wait_for_completion(&srq->free);
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+ mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index d10c2e15f4e..1ea811cf515 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -42,6 +42,8 @@ config KS8851
select NET_CORE
select MII
select CRC32
+ select MISC_DEVICES
+ select EEPROM_93CX6
---help---
SPI driver for Micrel KS8851 SPI attached network chip.
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 4a6ae057e3b..75ec87a822b 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1264,18 +1264,7 @@ static struct platform_driver ks8842_platform_driver = {
.remove = ks8842_remove,
};
-static int __init ks8842_init(void)
-{
- return platform_driver_register(&ks8842_platform_driver);
-}
-
-static void __exit ks8842_exit(void)
-{
- platform_driver_unregister(&ks8842_platform_driver);
-}
-
-module_init(ks8842_init);
-module_exit(ks8842_exit);
+module_platform_driver(ks8842_platform_driver);
MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f56743a28fc..6b35e7da9a9 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -22,6 +22,7 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
+#include <linux/eeprom_93cx6.h>
#include <linux/spi/spi.h>
@@ -82,6 +83,7 @@ union ks8851_tx_hdr {
* @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -128,6 +130,8 @@ struct ks8851_net {
struct spi_message spi_msg2;
struct spi_transfer spi_xfer1;
struct spi_transfer spi_xfer2[2];
+
+ struct eeprom_93cx6 eeprom;
};
static int msg_enable;
@@ -343,6 +347,26 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
}
/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+ unsigned pmecr;
+
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+ pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_PM_MASK;
+ pmecr |= pwrmode;
+
+ ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
* ks8851_write_mac_addr - write mac address to device registers
* @dev: The network device
*
@@ -358,8 +382,15 @@ static int ks8851_write_mac_addr(struct net_device *dev)
mutex_lock(&ks->lock);
+ /*
+ * Wake up chip in case it was powered off when stopped; otherwise,
+ * the first write to the MAC address does not take effect.
+ */
+ ks8851_set_powermode(ks, PMECR_PM_NORMAL);
for (i = 0; i < ETH_ALEN; i++)
ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+ if (!netif_running(dev))
+ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
mutex_unlock(&ks->lock);
@@ -367,21 +398,47 @@ static int ks8851_write_mac_addr(struct net_device *dev)
}
/**
+ * ks8851_read_mac_addr - read mac address from device registers
+ * @dev: The network device
+ *
+ * Update our copy of the KS8851 MAC address from the registers of @dev.
+ */
+static void ks8851_read_mac_addr(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int i;
+
+ mutex_lock(&ks->lock);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+
+ mutex_unlock(&ks->lock);
+}
+
+/**
* ks8851_init_mac - initialise the mac address
* @ks: The device structure
*
* Get or create the initial mac address for the device and then set that
- * into the station address register. Currently we assume that the device
- * does not have a valid mac address in it, and so we use random_ether_addr()
+ * into the station address register. If there is an EEPROM present, then
+ * we try that. If no valid mac address is found we use random_ether_addr()
* to create a new one.
- *
- * In future, the driver should check to see if the device has an EEPROM
- * attached and whether that has a valid ethernet address in it.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
{
struct net_device *dev = ks->netdev;
+ /* first, try reading what we've got already */
+ if (ks->rc_ccr & CCR_EEPROM) {
+ ks8851_read_mac_addr(dev);
+ if (is_valid_ether_addr(dev->dev_addr))
+ return;
+
+ netdev_err(ks->netdev, "invalid mac address read %pM\n",
+ dev->dev_addr);
+ }
+
random_ether_addr(dev->dev_addr);
ks8851_write_mac_addr(dev);
}
@@ -739,26 +796,6 @@ static void ks8851_tx_work(struct work_struct *work)
}
/**
- * ks8851_set_powermode - set power mode of the device
- * @ks: The device state
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
-{
- unsigned pmecr;
-
- netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
- pmecr = ks8851_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_PM_MASK;
- pmecr |= pwrmode;
-
- ks8851_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
* ks8851_net_open - open network device
* @dev: The network device being opened.
*
@@ -1038,234 +1075,6 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* Companion eeprom access */
-
-enum { /* EEPROM programming states */
- EEPROM_CONTROL,
- EEPROM_ADDRESS,
- EEPROM_DATA,
- EEPROM_COMPLETE
-};
-
-/**
- * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @addr: EEPROM address to read
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
- * Warning: The READ feature is not supported on ks8851 revision 0.
- *
- * Rough programming model:
- * - on period start: set clock high and read value on bus
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int ctrl = EEPROM_OP_READ;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int data = 0;
- int dummy;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((ctrl >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- bit_count--;
- break;
- case EEPROM_DATA:
- /* Change to receive mode */
- eepcr &= ~EEPCR_EESRWA;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* waitread period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- mutex_lock(&ks->lock);
- eepcr |= EEPCR_EESCK;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* Manage read */
- switch (state) {
- case EEPROM_ADDRESS:
- if (bit_count < 0) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- }
- break;
- case EEPROM_DATA:
- mutex_lock(&ks->lock);
- dummy = ks8851_rdreg16(ks, KS_EEPCR);
- mutex_unlock(&ks->lock);
- data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- return data;
-}
-
-/**
- * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @op: operand (can be WRITE, EWEN, EWDS)
- * @addr: EEPROM address to write
- * @data: data to write
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
- *
- * Note that a write enable is required before writing data.
- *
- * Rough programming model:
- * - on period start: set clock high
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
- unsigned int addr, unsigned int data)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- switch (op) {
- case EEPROM_OP_EWEN:
- addr = 0x30;
- break;
- case EEPROM_OP_EWDS:
- addr = 0;
- break;
- }
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((op >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- if (op == EEPROM_OP_WRITE) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- } else {
- state = EEPROM_COMPLETE;
- }
- }
- break;
- case EEPROM_DATA:
- eepcr |= ((data >> bit_count) & 1) << 2;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- eepcr |= EEPCR_EESCK;
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
-}
-
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1312,115 +1121,141 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
-static int ks8851_get_eeprom_len(struct net_device *dev)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- return ks->eeprom_size;
-}
+/* EEPROM support */
-static int ks8851_get_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
{
- struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
+ struct ks8851_net *ks = ee->data;
+ unsigned val;
- if (eeprom->len > ks->eeprom_size)
- return -EINVAL;
+ val = ks8851_rdreg16(ks, KS_EEPCR);
- eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+ ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
+ ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
+ ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
+}
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
+{
+ struct ks8851_net *ks = ee->data;
+ unsigned val = EEPCR_EESA; /* default - eeprom access on */
+
+ if (ee->drive_data)
+ val |= EEPCR_EESRWA;
+ if (ee->reg_data_in)
+ val |= EEPCR_EEDO;
+ if (ee->reg_data_clock)
+ val |= EEPCR_EESCK;
+ if (ee->reg_chip_select)
+ val |= EEPCR_EECS;
+
+ ks8851_wrreg16(ks, KS_EEPCR, val);
+}
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+/**
+ * ks8851_eeprom_claim - claim device EEPROM and activate the interface
+ * @ks: The network device state.
+ *
+ * Check for the presence of an EEPROM, and then activate software access
+ * to the device.
+ */
+static int ks8851_eeprom_claim(struct ks8851_net *ks)
+{
+ if (!(ks->rc_ccr & CCR_EEPROM))
+ return -ENOENT;
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
+ mutex_lock(&ks->lock);
- /* Device's eeprom is little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ /* start with clock low, cs high */
+ ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
+ return 0;
+}
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
+/**
+ * ks8851_eeprom_release - release the EEPROM interface
+ * @ks: The device state
+ *
+ * Release the software access to the device EEPROM
+ */
+static void ks8851_eeprom_release(struct ks8851_net *ks)
+{
+ unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
- return ret_val;
+ ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
+ mutex_unlock(&ks->lock);
}
+#define KS_EEPROM_MAGIC (0x00008851)
+
static int ks8851_set_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *ee, u8 *data)
{
struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- void *ptr;
- int max_len;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EOPNOTSUPP;
-
- if (eeprom->len > ks->eeprom_size)
+ int offset = ee->offset;
+ int len = ee->len;
+ u16 tmp;
+
+ /* currently we only support single-byte writes */
+ if (len != 1)
return -EINVAL;
- if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
- return -EFAULT;
+ if (ee->magic != KS_EEPROM_MAGIC)
+ return -EINVAL;
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- max_len = (last_word - first_word + 1) * 2;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
- ptr = (void *)eeprom_buff;
+ eeprom_93cx6_wren(&ks->eeprom, true);
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
- ptr++;
+ /* ethtool currently only supports writing bytes, which means
+ * we have to read/modify/write our 16-bit EEPROM words */
+
+ eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);
+
+ if (offset & 1) {
+ tmp &= 0xff;
+ tmp |= *data << 8;
+ } else {
+ tmp &= 0xff00;
+ tmp |= *data;
}
- if ((eeprom->offset + eeprom->len) & 1)
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- eeprom_buff[last_word - first_word] =
- ks8851_eeprom_read(dev, last_word);
+ eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
+ eeprom_93cx6_wren(&ks->eeprom, false);
- /* Device's eeprom is little-endian, word addressable */
- le16_to_cpus(&eeprom_buff[0]);
- le16_to_cpus(&eeprom_buff[last_word - first_word]);
+ ks8851_eeprom_release(ks);
- memcpy(ptr, bytes, eeprom->len);
+ return 0;
+}
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int offset = ee->offset;
+ int len = ee->len;
- ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+ /* offset and length must both be 2-byte aligned */
+ if (len & 1 || offset & 1)
+ return -EINVAL;
- for (i = 0; i < last_word - first_word + 1; i++) {
- ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
- eeprom_buff[i]);
- mdelay(EEPROM_WRITE_TIME);
- }
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
+
+ ee->magic = KS_EEPROM_MAGIC;
- ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+ eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+ ks8851_eeprom_release(ks);
- kfree(eeprom_buff);
- return ret_val;
+ return 0;
+}
+
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+
+ /* currently we assume a 93C46 is attached, so return 128 bytes */
+ return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
}
static const struct ethtool_ops ks8851_ethtool_ops = {
@@ -1613,6 +1448,13 @@ static int __devinit ks8851_probe(struct spi_device *spi)
spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
+ /* setup EEPROM state */
+
+ ks->eeprom.data = ks;
+ ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
+ ks->eeprom.register_read = ks8851_eeprom_regread;
+ ks->eeprom.register_write = ks8851_eeprom_regwrite;
+
/* setup mii state */
ks->mii.dev = ndev;
ks->mii.phy_id = 1,
@@ -1674,9 +1516,10 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ ndev->dev_addr, ndev->irq,
+ ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
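
The ks8851 hunks above replace the driver's hand-rolled EEPROM bit-banging with the generic eeprom_93cx6 helper. A minimal sketch of that pattern, not taken from the patch: my_regread()/my_regwrite() are placeholders for the chip-specific bridge callbacks (the role played by ks8851_eeprom_regread()/ks8851_eeprom_regwrite() above), and the function simply shows the read / write-enable / write / write-disable sequence the new ks8851_set_eeprom() relies on.

    #include <linux/eeprom_93cx6.h>

    /* Placeholders for the device-specific bridge callbacks that translate
     * the helper's logical EEPROM lines to the device's own register bits. */
    static void my_regread(struct eeprom_93cx6 *ee);
    static void my_regwrite(struct eeprom_93cx6 *ee);

    static void example_93cx6_rmw(void *priv, u8 word_addr)
    {
    	struct eeprom_93cx6 ee = {
    		.data           = priv,
    		.width          = PCI_EEPROM_WIDTH_93C46,
    		.register_read  = my_regread,
    		.register_write = my_regwrite,
    	};
    	u16 word;

    	eeprom_93cx6_read(&ee, word_addr, &word);	/* fetch one 16-bit word */
    	eeprom_93cx6_wren(&ee, true);			/* enable writes */
    	eeprom_93cx6_write(&ee, word_addr, word);	/* write it back */
    	eeprom_93cx6_wren(&ee, false);			/* disable writes again */
    }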
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 537fb06e593..b0fae86aaca 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -16,7 +16,7 @@
#define CCR_32PIN (1 << 0)
/* MAC address registers */
-#define KS_MAR(_m) 0x15 - (_m)
+#define KS_MAR(_m) (0x15 - (_m))
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
@@ -27,22 +27,11 @@
#define KS_EEPCR 0x22
#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB_OFFSET 3
-#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
-#define EEPROM_OP_LEN 3 /* bits:*/
-#define EEPROM_OP_READ 0x06
-#define EEPROM_OP_EWEN 0x04
-#define EEPROM_OP_WRITE 0x05
-#define EEPROM_OP_EWDS 0x14
-
-#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
-#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
-#define EEPROM_SK_PERIOD 400 /* in us */
-
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index d19c849059d..e58e78e5c93 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1500,8 +1500,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->all_mcast = 0;
ks->mcast_lst_size = 0;
- ks->frame_head_info = (struct type_frame_head *) \
- kmalloc(MHEADER_SIZE, GFP_KERNEL);
+ ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
pr_err("Error: Fail to allocate frame memory\n");
return false;
@@ -1659,18 +1658,7 @@ static struct platform_driver ks8851_platform_driver = {
.remove = __devexit_p(ks8851_remove),
};
-static int __init ks8851_init(void)
-{
- return platform_driver_register(&ks8851_platform_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
- platform_driver_unregister(&ks8851_platform_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_platform_driver(ks8851_platform_driver);
MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 7ece990381c..a718865a8fe 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -743,8 +743,7 @@
/* Change default LED mode. */
#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
-#define MAC_ADDR_LEN 6
-#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
#define MAX_ETHERNET_BODY_SIZE 1500
#define ETHERNET_HEADER_SIZE 14
@@ -1043,7 +1042,7 @@ enum {
* @valid: Valid setting indicating the entry is being used.
*/
struct ksz_mac_table {
- u8 mac_addr[MAC_ADDR_LEN];
+ u8 mac_addr[ETH_ALEN];
u16 vid;
u8 fid;
u8 ports;
@@ -1187,8 +1186,8 @@ struct ksz_switch {
u8 diffserv[DIFFSERV_ENTRIES];
u8 p_802_1p[PRIO_802_1P_ENTRIES];
- u8 br_addr[MAC_ADDR_LEN];
- u8 other_addr[MAC_ADDR_LEN];
+ u8 br_addr[ETH_ALEN];
+ u8 other_addr[ETH_ALEN];
u8 broad_per;
u8 member;
@@ -1292,14 +1291,14 @@ struct ksz_hw {
int tx_int_mask;
int tx_size;
- u8 perm_addr[MAC_ADDR_LEN];
- u8 override_addr[MAC_ADDR_LEN];
- u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 override_addr[ETH_ALEN];
+ u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
u8 addr_list_size;
u8 mac_override;
u8 promiscuous;
u8 all_multi;
- u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
u8 multi_bits[HW_MULTICAST_SIZE];
u8 multi_list_size;
@@ -3654,7 +3653,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
static const u8 mask[] = { 0x3F };
static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+ hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}
/**
@@ -3689,7 +3688,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
{
static const u8 mask[] = { 0x3F };
- hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+ hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}
/**
@@ -4055,7 +4054,7 @@ static void hw_set_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
hw->io + KS884X_ADDR_0_OFFSET + i);
@@ -4072,17 +4071,16 @@ static void hw_read_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
KS884X_ADDR_0_OFFSET + i);
if (!hw->mac_override) {
- memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
if (empty_addr(hw->override_addr)) {
- memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ ETH_ALEN);
hw->override_addr[5] += hw->id;
hw_set_addr(hw);
}
@@ -4130,16 +4128,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
}
if (j < ADDITIONAL_ENTRIES) {
- memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ memcpy(hw->address[j], mac_addr, ETH_ALEN);
hw_ena_add_addr(hw, j, hw->address[j]);
return 0;
}
@@ -4151,8 +4149,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
- memset(hw->address[i], 0, MAC_ADDR_LEN);
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
return 0;
@@ -4382,12 +4380,10 @@ static void ksz_update_timer(struct ksz_timer_info *info)
*/
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
- desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
- GFP_KERNEL);
+ desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
if (!desc_info->ring)
return 1;
- memset((void *) desc_info->ring, 0,
- sizeof(struct ksz_desc) * desc_info->alloc);
hw_init_desc(desc_info, transmit);
return 0;
}
@@ -5676,7 +5672,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
hw_del_addr(hw, dev->dev_addr);
else {
hw->mac_override = 1;
- memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
@@ -5786,7 +5782,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6093,9 +6089,10 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(hw_priv->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+ sizeof(info->bus_info));
}
/**
@@ -6587,7 +6584,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
*
* Return 0 if successful; otherwise an error code.
*/
-static int netdev_set_features(struct net_device *dev, u32 features)
+static int netdev_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
@@ -6860,7 +6858,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
int num;
i = j = num = got_num = 0;
- while (j < MAC_ADDR_LEN) {
+ while (j < ETH_ALEN) {
if (macaddr[i]) {
int digit;
@@ -6891,7 +6889,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
}
i++;
}
- if (MAC_ADDR_LEN == j) {
+ if (ETH_ALEN == j) {
if (MAIN_PORT == port)
hw_priv->hw.mac_override = 1;
}
@@ -7058,7 +7056,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
/* Multiple device interfaces mode requires a second MAC address. */
if (hw->dev_count > 1) {
- memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
read_other_addr(hw);
if (mac1addr[0] != ':')
get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
@@ -7108,12 +7106,11 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
dev->irq = pdev->irq;
if (MAIN_PORT == i)
memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- MAC_ADDR_LEN);
+ ETH_ALEN);
else {
- memcpy(dev->dev_addr, sw->other_addr,
- MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
if (!memcmp(sw->other_addr, hw->override_addr,
- MAC_ADDR_LEN))
+ ETH_ALEN))
dev->dev_addr[5] += port->first_port;
}
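
The ksz884x get_drvinfo change above is one of several in this series (natsemi, ns83820, s2io and forcedeth below do the same) that switch from strcpy()/strncpy() to strlcpy() sized by the destination field, which both bounds the copy and guarantees NUL termination. A minimal sketch with hypothetical driver name/version strings:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static void foo_get_drvinfo(struct net_device *dev,
    			    struct ethtool_drvinfo *info)
    {
    	/* sizeof() of each fixed-size ethtool_drvinfo field bounds the copy
    	 * and, unlike strncpy(), strlcpy() always NUL-terminates */
    	strlcpy(info->driver, "foo", sizeof(info->driver));
    	strlcpy(info->version, "1.0", sizeof(info->version));
    	strlcpy(info->bus_info, dev_name(dev->dev.parent),
    		sizeof(info->bus_info));	/* assumes SET_NETDEV_DEV() was called */
    }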
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 0778edcf7b9..20b72ecb020 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
* access to avoid theoretical race condition with functions that
* change NETIF_F_LRO flag at runtime.
*/
- bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+ bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
@@ -3149,7 +3149,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t myri10ge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
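
The myri10ge change above, together with the s2io, vxge and forcedeth hunks below, moves feature masks from u32 to netdev_features_t, so ndo_fix_features/ndo_set_features implementations now take and return that type. A minimal sketch of the updated prototypes (the foo_* names are illustrative only):

    static netdev_features_t foo_fix_features(struct net_device *dev,
    					  netdev_features_t features)
    {
    	/* e.g. LRO depends on RX checksum offload */
    	if (!(features & NETIF_F_RXCSUM))
    		features &= ~NETIF_F_LRO;
    	return features;
    }

    static int foo_set_features(struct net_device *dev, netdev_features_t features)
    {
    	netdev_features_t changed = dev->features ^ features;

    	if (changed & NETIF_F_LRO) {
    		/* reprogram the hardware for the new LRO setting here */
    	}
    	return 0;
    }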
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
index 11be150e4d6..b7fc26c4f73 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
@@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_DCA_OFFSET = 56,
/* offset of dca control for WDMAs */
- /* VMWare NetQueue commands */
+ /* VMware NetQueue commands */
MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
MXGEFW_CMD_NETQ_ADD_FILTER = 58,
/* data0 = filter_id << 16 | queue << 8 | type */
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index fc7c6a932ad..5b89fd377ae 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -294,15 +294,4 @@ static struct platform_driver jazz_sonic_driver = {
},
};
-static int __init jazz_sonic_init_module(void)
-{
- return platform_driver_register(&jazz_sonic_driver);
-}
-
-static void __exit jazz_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&jazz_sonic_driver);
-}
-
-module_init(jazz_sonic_init_module);
-module_exit(jazz_sonic_cleanup_module);
+module_platform_driver(jazz_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a2eacbfb425..70367d76fc8 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -643,15 +643,4 @@ static struct platform_driver mac_sonic_driver = {
},
};
-static int __init mac_sonic_init_module(void)
-{
- return platform_driver_register(&mac_sonic_driver);
-}
-
-static void __exit mac_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&mac_sonic_driver);
-}
-
-module_init(mac_sonic_init_module);
-module_exit(mac_sonic_cleanup_module);
+module_platform_driver(mac_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 6ca047aab79..ac7b16b6e7a 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
- strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2b8f64ddfb5..c24b46cbfe2 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev,
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
- strcpy(info->driver, "ns83820");
- strcpy(info->version, VERSION);
- strcpy(info->bus_info, pci_name(dev->pci_dev));
+ strlcpy(info->driver, "ns83820", sizeof(info->driver));
+ strlcpy(info->version, VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
}
static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ccf61b9da8d..e01c0a07a93 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -319,15 +319,4 @@ static struct platform_driver xtsonic_driver = {
},
};
-static int __init xtsonic_init(void)
-{
- return platform_driver_register(&xtsonic_driver);
-}
-
-static void __exit xtsonic_cleanup(void)
-{
- platform_driver_unregister(&xtsonic_driver);
-}
-
-module_init(xtsonic_init);
-module_exit(xtsonic_cleanup);
+module_platform_driver(xtsonic_driver);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c27fb3dda9f..97f63e12d86 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5391,10 +5391,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
- strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strncpy(info->version, s2io_driver_version, sizeof(info->version));
- strncpy(info->fw_version, "", sizeof(info->fw_version));
- strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strlcpy(info->version, s2io_driver_version, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
info->regdump_len = XENA_REG_SPACE;
info->eedump_len = XENA_EEPROM_SPACE;
}
@@ -6616,10 +6615,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
}
}
-static int s2io_set_features(struct net_device *dev, u32 features)
+static int s2io_set_features(struct net_device *dev, netdev_features_t features)
{
struct s2io_nic *sp = netdev_priv(dev);
- u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+ netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
if (changed && netif_running(dev)) {
int rc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index a83197d757c..ef76725454d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2662,9 +2662,10 @@ static void vxge_poll_vp_lockup(unsigned long data)
mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
-static u32 vxge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vxge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
/* Enabling RTH requires some of the logic in vxge_device_register and a
* vpath reset. Due to these restrictions, only allow modification
@@ -2676,10 +2677,10 @@ static u32 vxge_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int vxge_set_features(struct net_device *dev, u32 features)
+static int vxge_set_features(struct net_device *dev, netdev_features_t features)
{
struct vxgedev *vdev = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (!(changed & NETIF_F_RXHASH))
return 0;
@@ -3304,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev)
*
* Add the vlan id to the devices vlan id table
*/
-static void
+static int
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3319,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
vxge_hw_vpath_vid_add(vpath->handle, vid);
}
set_bit(vid, vdev->active_vlans);
+ return 0;
}
/**
@@ -3328,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
*
* Remove the vlan id from the device's vlan id table
*/
-static void
+static int
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3347,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
clear_bit(vid, vdev->active_vlans);
+ return 0;
}
static const struct net_device_ops vxge_netdev_ops = {
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index f1bfb8f8fcf..b75a0497d58 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1103,18 +1103,7 @@ static struct platform_driver w90p910_ether_driver = {
},
};
-static int __init w90p910_ether_init(void)
-{
- return platform_driver_register(&w90p910_ether_driver);
-}
-
-static void __exit w90p910_ether_exit(void)
-{
- platform_driver_unregister(&w90p910_ether_driver);
-}
-
-module_init(w90p910_ether_init);
-module_exit(w90p910_ether_exit);
+module_platform_driver(w90p910_ether_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1dca57013cb..4c4e7f45838 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -65,7 +65,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
-#include <linux/io.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/system.h>
@@ -609,7 +610,7 @@ struct nv_ethtool_str {
};
static const struct nv_ethtool_str nv_estats_str[] = {
- { "tx_bytes" },
+ { "tx_bytes" }, /* includes Ethernet FCS CRC */
{ "tx_zero_rexmt" },
{ "tx_one_rexmt" },
{ "tx_many_rexmt" },
@@ -637,7 +638,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
/* version 2 stats */
{ "tx_deferral" },
{ "tx_packets" },
- { "rx_bytes" },
+ { "rx_bytes" }, /* includes Ethernet FCS CRC */
{ "tx_pause" },
{ "rx_pause" },
{ "rx_drop_frame" },
@@ -649,7 +650,7 @@ static const struct nv_ethtool_str nv_estats_str[] = {
};
struct nv_ethtool_stats {
- u64 tx_bytes;
+ u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
u64 tx_zero_rexmt;
u64 tx_one_rexmt;
u64 tx_many_rexmt;
@@ -670,14 +671,14 @@ struct nv_ethtool_stats {
u64 rx_unicast;
u64 rx_multicast;
u64 rx_broadcast;
- u64 rx_packets;
+ u64 rx_packets; /* should be ifconfig->rx_packets */
u64 rx_errors_total;
u64 tx_errors_total;
/* version 2 stats */
u64 tx_deferral;
- u64 tx_packets;
- u64 rx_bytes;
+ u64 tx_packets; /* should be ifconfig->tx_packets */
+ u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
u64 tx_pause;
u64 rx_pause;
u64 rx_drop_frame;
@@ -736,6 +737,16 @@ struct nv_skb_map {
* - tx setup is lockless: it relies on netif_tx_lock. Actual submission
* needs netdev_priv(dev)->lock :-(
* - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ *
+ * Hardware stats updates are protected by hwstats_lock:
+ * - updated by nv_do_stats_poll (timer). This is meant to avoid
+ * integer wraparound in the NIC stats registers, at low frequency
+ * (0.1 Hz)
+ * - updated by nv_get_ethtool_stats + nv_get_stats64
+ *
+ * Software stats are protected only by the 64-bit synchronization points
+ * (u64_stats_sync); no further locking is needed because each counter has
+ * a single updater (the TX or RX path).
*/
/* in dev: base, irq */
@@ -745,9 +756,10 @@ struct fe_priv {
struct net_device *dev;
struct napi_struct napi;
- /* General data:
- * Locking: spin_lock(&np->lock); */
+ /* hardware stats are updated in syscall and timer */
+ spinlock_t hwstats_lock;
struct nv_ethtool_stats estats;
+
int in_shutdown;
u32 linkspeed;
int duplex;
@@ -798,6 +810,13 @@ struct fe_priv {
u32 nic_poll_irq;
int rx_ring_size;
+ /* RX software stats */
+ struct u64_stats_sync swstats_rx_syncp;
+ u64 stat_rx_packets;
+ u64 stat_rx_bytes; /* not always available in HW */
+ u64 stat_rx_missed_errors;
+ u64 stat_rx_dropped;
+
/* media detection workaround.
* Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
*/
@@ -820,6 +839,12 @@ struct fe_priv {
struct nv_skb_map *tx_end_flip;
int tx_stop;
+ /* TX software stats */
+ struct u64_stats_sync swstats_tx_syncp;
+ u64 stat_tx_packets; /* not always available in HW */
+ u64 stat_tx_bytes;
+ u64 stat_tx_dropped;
+
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -892,6 +917,11 @@ enum {
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
+ * Debug output control for tx_timeout
+ */
+static bool debug_tx_timeout = false;
+
+/*
* Crossover Detection
* Realtek 8201 phy + some OEM boards do not work properly.
*/
@@ -1630,11 +1660,19 @@ static void nv_mac_reset(struct net_device *dev)
pci_push(base);
}
-static void nv_get_hw_stats(struct net_device *dev)
+/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
+static void nv_update_stats(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ /* If this ever ends up running in top-half (hard-IRQ) context, the
+ * plain spin_lock() on hwstats_lock in the callers must be replaced
+ * with spin_lock_irqsave(). */
+ WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
+ assert_spin_locked(&np->hwstats_lock);
+
+ /* query hardware */
np->estats.tx_bytes += readl(base + NvRegTxCnt);
np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
@@ -1693,33 +1731,73 @@ static void nv_get_hw_stats(struct net_device *dev)
}
/*
- * nv_get_stats: dev->get_stats function
+ * nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
* Called with read_lock(&dev_base_lock) held for read -
* only synchronized against unregister_netdevice.
*/
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64*
+nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
+ unsigned int syncp_start;
+
+ /*
+ * Note: because HW stats are not always available and for
+ * consistency reasons, the following ifconfig stats are
+ * managed by software: rx_bytes, tx_bytes, rx_packets and
+ * tx_packets. The related hardware stats reported by ethtool
+ * should be equivalent to these ifconfig stats, with 4
+ * additional bytes per packet (Ethernet FCS CRC), except for
+ * tx_packets when TSO kicks in.
+ */
+
+ /* software stats */
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+ storage->rx_packets = np->stat_rx_packets;
+ storage->rx_bytes = np->stat_rx_bytes;
+ storage->rx_dropped = np->stat_rx_dropped;
+ storage->rx_missed_errors = np->stat_rx_missed_errors;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+ storage->tx_packets = np->stat_tx_packets;
+ storage->tx_bytes = np->stat_tx_bytes;
+ storage->tx_dropped = np->stat_tx_dropped;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
/* If the nic supports hw counters then retrieve latest values */
- if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
- nv_get_hw_stats(dev);
-
- /* copy to net_device stats */
- dev->stats.tx_packets = np->estats.tx_packets;
- dev->stats.rx_bytes = np->estats.rx_bytes;
- dev->stats.tx_bytes = np->estats.tx_bytes;
- dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
- dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
- dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
- dev->stats.rx_over_errors = np->estats.rx_over_errors;
- dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
- dev->stats.rx_errors = np->estats.rx_errors_total;
- dev->stats.tx_errors = np->estats.tx_errors_total;
- }
-
- return &dev->stats;
+ if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+ spin_lock_bh(&np->hwstats_lock);
+
+ nv_update_stats(dev);
+
+ /* generic stats */
+ storage->rx_errors = np->estats.rx_errors_total;
+ storage->tx_errors = np->estats.tx_errors_total;
+
+ /* meaningful only when NIC supports stats v3 */
+ storage->multicast = np->estats.rx_multicast;
+
+ /* detailed rx_errors */
+ storage->rx_length_errors = np->estats.rx_length_error;
+ storage->rx_over_errors = np->estats.rx_over_errors;
+ storage->rx_crc_errors = np->estats.rx_crc_errors;
+ storage->rx_frame_errors = np->estats.rx_frame_align_error;
+ storage->rx_fifo_errors = np->estats.rx_drop_frame;
+
+ /* detailed tx_errors */
+ storage->tx_carrier_errors = np->estats.tx_carrier_errors;
+ storage->tx_fifo_errors = np->estats.tx_fifo_errors;
+
+ spin_unlock_bh(&np->hwstats_lock);
+ }
+
+ return storage;
}
/*
@@ -1752,8 +1830,12 @@ static int nv_alloc_rx(struct net_device *dev)
np->put_rx.orig = np->first_rx.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1784,8 +1866,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
np->put_rx.ex = np->first_rx.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1842,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev)
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+ netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
@@ -1920,8 +2007,11 @@ static void nv_drain_tx(struct net_device *dev)
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
- if (nv_release_txskb(np, &np->tx_skb[i]))
- dev->stats.tx_dropped++;
+ if (nv_release_txskb(np, &np->tx_skb[i])) {
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ }
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].dma_single = 0;
@@ -2187,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.orig = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2331,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.ex = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2368,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc *orig_get_tx = np->get_tx.orig;
+ unsigned int bytes_compl = 0;
while ((np->get_tx.orig != np->put_tx.orig) &&
!((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2378,9 +2475,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
if (flags & NV_TX_ERROR) {
- if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+ if ((flags & NV_TX_RETRYERROR)
+ && !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
+ } else {
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2388,9 +2492,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
} else {
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
+ } else {
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2401,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
@@ -2414,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+ unsigned long bytes_cleaned = 0;
while ((np->get_tx.ex != np->put_tx.ex) &&
!((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2423,14 +2538,21 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
nv_gear_backoff_reseed(dev);
else
nv_legacybackoff_reseed(dev);
}
+ } else {
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_cleaned += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2438,11 +2560,15 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (np->tx_limit)
nv_tx_flip_ownership(dev);
}
+
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->first_tx.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
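
The forcedeth TX hunks above hook the driver into byte queue limits: each queued packet's length is reported with netdev_sent_queue(), completions are reported in bulk with netdev_completed_queue(), and netdev_reset_queue() clears the accounting when the TX ring is reinitialised (nv_init_tx). Condensed from those hunks, the three calls a driver needs are (pkts_compl/bytes_compl stand for the totals accumulated while walking the ring):

    	/* in ndo_start_xmit, once the descriptor is handed to the NIC */
    	netdev_sent_queue(dev, skb->len);

    	/* in the TX completion path, after walking the completed descriptors */
    	netdev_completed_queue(dev, pkts_compl, bytes_compl);

    	/* whenever the TX ring is (re)initialised */
    	netdev_reset_queue(dev);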
@@ -2461,56 +2587,64 @@ static void nv_tx_timeout(struct net_device *dev)
u32 status;
union ring_type put_tx;
int saved_tx_limit;
- int i;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
else
status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
- netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+ netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
- netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
- netdev_info(dev, "Dumping tx registers\n");
- for (i = 0; i <= np->register_size; i += 32) {
- netdev_info(dev,
- "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- i,
- readl(base + i + 0), readl(base + i + 4),
- readl(base + i + 8), readl(base + i + 12),
- readl(base + i + 16), readl(base + i + 20),
- readl(base + i + 24), readl(base + i + 28));
- }
- netdev_info(dev, "Dumping tx ring\n");
- for (i = 0; i < np->tx_ring_size; i += 4) {
- if (!nv_optimized(np)) {
- netdev_info(dev,
- "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.orig[i].buf),
- le32_to_cpu(np->tx_ring.orig[i].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+1].buf),
- le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+2].buf),
- le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+3].buf),
- le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
- } else {
+ if (unlikely(debug_tx_timeout)) {
+ int i;
+
+ netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+ netdev_info(dev, "Dumping tx registers\n");
+ for (i = 0; i <= np->register_size; i += 32) {
netdev_info(dev,
- "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ "%3x: %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.ex[i].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i].buflow),
- le32_to_cpu(np->tx_ring.ex[i].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+1].buflow),
- le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+2].buflow),
- le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+3].buflow),
- le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ netdev_info(dev, "Dumping tx ring\n");
+ for (i = 0; i < np->tx_ring_size; i += 4) {
+ if (!nv_optimized(np)) {
+ netdev_info(dev,
+ "%03x: %08x %08x // %08x %08x "
+ "// %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+ } else {
+ netdev_info(dev,
+ "%03x: %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ }
}
}
@@ -2633,8 +2767,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* the rest are hard errors */
else {
- if (flags & NV_RX_MISSEDFRAME)
- dev->stats.rx_missed_errors++;
+ if (flags & NV_RX_MISSEDFRAME) {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_missed_errors++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
+ }
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2677,7 +2814,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
np->get_rx.orig = np->first_rx.orig;
@@ -2760,7 +2900,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
__vlan_hwaccel_put_tag(skb, vid);
}
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
}
@@ -3003,6 +3146,73 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
}
}
+static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 phyreg, txreg;
+ int mii_status;
+
+ np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
+ np->duplex = duplex;
+
+ /* see if gigabit phy */
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ phyreg = readl(base + NvRegSlotTime);
+ phyreg &= ~(0x3FF00);
+ if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+ phyreg |= NVREG_SLOTTIME_1000_FULL;
+ writel(phyreg, base + NvRegSlotTime);
+ }
+
+ phyreg = readl(base + NvRegPhyInterface);
+ phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+ if (np->duplex == 0)
+ phyreg |= PHY_HALF;
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+ phyreg |= PHY_100;
+ else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ phyreg |= PHY_1000;
+ writel(phyreg, base + NvRegPhyInterface);
+
+ if (phyreg & PHY_RGMII) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+ else
+ txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+ } else {
+ txreg = NVREG_TX_DEFERRAL_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxDeferral);
+
+ if (np->desc_ver == DESC_VER_1) {
+ txreg = NVREG_TX_WM_DESC1_DEFAULT;
+ } else {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_WM_DESC2_3_1000;
+ else
+ txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxWatermark);
+
+ writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+ base + NvRegMisc1);
+ pci_push(base);
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+
+ return;
+}
+
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
@@ -3024,11 +3234,25 @@ static int nv_update_linkspeed(struct net_device *dev)
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
+ u32 bmcr;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
u32 txrxFlags = 0;
u32 phy_exp;
+ /* If device loopback is enabled, set carrier on and enable max link
+ * speed.
+ */
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (bmcr & BMCR_LOOPBACK) {
+ if (netif_running(dev)) {
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ }
+ return 1;
+ }
+
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
*/
@@ -3711,6 +3935,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
}
+ netdev_info(dev, "MSI-X enabled\n");
}
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
@@ -3732,6 +3957,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIMap1);
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ netdev_info(dev, "MSI enabled\n");
}
}
if (ret != 0) {
@@ -3886,11 +4112,18 @@ static void nv_poll_controller(struct net_device *dev)
#endif
static void nv_do_stats_poll(unsigned long data)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
- nv_get_hw_stats(dev);
+ /* If the lock is already held, the stats are currently being
+ * refreshed and are therefore fresh enough */
+ if (spin_trylock(&np->hwstats_lock)) {
+ nv_update_stats(dev);
+ spin_unlock(&np->hwstats_lock);
+ }
if (!np->in_shutdown)
mod_timer(&np->stats_poll,
@@ -3900,9 +4133,9 @@ static void nv_do_stats_poll(unsigned long data)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, FORCEDETH_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -4455,7 +4688,63 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
return 0;
}
-static u32 nv_fix_features(struct net_device *dev, u32 features)
+static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ unsigned long flags;
+ u32 miicontrol;
+ int err, retval = 0;
+
+ spin_lock_irqsave(&np->lock, flags);
+ miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (features & NETIF_F_LOOPBACK) {
+ if (miicontrol & BMCR_LOOPBACK) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already enabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn on loopback mode */
+ miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+ err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
+ if (err) {
+ retval = PHY_ERROR;
+ spin_unlock_irqrestore(&np->lock, flags);
+ phy_init(dev);
+ } else {
+ if (netif_running(dev)) {
+ /* Force 1000 Mbps full-duplex */
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
+ 1);
+ /* Force link up */
+ netif_carrier_on(dev);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev,
+ "Internal PHY loopback mode enabled.\n");
+ }
+ } else {
+ if (!(miicontrol & BMCR_LOOPBACK)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already disabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn off loopback */
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Internal PHY loopback mode disabled.\n");
+ phy_init(dev);
+ }
+ msleep(500);
+ spin_lock_irqsave(&np->lock, flags);
+ nv_enable_irq(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return retval;
+}
+
+static netdev_features_t nv_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/* vlan is dependent on rx checksum offload */
if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
@@ -4464,7 +4753,7 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
return features;
}
-static void nv_vlan_mode(struct net_device *dev, u32 features)
+static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -4485,11 +4774,18 @@ static void nv_vlan_mode(struct net_device *dev, u32 features)
spin_unlock_irq(&np->lock);
}
-static int nv_set_features(struct net_device *dev, u32 features)
+static int nv_set_features(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
+ int retval;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
+ retval = nv_set_loopback(dev, features);
+ if (retval != 0)
+ return retval;
+ }
if (changed & NETIF_F_RXCSUM) {
spin_lock_irq(&np->lock);
@@ -4535,14 +4831,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
}
}
-static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+static void nv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *buffer)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
- /* update stats */
- nv_get_hw_stats(dev);
-
- memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_lock_bh(&np->hwstats_lock);
+ nv_update_stats(dev);
+ memcpy(buffer, &np->estats,
+ nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_unlock_bh(&np->hwstats_lock);
}
static int nv_link_test(struct net_device *dev)
@@ -5124,6 +5424,12 @@ static int nv_open(struct net_device *dev)
spin_unlock_irq(&np->lock);
+ /* If the loopback feature was set while the device was down, make sure
+ * that it's set correctly now.
+ */
+ if (dev->features & NETIF_F_LOOPBACK)
+ nv_set_loopback(dev, dev->features);
+
return 0;
out_drain:
nv_drain_rxtx(dev);
@@ -5180,7 +5486,7 @@ static int nv_close(struct net_device *dev)
static const struct net_device_ops nv_netdev_ops = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5197,7 +5503,7 @@ static const struct net_device_ops nv_netdev_ops = {
static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit_optimized,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5236,6 +5542,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->dev = dev;
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
+ spin_lock_init(&np->hwstats_lock);
SET_NETDEV_DEV(dev, &pci_dev->dev);
init_timer(&np->oom_kick);
@@ -5244,7 +5551,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = nv_do_nic_poll; /* timer handler */
- init_timer(&np->stats_poll);
+ init_timer_deferrable(&np->stats_poll);
np->stats_poll.data = (unsigned long) dev;
np->stats_poll.function = nv_do_stats_poll; /* timer handler */
@@ -5328,6 +5635,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->features |= dev->hw_features;
+ /* Add loopback capability to the device. */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5603,12 +5913,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
- dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
"vlan " : "",
+ dev->features & (NETIF_F_LOOPBACK) ?
+ "loopback " : "",
id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
@@ -5982,6 +6294,9 @@ module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+module_param(debug_tx_timeout, bool, 0);
+MODULE_PARM_DESC(debug_tx_timeout,
+ "Dump tx related registers and ring when tx_timeout happens");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 8c8027176be..ac4e72d529e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, pch_driver_version);
- strcpy(drvinfo->fw_version, "N/A");
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 48406ca382f..964e9c0948b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2109,10 +2109,11 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* Returns
* 0: HW state updated successfully
*/
-static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+static int pch_gbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & NETIF_F_RXCSUM))
return 0;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 9c075ea2682..9cb5f912e48 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -18,8 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <linux/module.h> /* for __MODULE_STRING */
#include "pch_gbe.h"
+#include <linux/module.h> /* for __MODULE_STRING */
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434baf..90497ffb1ac 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
# Makefile for the A Semi network device drivers.
#
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index e09ea83b8c4..8a371985319 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -83,14 +83,18 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
u32 fw_minor = 0;
u32 fw_build = 0;
- strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
- strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+ strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8cf3173ba48..7dd9a4b107e 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -544,7 +544,8 @@ static void netxen_set_multicast_list(struct net_device *dev)
adapter->set_multi(dev);
}
-static u32 netxen_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM)) {
netdev_info(dev, "disabling LRO as RXCSUM is off\n");
@@ -555,7 +556,8 @@ static u32 netxen_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int netxen_set_features(struct net_device *dev, u32 features)
+static int netxen_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int hw_lro;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index a4bdff438a5..7931531c3a4 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1735,10 +1735,11 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
- strncpy(drvinfo->version, ql3xxx_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ql3xxx_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7ed53dbb864..60976fc4ccc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1466,8 +1466,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
-int qlcnic_set_features(struct net_device *netdev, u32 features);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 8aa1c6e8667..cc228cf3d84 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -140,11 +140,14 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
-
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
- strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
}
static int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index bcb81e47543..b528e52a8ee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -817,12 +817,13 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
}
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
}
@@ -833,10 +834,10 @@ u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
}
-int qlcnic_set_features(struct net_device *netdev, u32 features)
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
if (!(changed & NETIF_F_LRO))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bd163828e3..69b8e4ef14d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
/* PCI Device ID Table */
#define ENTRY(device) \
@@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
adapter->pvid = 0;
}
-static void
+static int
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
set_bit(vid, adapter->vlans);
+ return 0;
}
-static void
+static int
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
clear_bit(vid, adapter->vlans);
+ return 0;
}
static void
@@ -792,7 +794,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- unsigned long features, vlan_features;
+ netdev_features_t features, vlan_features;
features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9ef..b8478aab050 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -58,10 +58,8 @@
#define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
struct ob_mac_iocb_req *queue_entry;
u32 index;
struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 1];
+ struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
};
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 9b67bfea035..8e2c2a74f3a 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -366,13 +366,16 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, qlge_driver_name, 32);
- strncpy(drvinfo->version, qlge_driver_version, 32);
- snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+ strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, qlge_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "v%d.%d.%d",
(qdev->fw_rev_id & 0x00ff0000) >> 16,
(qdev->fw_rev_id & 0x0000ff00) >> 8,
(qdev->fw_rev_id & 0x000000ff));
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index c92afcd912e..b5489873728 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
return work_done;
}
-static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -2323,7 +2323,8 @@ static void qlge_vlan_mode(struct net_device *ndev, u32 features)
}
}
-static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2337,9 +2338,10 @@ static u32 qlge_fix_features(struct net_device *ndev, u32 features)
return features;
}
-static int qlge_set_features(struct net_device *ndev, u32 features)
+static int qlge_set_features(struct net_device *ndev,
+ netdev_features_t features)
{
- u32 changed = ndev->features ^ features;
+ netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
qlge_vlan_mode(ndev, features);
@@ -2347,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev, u32 features)
return 0;
}
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = MAC_ADDR_E;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_add_vid(qdev, vid);
+ err = __qlge_vlan_rx_add_vid(qdev, vid);
set_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = 0;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to clear vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_kill_vid(qdev, vid);
+ err = __qlge_vlan_rx_kill_vid(qdev, vid);
clear_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 1fc01ca72b4..4bf68cfef39 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -940,7 +940,7 @@ static void r6040_multicast_list(struct net_device *dev)
iowrite16(lp->mcr0, ioaddr + MCR0);
/* Fill the MAC hash tables with their values */
- if (lp->mcr0 && MCR0_HASH_EN) {
+ if (lp->mcr0 & MCR0_HASH_EN) {
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
iowrite16(hash_table[2], ioaddr + MAR2);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index ee5da9293ce..cc6b391479c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -859,7 +859,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
int rx_mode;
- u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
@@ -886,11 +885,9 @@ static void __cp_set_rx_mode (struct net_device *dev)
}
/* We can safely update without stopping the chip. */
- tmp = cp_rx_config | rx_mode;
- if (cp->rx_config != tmp) {
- cpw32_f (RxConfig, tmp);
- cp->rx_config = tmp;
- }
+ cp->rx_config = cp_rx_config | rx_mode;
+ cpw32_f(RxConfig, cp->rx_config);
+
cpw32_f (MAR0 + 0, mc_filter[0]);
cpw32_f (MAR0 + 4, mc_filter[1]);
}
@@ -1319,9 +1316,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
{
struct cp_private *cp = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(cp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
@@ -1392,7 +1389,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
cp->msg_enable = value;
}
-static int cp_set_features(struct net_device *dev, u32 features)
+static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -1589,7 +1586,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
*/
-#define eeprom_delay() readl(ee_addr)
+#define eeprom_delay() readb(ee_addr)
/* The EEPROM commands include the alway-set leading bit. */
#define EE_EXTEND_CMD (4)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 4d6b254fc6c..a8779bedb3d 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1122,7 +1122,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
*/
-#define eeprom_delay() (void)RTL_R32(Cfg9346)
+#define eeprom_delay() (void)RTL_R8(Cfg9346)
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD (5)
@@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct rtl8139_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
info->regdump_len = tp->regs_len;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 92b45f08858..7a0c800b50a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -69,9 +69,6 @@
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
-/* MAC address length */
-#define MAC_ADDR_LEN 6
-
#define MAX_READ_REQUEST_SHIFT 12
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
@@ -477,7 +474,6 @@ enum rtl_register_content {
/* Config1 register p.24 */
LEDS1 = (1 << 7),
LEDS0 = (1 << 6),
- MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
Speed_down = (1 << 4),
MEMMAP = (1 << 3),
IOMAP = (1 << 2),
@@ -485,6 +481,7 @@ enum rtl_register_content {
PMEnable = (1 << 0), /* Power Management Enable */
/* Config2 register p. 25 */
+ MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
PCI_Clock_66MHz = 0x01,
PCI_Clock_33MHz = 0x00,
@@ -1183,11 +1180,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
- RTL_W16(IntrMask, 0x0000);
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrStatus, 0xffff);
+ RTL_W16(IntrMask, 0x0000);
+ RTL_W16(IntrStatus, tp->intr_event);
+ RTL_R8(ChipCmd);
}
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -1292,7 +1291,7 @@ static void __rtl8169_check_link_status(struct net_device *dev,
netif_carrier_off(dev);
netif_info(tp, ifdown, dev, "link down\n");
if (pm)
- pm_schedule_suspend(&tp->pci_dev->dev, 100);
+ pm_schedule_suspend(&tp->pci_dev->dev, 5000);
}
spin_unlock_irqrestore(&tp->lock, flags);
}
@@ -1404,12 +1403,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strcpy(info->driver, MODULENAME);
- strcpy(info->version, RTL8169_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+ strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
- strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
- rtl_fw->version);
+ if (!IS_ERR_OR_NULL(rtl_fw))
+ strlcpy(info->fw_version, rtl_fw->version,
+ sizeof(info->fw_version));
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1553,7 +1553,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
}
-static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t rtl8169_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1567,7 +1568,8 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int rtl8169_set_features(struct net_device *dev, u32 features)
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -3424,22 +3426,24 @@ static const struct rtl_cfg_info {
};
/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
const struct rtl_cfg_info *cfg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
unsigned msi = 0;
u8 cfg2;
cfg2 = RTL_R8(Config2) & ~MSIEnable;
if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(pdev)) {
- dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+ if (pci_enable_msi(tp->pci_dev)) {
+ netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
} else {
cfg2 |= MSIEnable;
msi = RTL_FEATURE_MSI;
}
}
- RTL_W8(Config2, cfg2);
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+ RTL_W8(Config2, cfg2);
return msi;
}
@@ -3933,8 +3937,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
break;
udelay(100);
}
-
- rtl8169_init_ring_indexes(tp);
}
static int __devinit
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+ tp->features |= rtl_try_msi(tp, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
if (rtl_tbi_enabled(tp)) {
@@ -4099,7 +4101,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&tp->lock);
/* Get MAC address */
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
@@ -4339,7 +4341,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
/* Disable interrupts */
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
rtl_rx_close(tp);
@@ -4885,8 +4887,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@@ -5076,6 +5077,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+ tp->intr_event &= ~RxFIFOOver;
+ tp->napi_event &= ~RxFIFOOver;
+ }
+
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5348,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
/* Wait for any pending NAPI task to complete */
napi_disable(&tp->napi);
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
tp->intr_mask = 0xffff;
RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5395,16 @@ static void rtl8169_reset_task(struct work_struct *work)
if (!netif_running(dev))
goto out_unlock;
+ rtl8169_hw_reset(tp);
+
rtl8169_wait_for_quiescence(dev);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
- rtl8169_hw_reset(tp);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5415,6 @@ out_unlock:
static void rtl8169_tx_timeout(struct net_device *dev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_hw_reset(tp);
-
- /* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -5804,6 +5807,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
*/
status = RTL_R16(IntrStatus);
while (status && status != 0xffff) {
+ status &= tp->intr_event;
+ if (!status)
+ break;
+
handled = 1;
/* Handle all of the error cases first. These will reset
@@ -5818,27 +5825,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
switch (tp->mac_version) {
/* Work around for rx fifo overflow */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_26:
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
goto done;
- /* Testers needed. */
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- /* Experimental science. Pktgen proof. */
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_25:
- if (status == RxFIFOOver)
- goto done;
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 9b230740c6a..ebfb682dfe5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1957,18 +1957,7 @@ static struct platform_driver sh_eth_driver = {
},
};
-static int __init sh_eth_init(void)
-{
- return platform_driver_register(&sh_eth_driver);
-}
-
-static void __exit sh_eth_cleanup(void)
-{
- platform_driver_unregister(&sh_eth_driver);
-}
-
-module_init(sh_eth_init);
-module_exit(sh_eth_cleanup);
+module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c3673f151a4..f955a19eb22 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -834,23 +834,7 @@ static struct platform_driver sgiseeq_driver = {
}
};
-static int __init sgiseeq_module_init(void)
-{
- if (platform_driver_register(&sgiseeq_driver)) {
- printk(KERN_ERR "Driver registration failed\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit sgiseeq_module_exit(void)
-{
- platform_driver_unregister(&sgiseeq_driver);
-}
-
-module_init(sgiseeq_module_init);
-module_exit(sgiseeq_module_exit);
+module_platform_driver(sgiseeq_driver);
MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d5731f1fe6d..e43702f33b6 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1336,7 +1336,8 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] = i % efx->n_rx_channels;
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->n_rx_channels);
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1900,7 +1901,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
/* Otherwise efx_start_port() will do this */
}
-static int efx_set_features(struct net_device *net_dev, u32 data)
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2235,9 +2236,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
.driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 4764793ed23..8a5336d86a1 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,10 +14,6 @@
#include "net_driver.h"
#include "filter.h"
-/* PCI IDs */
-#define BETHPAGE_A_P_DEVID 0x0803
-#define SIENA_A_P_DEVID 0x0813
-
/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f3cd96dfa39..1be51b2bfa4 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -956,40 +956,28 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
return rc < 0 ? rc : 0;
}
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
- struct ethtool_rxfh_indir *indir)
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t copy_size =
- min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return -EOPNOTSUPP;
+ return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+ 0 : ARRAY_SIZE(efx->rx_indir_table));
+}
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
- indir->size = ARRAY_SIZE(efx->rx_indir_table);
- memcpy(indir->ring_index, efx->rx_indir_table,
- copy_size * sizeof(indir->ring_index[0]));
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
return 0;
}
static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const struct ethtool_rxfh_indir *indir)
+ const u32 *indir)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t i;
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return -EOPNOTSUPP;
-
- /* Validate size and indices */
- if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- if (indir->ring_index[i] >= efx->n_rx_channels)
- return -EINVAL;
- memcpy(efx->rx_indir_table, indir->ring_index,
- sizeof(efx->rx_indir_table));
+ memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
efx_nic_push_rx_indir_table(efx);
return 0;
}
@@ -1020,6 +1008,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rx_ntuple = efx_ethtool_set_rx_ntuple,
+ .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 97b606b92e8..8ae1ebd3539 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
if (!nic_data->stats_pending)
return;
- nic_data->stats_pending = 0;
+ nic_data->stats_pending = false;
if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
rmb(); /* read the done flag before the stats */
efx->mac_op->update_stats(efx);
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index b6304486f24..bc9dcd6b30d 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
/* The MCDI interface can in fact do multiple erase blocks at once;
@@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
while (offset < end) {
@@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
int rc = 0;
if (part->mcdi.updating) {
- part->mcdi.updating = 0;
+ part->mcdi.updating = false;
rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b8e251a1ee4..c49502bab6a 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -908,7 +908,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- u32 offload_features;
+ netdev_features_t offload_features;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 752d521c09b..aca34986176 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,11 +479,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
- skb_frag_set_page(skb, 0, page);
- skb_shinfo(skb)->frags[0].page_offset =
- efx_rx_buf_offset(efx, rx_buf);
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
- skb_shinfo(skb)->nr_frags = 1;
+ skb_fill_page_desc(skb, 0, page,
+ efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
skb->len = rx_buf->len;
skb->data_len = rx_buf->len;
@@ -669,7 +666,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->ptr_mask);
/* Allocate RX buffers */
- rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 822f6c2a6a7..52edd24fcde 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
- state->skbs = kzalloc(sizeof(state->skbs[0]) *
- state->packet_count, GFP_KERNEL);
+ state->skbs = kcalloc(state->packet_count,
+ sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index cc2549cb707..4d5d619feaa 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = 0;
+ bool already_attached = false;
efx_oword_t reg;
int rc;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index df88c5430f9..72f0fbc73b1 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -31,7 +31,9 @@
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += buffer->skb->len;
dev_kfree_skb_any((struct sk_buff *) buffer->skb);
buffer->skb = NULL;
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
buffer->skb = skb;
buffer->continuation = false;
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
unwind:
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
--tx_queue->insert_count;
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->len = 0;
}
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
* specified index.
*/
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index)
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
buffer->continuation = true;
buffer->len = 0;
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
- efx_dequeue_buffers(tx_queue, index);
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
/* See if we need to restart the netif queue. This barrier
* separates the update of read_count from the test of the
@@ -468,7 +479,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
- tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
@@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count;
}
+ netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1173,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
goto mem_err;
}
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 60135aa5580..53efe7c7b1c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -28,6 +28,7 @@
#include <linux/tcp.h> /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h> /* MII definitions */
+#include <linux/crc32.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
@@ -58,12 +59,19 @@ static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
+ */
+#define METH_MCF_LIMIT 32
+
+/*
* This structure is private to each device. It is used to pass
* packets in and out, so there is place for a packet
*/
struct meth_private {
/* in-memory copy of MAC Control register */
- unsigned long mac_ctrl;
+ u64 mac_ctrl;
+
/* in-memory copy of DMA Control register */
unsigned long dma_ctrl;
/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
@@ -79,6 +87,9 @@ struct meth_private {
struct sk_buff *rx_skbs[RX_RING_ENTRIES];
unsigned long rx_write;
+ /* Multicast filter. */
+ u64 mcast_filter;
+
spinlock_t meth_lock;
};
@@ -765,6 +776,40 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
+static void meth_set_rx_mode(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ priv->mac_ctrl &= ~METH_PROMISC;
+
+ if (dev->flags & IFF_PROMISC) {
+ priv->mac_ctrl |= METH_PROMISC;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ priv->mac_ctrl |= METH_ACCEPT_AMCAST;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else {
+ struct netdev_hw_addr *ha;
+ priv->mac_ctrl |= METH_ACCEPT_MCAST;
+
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
+ (volatile unsigned long *)&priv->mcast_filter);
+ }
+
+ /* Write the changes to the chip registers. */
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ mace->eth.mcast_filter = priv->mcast_filter;
+
+ /* Done! */
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+ netif_wake_queue(dev);
+}
+
static const struct net_device_ops meth_netdev_ops = {
.ndo_open = meth_open,
.ndo_stop = meth_release,
@@ -774,6 +819,7 @@ static const struct net_device_ops meth_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = meth_set_rx_mode,
};
/*
@@ -830,24 +876,7 @@ static struct platform_driver meth_driver = {
}
};
-static int __init meth_init_module(void)
-{
- int err;
-
- err = platform_driver_register(&meth_driver);
- if (err)
- printk(KERN_ERR "Driver registration failed\n");
-
- return err;
-}
-
-static void __exit meth_exit_module(void)
-{
- platform_driver_unregister(&meth_driver);
-}
-
-module_init(meth_init_module);
-module_exit(meth_exit_module);
+module_platform_driver(meth_driver);
MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 1b4658c9939..5b118cd5bf9 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -47,8 +47,6 @@
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count
-#define MAC_ADDR_LEN 6
-
#define NUM_TX_DESC 64 /* [8..1024] */
#define NUM_RX_DESC 64 /* [8..8192] */
#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
@@ -1601,7 +1599,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
}
/* Get MAC address from EEPROM */
- for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+ for (i = 0; i < ETH_ALEN / 2; i++) {
u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
@@ -1653,7 +1651,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
udelay(50);
pci_read_config_byte(isa_bridge, 0x48, &reg);
- for (i = 0; i < MAC_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
outb(0x9 + i, 0x78);
dev->dev_addr[i] = inb(0x79);
}
@@ -1692,7 +1690,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
*/
SIS_W16(RxMacControl, ctl & ~0x0f00);
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
SIS_W16(RxMacControl, ctl);
@@ -1760,9 +1758,10 @@ static void sis190_get_drvinfo(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev),
+ sizeof(info->bus_info));
}
static int sis190_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index a184abc5ef1..c8efc708c79 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1991,9 +1991,10 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- strcpy (info->driver, SIS900_MODULE_NAME);
- strcpy (info->version, SIS900_DRV_VERSION);
- strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+ strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+ sizeof(info->bus_info));
}
static u32 sis900_get_msglevel(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 0a5dfb81415..2c077ce0b6d 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct epic_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8f61fe9db1d..313ba3b32ab 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2196,15 +2196,4 @@ static struct platform_driver smc911x_driver = {
},
};
-static int __init smc911x_init(void)
-{
- return platform_driver_register(&smc911x_driver);
-}
-
-static void __exit smc911x_cleanup(void)
-{
- platform_driver_unregister(&smc911x_driver);
-}
-
-module_init(smc911x_init);
-module_exit(smc911x_cleanup);
+module_platform_driver(smc911x_driver);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index cbfa9818713..ada927aba7a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index f47f81e2532..64ad3ed7449 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2417,15 +2417,4 @@ static struct platform_driver smc_driver = {
},
};
-static int __init smc_init(void)
-{
- return platform_driver_register(&smc_driver);
-}
-
-static void __exit smc_cleanup(void)
-{
- platform_driver_unregister(&smc_driver);
-}
-
-module_init(smc_init);
-module_exit(smc_cleanup);
+module_platform_driver(smc_driver);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index d2be42aafbe..06d0df61bee 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -44,6 +44,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -88,6 +89,8 @@ struct smsc911x_ops {
unsigned int *buf, unsigned int wordcount);
};
+#define SMSC911X_NUM_SUPPLIES 2
+
struct smsc911x_data {
void __iomem *ioaddr;
@@ -138,6 +141,9 @@ struct smsc911x_data {
/* register access functions */
const struct smsc911x_ops *ops;
+
+ /* regulators */
+ struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
};
/* Easy access to information */
@@ -362,6 +368,76 @@ out:
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
+/*
+ * Enable resources, currently just regulators.
+ */
+static int smsc911x_enable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "failed to enable regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * Disable resources, currently just regulators.
+ */
+static int smsc911x_disable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ return ret;
+}
+
+/*
+ * Request resources, currently just regulators.
+ *
+ * The SMSC911x has two power pins: vddvario and vdd33a. In designs where
+ * these are not always-on, we need to request regulators to be turned on
+ * before we can try to access the device registers.
+ */
+static int smsc911x_request_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ /* Request regulators */
+ pdata->supplies[0].supply = "vdd33a";
+ pdata->supplies[1].supply = "vddvario";
+ ret = regulator_bulk_get(&pdev->dev,
+ ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "couldn't get regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * Free resources, currently just regulators.
+ *
+ */
+static void smsc911x_free_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+
+ /* Free regulators */
+ regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+}
+
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
* and smsc911x_mac_write, so assumes mac_lock is held */
static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -1937,6 +2013,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int byte_test;
+ unsigned int to = 100;
SMSC_TRACE(pdata, probe, "Driver Parameters:");
SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
@@ -1952,6 +2029,17 @@ static int __devinit smsc911x_init(struct net_device *dev)
return -ENODEV;
}
+ /*
+ * Poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms.
+ */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+ if (to == 0) {
+ pr_err("Device not READY in 100ms aborting\n");
+ return -ENODEV;
+ }
+
/* Check byte ordering */
byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
@@ -2080,6 +2168,9 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
iounmap(pdata->ioaddr);
+ (void)smsc911x_disable_resources(pdev);
+ smsc911x_free_resources(pdev);
+
free_netdev(dev);
return 0;
@@ -2206,10 +2297,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
pdata->dev = dev;
pdata->msg_enable = ((1 << debug) - 1);
+ platform_set_drvdata(pdev, dev);
+
+ retval = smsc911x_request_resources(pdev);
+ if (retval)
+ goto out_return_resources;
+
+ retval = smsc911x_enable_resources(pdev);
+ if (retval)
+ goto out_disable_resources;
+
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
retval = -ENOMEM;
- goto out_free_netdev_2;
+ goto out_disable_resources;
}
retval = smsc911x_probe_config_dt(&pdata->config, np);
@@ -2221,7 +2322,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe, "Error smsc911x config not found");
- goto out_unmap_io_3;
+ goto out_disable_resources;
}
/* assume standard, non-shifted, access to HW registers */
@@ -2232,7 +2333,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_init(dev);
if (retval < 0)
- goto out_unmap_io_3;
+ goto out_disable_resources;
/* configure irq polarity and type before connecting isr */
if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
@@ -2252,15 +2353,13 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
- goto out_unmap_io_3;
+ goto out_free_irq;
}
- platform_set_drvdata(pdev, dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_unset_drvdata_4;
+ goto out_free_irq;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
@@ -2309,12 +2408,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
out_unregister_netdev_5:
unregister_netdev(dev);
-out_unset_drvdata_4:
- platform_set_drvdata(pdev, NULL);
+out_free_irq:
free_irq(dev->irq, dev);
-out_unmap_io_3:
+out_disable_resources:
+ (void)smsc911x_disable_resources(pdev);
+out_return_resources:
+ smsc911x_free_resources(pdev);
+ platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
-out_free_netdev_2:
free_netdev(dev);
out_release_io_1:
release_mem_region(res->start, resource_size(res));
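The regulator_bulk_* calls introduced above follow a fixed get/enable/disable/free lifecycle. A minimal, self-contained sketch of that consumer pattern (hypothetical device and function names, not part of the patch):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer: two supplies looked up and enabled in one shot */
static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdd33a" },
	{ .supply = "vddvario" },
};

static int example_power_on(struct device *dev)
{
	int ret;

	/* Look up all supplies at once; fails if any of them is missing */
	ret = regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				 example_supplies);
	if (ret)
		return ret;

	/* Enable them before touching any device registers */
	ret = regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				    example_supplies);
	if (ret)
		regulator_bulk_free(ARRAY_SIZE(example_supplies),
				    example_supplies);
	return ret;
}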
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index edb24b0e337..a9efbdfe530 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -279,9 +279,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
{
struct smsc9420_pdata *pd = netdev_priv(netdev);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->bus_info, pci_name(pd->pdev));
- strcpy(drvinfo->version, DRV_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
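The strcpy()-to-strlcpy() conversions in this patch (here and in the cassini, niu, sungem and sunhme hunks further down) all follow the same pattern: the struct ethtool_drvinfo fields are fixed-size character arrays, so every copy is bounded by sizeof() the destination. A stand-alone sketch (hypothetical driver name and version):

#include <linux/ethtool.h>
#include <linux/string.h>

static void example_get_drvinfo(struct ethtool_drvinfo *info,
				const char *bus_name)
{
	/* Destination sizes come from the struct, never from the source */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, bus_name, sizeof(info->bus_info));
}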
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 22745d7bf53..036428348fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -12,11 +12,36 @@ config STMMAC_ETH
if STMMAC_ETH
+config STMMAC_PLATFORM
+ tristate "STMMAC platform bus support"
+ depends on STMMAC_ETH
+ default y
+ ---help---
+ This selects the platform specific bus support for
+ the stmmac device driver. This is the driver used
+ on many embedded STM platforms based on ARM and SuperH
+ processors.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config STMMAC_PCI
+ tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+ depends on STMMAC_ETH && PCI && EXPERIMENTAL
+ ---help---
+ This is to select the Synopsys DWMAC available on PCI devices.
+ If you have a controller with this interface, say Y or M here.
+
+ This PCI support is tested on XILINX XC2V3000 FF1152AMT0221
+ D1215994A VIRTEX FPGA board.
+
+ If unsure, say N.
+
config STMMAC_DEBUG_FS
bool "Enable monitoring via sysFS "
default n
depends on STMMAC_ETH && DEBUG_FS
- -- help
+ ---help---
The stmmac entry in /sys reports DMA TX/RX rings
or (if supported) the HW cap register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index d7c45164ea7..bc965ac9e02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2cc11929582..d0b814ef067 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,7 +22,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
@@ -315,5 +319,8 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da66ac511c4..4d5402a1d26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -39,10 +39,11 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- limit = 15000;
+ limit = 10;
while (limit--) {
if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
break;
+ mdelay(10);
}
if (limit < 0)
return -EBUSY;
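The reset wait above is changed from 15000 back-to-back register reads to ten reads spaced 10 ms apart, i.e. roughly a 100 ms budget instead of an unbounded busy spin. The same bounded-poll pattern in isolation (register offset and bit are placeholders):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_BUS_MODE	0x00		/* placeholder register offset */
#define EXAMPLE_SFT_RESET	0x00000001	/* placeholder self-clearing bit */

static int example_wait_for_reset(void __iomem *ioaddr)
{
	int limit = 10;

	/* Poll until the hardware clears the reset bit, sleeping between reads */
	while (limit--) {
		if (!(readl(ioaddr + EXAMPLE_BUS_MODE) & EXAMPLE_SFT_RESET))
			return 0;
		mdelay(10);
	}
	return -EBUSY;
}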
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 627f656b0f3..bc17fd08b55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -41,10 +41,11 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- limit = 15000;
+ limit = 10;
while (limit--) {
if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
break;
+ mdelay(10);
}
if (limit < 0)
return -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e25093510b0..f20aa12931d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -238,6 +238,19 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
writel(data, ioaddr + low);
}
+/* Enable/disable the MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+ if (enable)
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+ else
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 9bafa6cf9e8..120740020e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_2011"
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+#define DRV_MODULE_VERSION "Dec_2011"
#include <linux/stmmac.h>
#include <linux/phy.h>
#include "common.h"
@@ -72,7 +73,6 @@ struct stmmac_priv {
spinlock_t lock;
spinlock_t tx_lock;
int wolopts;
- int wolenabled;
int wol_irq;
#ifdef CONFIG_STMMAC_TIMER
struct stmmac_timer *tm;
@@ -80,10 +80,21 @@ struct stmmac_priv {
struct plat_stmmacenet_data *plat;
struct stmmac_counters mmc;
struct dma_features dma_cap;
+ int hw_cap_support;
};
+extern int phyaddr;
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
+
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat);
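These newly exported entry points are what the bus glue layers added by this patch (stmmac_platform.c and stmmac_pci.c, further down) call into. A condensed sketch of how a platform glue wires them up, with error handling and the wake-up IRQ lookup omitted for brevity:

#include <linux/platform_device.h>
#include <linux/io.h>
#include "stmmac.h"

static int example_glue_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *addr = ioremap(res->start, resource_size(res));
	struct stmmac_priv *priv;

	/* Core probe allocates and registers the net_device */
	priv = stmmac_dvr_probe(&pdev->dev, pdev->dev.platform_data);
	if (!priv)
		return -ENODEV;

	/* The glue layer then fills in the bus-specific pieces */
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;
	priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
	priv->wol_irq = priv->dev->irq;

	platform_set_drvdata(pdev, priv->dev);
	return 0;
}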
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e8eff09bbbd..ed83c4c47b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -185,9 +185,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac)
- strcpy(info->driver, GMAC_ETHTOOL_NAME);
+ strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else
- strcpy(info->driver, MAC100_ETHTOOL_NAME);
+ strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+ sizeof(info->driver));
strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
@@ -430,6 +431,12 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct stmmac_priv *priv = netdev_priv(dev);
u32 support = WAKE_MAGIC | WAKE_UCAST;
+ /* By default almost all GMAC devices support WoL via
+ * magic frame, but we can disable it if the HW capability
+ * register shows no support for pmt_magic_frame. */
+ if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
+ wol->wolopts &= ~WAKE_MAGIC;
+
if (!device_can_wakeup(priv->device))
return -EINVAL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 20546bbbb8d..3738b470054 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -28,12 +28,8 @@
https://bugzilla.stlinux.com/
*******************************************************************************/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
@@ -52,8 +48,6 @@
#endif
#include "stmmac.h"
-#define STMMAC_RESOURCE_NAME "stmmaceth"
-
#undef STMMAC_DEBUG
/*#define STMMAC_DEBUG*/
#ifdef STMMAC_DEBUG
@@ -93,7 +87,7 @@ static int debug = -1; /* -1: default, 0: no output, 16: all */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
-static int phyaddr = -1;
+int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -141,6 +135,11 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
/**
* stmmac_verify_args - verify the driver parameters.
* Description: it verifies if some wrong parameter is passed to the driver.
@@ -321,12 +320,10 @@ static int stmmac_init_phy(struct net_device *dev)
}
/* Stop Advertising 1000BASE Capability if interface is not GMII */
- if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) ||
- (interface == PHY_INTERFACE_MODE_RMII))) {
- phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause);
- phydev->advertising = phydev->supported;
- }
+ if ((interface == PHY_INTERFACE_MODE_MII) ||
+ (interface == PHY_INTERFACE_MODE_RMII))
+ phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
/*
* Broken HW is sometimes missing the pull-up resistor on the
@@ -347,22 +344,6 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_enable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_disable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
/**
* display_ring
* @p: pointer to the ring.
@@ -783,10 +764,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- /* Do not manage MMC IRQ (FIXME) */
+ /* Mask the MMC irq; counters are managed in SW and registers
+ * are cleared on each read when required. */
dwmac_mmc_intr_all_mask(priv->ioaddr);
- dwmac_mmc_ctrl(priv->ioaddr, mode);
- memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ pr_info(" No MAC Management Counters available");
}
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -807,8 +793,29 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
return 0;
}
-/* New GMAC chips support a new register to indicate the
- * presence of the optional feature/functions.
+/**
+ * stmmac_selec_desc_mode
+ * @priv : private device structure pointer
+ * Description: select the Enhanced/Alternate or Normal descriptors */
+static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
+{
+ if (priv->plat->enh_desc) {
+ pr_info(" Enhanced/Alternate descriptors\n");
+ priv->hw->desc = &enh_desc_ops;
+ } else {
+ pr_info(" Normal descriptors\n");
+ priv->hw->desc = &ndesc_ops;
+ }
+}
+
+/**
+ * stmmac_get_hw_features
+ * @priv : private device pointer
+ * Description:
+ * new GMAC chip generations have a register to indicate the
+ * presence of the optional features/functions.
+ * This can also be used to override values passed through the
+ * platform code, which is necessary for old MAC10/100 and GMAC chips.
*/
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
@@ -829,7 +836,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
(hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
priv->dma_cap.pmt_magic_frame =
(hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
- /*MMC*/
+ /* MMC */
priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
/* IEEE 1588-2002*/
priv->dma_cap.time_stamp =
@@ -857,13 +864,59 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
priv->dma_cap.enh_desc =
(hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
- } else
- pr_debug("\tNo HW DMA feature register supported");
+ }
return hw_cap;
}
/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: this attaches the GMAC or MAC 10/100
+ * main core structures, which will be completed during the
+ * open step.
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ struct mac_device_info *device;
+
+ if (priv->plat->has_gmac)
+ device = dwmac1000_setup(priv->ioaddr);
+ else
+ device = dwmac100_setup(priv->ioaddr);
+
+ if (!device)
+ return -ENOMEM;
+
+ priv->hw = device;
+ priv->hw->ring = &ring_mode_ops;
+
+ if (device_can_wakeup(priv->device)) {
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(priv->wol_irq);
+ }
+
+ return 0;
+}
+
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+ /* verify if the MAC address is valid, in case of failures it
+ * generates a random MAC address */
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ priv->hw->mac->get_umac_addr((void __iomem *)
+ priv->dev->base_addr,
+ priv->dev->dev_addr, 0);
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
+ random_ether_addr(priv->dev->dev_addr);
+ }
+ pr_warning("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
+}
+
+/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
@@ -877,18 +930,28 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- /* Check that the MAC address is valid. If its not, refuse
- * to bring the device up. The user must specify an
- * address using the following linux command:
- * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
- pr_warning("%s: generated random MAC address %pM\n", dev->name,
- dev->dev_addr);
- }
+ /* MAC HW device setup */
+ ret = stmmac_mac_device_setup(dev);
+ if (ret < 0)
+ return ret;
+
+ stmmac_check_ether_addr(priv);
stmmac_verify_args();
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(dev);
+ if (ret < 0) {
+ pr_debug("%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ return ret;
+ }
+
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
@@ -913,6 +976,44 @@ static int stmmac_open(struct net_device *dev)
goto open_error;
}
+ stmmac_get_synopsys_id(priv);
+
+ priv->hw_cap_support = stmmac_get_hw_features(priv);
+
+ if (priv->hw_cap_support) {
+ pr_info(" Support DMA HW capability register");
+
+ /* We can override some gmac/dma configuration fields: e.g.
+ * enh_desc, tx_coe (e.g. that are passed through the
+ * platform) with the values from the HW capability
+ * register (if supported).
+ */
+ priv->plat->enh_desc = priv->dma_cap.enh_desc;
+ priv->plat->tx_coe = priv->dma_cap.tx_coe;
+ priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+
+ /* By default disable wol on magic frame if not supported */
+ if (!priv->dma_cap.pmt_magic_frame)
+ priv->wolopts &= ~WAKE_MAGIC;
+
+ } else
+ pr_info(" No HW DMA feature register supported");
+
+ /* Select the enhanced/normal descriptor structures */
+ stmmac_selec_desc_mode(priv);
+
+ /* PMT module is not integrated in all the MAC devices. */
+ if (priv->plat->pmt) {
+ pr_info(" Remote wake-up capable\n");
+ device_set_wakeup_capable(priv->device, 1);
+ }
+
+ priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+ if (priv->rx_coe)
+ pr_info(" Checksum Offload Engine supported\n");
+ if (priv->plat->tx_coe)
+ pr_info(" Checksum insertion supported\n");
+
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
@@ -935,15 +1036,6 @@ static int stmmac_open(struct net_device *dev)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->ioaddr);
- stmmac_get_synopsys_id(priv);
-
- stmmac_get_hw_features(priv);
-
- priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
- if (priv->rx_coe)
- pr_info("stmmac: Rx Checksum Offload Engine supported\n");
- if (priv->plat->tx_coe)
- pr_info("\tTX Checksum insertion supported\n");
netdev_update_features(dev);
/* Request the IRQ lines */
@@ -956,7 +1048,7 @@ static int stmmac_open(struct net_device *dev)
}
/* Enable the MAC Rx/Tx */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -965,9 +1057,13 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- if (priv->dma_cap.rmon)
- stmmac_mmc_setup(priv);
+ stmmac_mmc_setup(priv);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warning("\tFailed debugFS registration");
+#endif
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
priv->hw->dma->start_tx(priv->ioaddr);
@@ -1040,10 +1136,15 @@ static int stmmac_release(struct net_device *dev)
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(dev);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ stmmac_exit_fs();
+#endif
+ stmmac_mdio_unregister(dev);
+
return 0;
}
@@ -1419,7 +1520,8 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1489,9 +1591,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!priv->phydev)
return -EINVAL;
- spin_lock(&priv->lock);
ret = phy_mii_ioctl(priv->phydev, rq, cmd);
- spin_unlock(&priv->lock);
return ret;
}
@@ -1558,7 +1658,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
- if (!stmmac_get_hw_features(priv)) {
+ if (!priv->hw_cap_support) {
seq_printf(seq, "DMA HW features not supported\n");
return 0;
}
@@ -1689,28 +1789,41 @@ static const struct net_device_ops stmmac_netdev_ops = {
};
/**
- * stmmac_probe - Initialization of the adapter .
- * @dev : device pointer
- * Description: The function initializes the network device structure for
- * the STMMAC driver. It also calls the low level routines
- * in order to init the HW (i.e. the DMA engine)
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * Description: this is the main probe function used to
+ * call alloc_etherdev and allocate the private structure.
*/
-static int stmmac_probe(struct net_device *dev)
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat)
{
int ret = 0;
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+
+ ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ if (!ndev) {
+ pr_err("%s: ERROR: allocating the device\n", __func__);
+ return NULL;
+ }
+
+ SET_NETDEV_DEV(ndev, device);
+
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
- ether_setup(dev);
+ ether_setup(ndev);
- dev->netdev_ops = &stmmac_netdev_ops;
- stmmac_set_ethtool_ops(dev);
+ ndev->netdev_ops = &stmmac_netdev_ops;
+ stmmac_set_ethtool_ops(ndev);
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
- dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
- dev->features |= NETIF_F_HW_VLAN_RX;
+ ndev->features |= NETIF_F_HW_VLAN_RX;
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -1718,272 +1831,73 @@ static int stmmac_probe(struct net_device *dev)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
priv->pause = pause;
- netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
-
- /* Get the MAC address */
- priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
- dev->dev_addr, 0);
-
- if (!is_valid_ether_addr(dev->dev_addr))
- pr_warning("\tno valid MAC address;"
- "please, use ifconfig or nwhwconfig!\n");
+ priv->plat = plat_dat;
+ netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->tx_lock);
- ret = register_netdev(dev);
+ ret = register_netdev(ndev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
__func__, ret);
- return -ENODEV;
+ goto error;
}
DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
- dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
- (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
-
- return ret;
-}
-
-/**
- * stmmac_mac_device_setup
- * @dev : device pointer
- * Description: select and initialise the mac device (mac100 or Gmac).
- */
-static int stmmac_mac_device_setup(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- struct mac_device_info *device;
-
- if (priv->plat->has_gmac) {
- dev->priv_flags |= IFF_UNICAST_FLT;
- device = dwmac1000_setup(priv->ioaddr);
- } else {
- device = dwmac100_setup(priv->ioaddr);
- }
-
- if (!device)
- return -ENOMEM;
-
- if (priv->plat->enh_desc) {
- device->desc = &enh_desc_ops;
- pr_info("\tEnhanced descriptor structure\n");
- } else
- device->desc = &ndesc_ops;
-
- priv->hw = device;
- priv->hw->ring = &ring_mode_ops;
-
- if (device_can_wakeup(priv->device)) {
- priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- enable_irq_wake(priv->wol_irq);
- }
-
- return 0;
-}
-
-/**
- * stmmac_dvr_probe
- * @pdev: platform device pointer
- * Description: the driver is initialized through platform_device.
- */
-static int stmmac_dvr_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct resource *res;
- void __iomem *addr = NULL;
- struct net_device *ndev = NULL;
- struct stmmac_priv *priv = NULL;
- struct plat_stmmacenet_data *plat_dat;
-
- pr_info("STMMAC driver:\n\tplatform registration... ");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- pr_info("\tdone!\n");
-
- if (!request_mem_region(res->start, resource_size(res),
- pdev->name)) {
- pr_err("%s: ERROR: memory allocation failed"
- "cannot get the I/O addr 0x%x\n",
- __func__, (unsigned int)res->start);
- return -EBUSY;
- }
-
- addr = ioremap(res->start, resource_size(res));
- if (!addr) {
- pr_err("%s: ERROR: memory mapping failed\n", __func__);
- ret = -ENOMEM;
- goto out_release_region;
- }
-
- ndev = alloc_etherdev(sizeof(struct stmmac_priv));
- if (!ndev) {
- pr_err("%s: ERROR: allocating the device\n", __func__);
- ret = -ENOMEM;
- goto out_unmap;
- }
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- /* Get the MAC information */
- ndev->irq = platform_get_irq_byname(pdev, "macirq");
- if (ndev->irq == -ENXIO) {
- pr_err("%s: ERROR: MAC IRQ configuration "
- "information not found\n", __func__);
- ret = -ENXIO;
- goto out_free_ndev;
- }
-
- priv = netdev_priv(ndev);
- priv->device = &(pdev->dev);
- priv->dev = ndev;
- plat_dat = pdev->dev.platform_data;
-
- priv->plat = plat_dat;
-
- priv->ioaddr = addr;
-
- /* PMT module is not integrated in all the MAC devices. */
- if (plat_dat->pmt) {
- pr_info("\tPMT module supported\n");
- device_set_wakeup_capable(&pdev->dev, 1);
- }
- /*
- * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
- * The external wake up irq can be passed through the platform code
- * named as "eth_wake_irq"
- *
- * In case the wake up interrupt is not passed from the platform
- * so the driver will continue to use the mac irq (ndev->irq)
- */
- priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq == -ENXIO)
- priv->wol_irq = ndev->irq;
-
-
- platform_set_drvdata(pdev, ndev);
-
- /* Set the I/O base addr */
- ndev->base_addr = (unsigned long)addr;
+ ndev->name, (ndev->features & NETIF_F_SG) ? "on" : "off",
+ (ndev->features & NETIF_F_IP_CSUM) ? "on" : "off");
- /* Custom initialisation */
- if (priv->plat->init) {
- ret = priv->plat->init(pdev);
- if (unlikely(ret))
- goto out_free_ndev;
- }
-
- /* MAC HW revice detection */
- ret = stmmac_mac_device_setup(ndev);
- if (ret < 0)
- goto out_plat_exit;
-
- /* Network Device Registration */
- ret = stmmac_probe(ndev);
- if (ret < 0)
- goto out_plat_exit;
-
- /* Override with kernel parameters if supplied XXX CRS XXX
- * this needs to have multiple instances */
- if ((phyaddr >= 0) && (phyaddr <= 31))
- priv->plat->phy_addr = phyaddr;
-
- pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
- "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
- pdev->id, ndev->irq, addr);
-
- /* MDIO bus Registration */
- pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
- ret = stmmac_mdio_register(ndev);
- if (ret < 0)
- goto out_unregister;
- pr_debug("registered!\n");
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- pr_warning("\tFailed debugFS registration");
-#endif
+ return priv;
- return 0;
+error:
+ netif_napi_del(&priv->napi);
-out_unregister:
unregister_netdev(ndev);
-out_plat_exit:
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-out_free_ndev:
free_netdev(ndev);
- platform_set_drvdata(pdev, NULL);
-out_unmap:
- iounmap(addr);
-out_release_region:
- release_mem_region(res->start, resource_size(res));
- return ret;
+ return NULL;
}
/**
* stmmac_dvr_remove
- * @pdev: platform device pointer
+ * @ndev: net device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
- * changes the link status, releases the DMA descriptor rings,
- * unregisters the MDIO bus and unmaps the allocated memory.
+ * changes the link status, releases the DMA descriptor rings.
*/
-static int stmmac_dvr_remove(struct platform_device *pdev)
+int stmmac_dvr_remove(struct net_device *ndev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_disable_mac(priv->ioaddr);
-
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
-
- stmmac_mdio_unregister(ndev);
-
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-
- platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
-
- iounmap((void *)priv->ioaddr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- stmmac_exit_fs();
-#endif
-
free_netdev(ndev);
return 0;
}
#ifdef CONFIG_PM
-static int stmmac_suspend(struct device *dev)
+int stmmac_suspend(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
int dis_ic = 0;
if (!ndev || !netif_running(ndev))
return 0;
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+
spin_lock(&priv->lock);
netif_device_detach(ndev);
netif_stop_queue(ndev);
- if (priv->phydev)
- phy_stop(priv->phydev);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_stop();
@@ -2004,15 +1918,14 @@ static int stmmac_suspend(struct device *dev)
if (device_may_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
else
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
spin_unlock(&priv->lock);
return 0;
}
-static int stmmac_resume(struct device *dev)
+int stmmac_resume(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
if (!netif_running(ndev))
@@ -2031,7 +1944,7 @@ static int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
/* Enable the MAC and DMA */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
@@ -2041,77 +1954,33 @@ static int stmmac_resume(struct device *dev)
#endif
napi_enable(&priv->napi);
- if (priv->phydev)
- phy_start(priv->phydev);
-
netif_start_queue(ndev);
spin_unlock(&priv->lock);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
return 0;
}
-static int stmmac_freeze(struct device *dev)
+int stmmac_freeze(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_release(ndev);
}
-static int stmmac_restore(struct device *dev)
+int stmmac_restore(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_open(ndev);
}
-
-static const struct dev_pm_ops stmmac_pm_ops = {
- .suspend = stmmac_suspend,
- .resume = stmmac_resume,
- .freeze = stmmac_freeze,
- .thaw = stmmac_restore,
- .restore = stmmac_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pm_ops;
#endif /* CONFIG_PM */
-static struct platform_driver stmmac_driver = {
- .probe = stmmac_dvr_probe,
- .remove = stmmac_dvr_remove,
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
- .pm = &stmmac_pm_ops,
- },
-};
-
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
- int ret;
-
- ret = platform_driver_register(&stmmac_driver);
- return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
- platform_driver_unregister(&stmmac_driver);
-}
-
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -2171,9 +2040,6 @@ err:
__setup("stmmaceth=", stmmac_cmdline_opt);
#endif
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 9c3b9d5c341..51f44123396 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -109,6 +109,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
*/
static int stmmac_mdio_reset(struct mii_bus *bus)
{
+#if defined(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
@@ -123,7 +124,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
* on MDC, so perform a dummy mdio read.
*/
writel(0, priv->ioaddr + mii_address);
-
+#endif
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644
index 00000000000..54a819a3648
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -0,0 +1,221 @@
+/*******************************************************************************
+ This contains the functions to handle the pci driver.
+
+ Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+
+static void stmmac_default_data(void)
+{
+ memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+ plat_dat.bus_id = 1;
+ plat_dat.phy_addr = 0;
+ plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+ plat_dat.pbl = 32;
+ plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat_dat.has_gmac = 1;
+ plat_dat.force_sf_dma_mode = 1;
+
+ mdio_data.bus_id = 1;
+ mdio_data.phy_reset = NULL;
+ mdio_data.phy_mask = 0;
+ plat_dat.mdio_bus_data = &mdio_data;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver chooses
+ * to take "ownership" of the device, or a negative error code otherwise.
+ */
+static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ int i;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+ pci_name(pdev));
+ return ret;
+ }
+ if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+ pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+ ret = -ENODEV;
+ goto err_out_req_reg_failed;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i <= 5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ addr = pci_iomap(pdev, i, 0);
+ if (addr == NULL) {
+ pr_err("%s: ERROR: cannot map regiser memory, aborting",
+ __func__);
+ ret = -EIO;
+ goto err_out_map_failed;
+ }
+ break;
+ }
+ pci_set_master(pdev);
+
+ stmmac_default_data();
+
+ priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat);
+ if (!priv) {
+ pr_err("%s: main drivr probe failed", __func__);
+ goto err_out;
+ }
+ priv->ioaddr = addr;
+ priv->dev->base_addr = (unsigned long)addr;
+ priv->dev->irq = pdev->irq;
+ priv->wol_irq = pdev->irq;
+
+ pci_set_drvdata(pdev, priv->dev);
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+err_out:
+ pci_clear_master(pdev);
+err_out_map_failed:
+ pci_release_regions(pdev);
+err_out_req_reg_failed:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+/**
+ * stmmac_dvr_remove
+ *
+ * @pdev: platform device pointer
+ * Description: this function calls the main to free the net resources
+ * and releases the PCI resources.
+ */
+static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_dvr_remove(ndev);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, priv->ioaddr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = stmmac_suspend(ndev);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return ret;
+}
+
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+ {
+ PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {
+ }
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .id_table = stmmac_id_table,
+ .probe = stmmac_pci_probe,
+ .remove = __devexit_p(stmmac_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = stmmac_pci_suspend,
+ .resume = stmmac_pci_resume,
+#endif
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&stmmac_driver);
+ if (ret < 0)
+ pr_err("%s: ERROR: driver registration failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+ pci_unregister_driver(&stmmac_driver);
+}
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644
index 00000000000..7b1594f4944
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -0,0 +1,198 @@
+/*******************************************************************************
+ This contains the functions to handle the platform driver.
+
+ Copyright (C) 2007-2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include "stmmac.h"
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main driver probe to init
+ * the net device, register the mdio bus, etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat_dat;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ pr_err("%s: ERROR: memory allocation failed"
+ "cannot get the I/O addr 0x%x\n",
+ __func__, (unsigned int)res->start);
+ return -EBUSY;
+ }
+
+ addr = ioremap(res->start, resource_size(res));
+ if (!addr) {
+ pr_err("%s: ERROR: memory mapping failed", __func__);
+ ret = -ENOMEM;
+ goto out_release_region;
+ }
+ plat_dat = pdev->dev.platform_data;
+ priv = stmmac_dvr_probe(&(pdev->dev), plat_dat);
+ if (!priv) {
+ pr_err("%s: main drivr probe failed", __func__);
+ goto out_release_region;
+ }
+
+ priv->ioaddr = addr;
+ /* Set the I/O base addr */
+ priv->dev->base_addr = (unsigned long)addr;
+
+ /* Get the MAC information */
+ priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (priv->dev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+
+ /*
+ * On some platforms, e.g. SPEAr, the wake-up irq differs from the mac irq.
+ * The external wake-up irq can be passed through the platform code,
+ * named "eth_wake_irq".
+ *
+ * If the wake-up interrupt is not passed from the platform,
+ * the driver will continue to use the mac irq (ndev->irq).
+ */
+ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (priv->wol_irq == -ENXIO)
+ priv->wol_irq = priv->dev->irq;
+
+ platform_set_drvdata(pdev, priv->dev);
+
+ /* Custom initialisation */
+ if (priv->plat->init) {
+ ret = priv->plat->init(pdev);
+ if (unlikely(ret))
+ goto out_unmap;
+ }
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+out_unmap:
+ iounmap(addr);
+ platform_set_drvdata(pdev, NULL);
+
+out_release_region:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main to free the net resources
+ * and calls the platform's exit hook and releases the resources (e.g. mem).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+ int ret = stmmac_dvr_remove(ndev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ iounmap((void *)priv->ioaddr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_suspend(ndev);
+}
+
+static int stmmac_pltfr_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_resume(ndev);
+}
+
+int stmmac_pltfr_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_freeze(ndev);
+}
+
+int stmmac_pltfr_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_restore(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ .suspend = stmmac_pltfr_suspend,
+ .resume = stmmac_pltfr_resume,
+ .freeze = stmmac_pltfr_freeze,
+ .thaw = stmmac_pltfr_restore,
+ .restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pltfr_pm_ops,
+ },
+};
+
+module_platform_driver(stmmac_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index fd40988c19a..f10665f594c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4532,10 +4532,9 @@ static void cas_set_multicast(struct net_device *dev)
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
- strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
- info->fw_version[0] = '\0';
- strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
cp->casreg_len : CAS_MAX_REGS;
info->n_stats = CAS_NUM_STAT_KEYS;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 73c708107a3..cf433931304 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1151,19 +1151,8 @@ static int link_status_mii(struct niu *np, int *link_up_p)
supported |= SUPPORTED_1000baseT_Full;
lp->supported = supported;
- advertising = 0;
- if (advert & ADVERTISE_10HALF)
- advertising |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- advertising |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- advertising |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- advertising |= ADVERTISED_100baseT_Full;
- if (ctrl1000 & ADVERTISE_1000HALF)
- advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- advertising |= ADVERTISED_1000baseT_Full;
+ advertising = mii_adv_to_ethtool_adv_t(advert);
+ advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmcr & BMCR_ANENABLE) {
int neg, neg1000;
@@ -3609,6 +3598,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
struct netdev_queue *txq;
+ unsigned int tx_bytes;
u16 pkt_cnt, tmp;
int cons, index;
u64 cs;
@@ -3631,12 +3621,18 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
netif_printk(np, tx_done, KERN_DEBUG, np->dev,
"%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
- while (pkt_cnt--)
+ tx_bytes = 0;
+ tmp = pkt_cnt;
+ while (tmp--) {
+ tx_bytes += rp->tx_buffs[cons].skb->len;
cons = release_tx_packet(np, rp, cons);
+ }
rp->cons = cons;
smp_mb();
+ netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4337,6 +4333,7 @@ static void niu_free_channels(struct niu *np)
struct tx_ring_info *rp = &np->tx_rings[i];
niu_free_tx_ring_info(np, rp);
+ netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
}
kfree(np->tx_rings);
np->tx_rings = NULL;
@@ -6742,6 +6739,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
prod = NEXT_TX(rp, prod);
}
+ netdev_tx_sent_queue(txq, skb->len);
+
if (prod < rp->prod)
rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp->prod = prod;
@@ -6823,12 +6822,13 @@ static void niu_get_drvinfo(struct net_device *dev,
struct niu *np = netdev_priv(dev);
struct niu_vpd *vpd = &np->vpd;
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- sprintf(info->fw_version, "%d.%d",
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
vpd->fcode_major, vpd->fcode_minor);
if (np->parent->plat_type != PLAT_TYPE_NIU)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
}
static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -8589,9 +8589,11 @@ static int __devinit phy_record(struct niu_parent *parent,
if (dev_id_1 < 0 || dev_id_2 < 0)
return 0;
if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+ /* Because of the NIU_PHY_ID_MASK being applied, the 8704
+ * test covers the 8706 as well.
+ */
if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
return 0;
} else {
if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
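The netdev_tx_sent_queue()/netdev_tx_completed_queue()/netdev_tx_reset_queue() calls added to niu above are the byte queue limits (BQL) hooks. Stripped of the niu specifics, the accounting pattern looks roughly like this (queue index 0 assumed for simplicity; names are placeholders):

#include <linux/netdevice.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... post skb to the hardware TX ring here ... */

	/* Tell BQL how many bytes are now in flight */
	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}

static void example_tx_clean(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... reclaim completed descriptors, summing pkts and bytes ... */

	/* Report completions so BQL can release the queued bytes */
	netdev_tx_completed_queue(txq, pkts, bytes);
}

static void example_ring_free(struct net_device *dev)
{
	/* Drop any outstanding accounting when the ring is torn down */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
}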
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d8cfd9ea05..220f724c337 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1293,15 +1293,4 @@ static struct platform_driver bigmac_sbus_driver = {
.remove = __devexit_p(bigmac_sbus_remove),
};
-static int __init bigmac_init(void)
-{
- return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
- platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
+module_platform_driver(bigmac_sbus_driver);
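module_platform_driver(), used above to replace the hand-written init/exit pair, is a convenience macro; for a static struct platform_driver named example_driver (assumed declared elsewhere) it expands to roughly the following:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Roughly what module_platform_driver(example_driver) generates */
static int __init example_driver_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_driver_exit);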
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index ceab215bb4a..31441a870b0 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct gem *gp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(gp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index c517dac02ae..09c518655db 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct happy_meal *hp = netdev_priv(dev);
- strcpy(info->driver, "sunhme");
- strcpy(info->version, "2.02");
+ strlcpy(info->driver, "sunhme", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
if (hp->happy_flags & HFLAG_PCI) {
struct pci_dev *pdev = hp->happy_dev;
- strcpy(info->bus_info, pci_name(pdev));
+ strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
}
#ifdef CONFIG_SBUS
else {
@@ -2469,7 +2469,8 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct platform_device *op = hp->happy_dev;
regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
- sprintf(info->bus_info, "SBUS:%d",
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "SBUS:%d",
regs->which_io);
}
#endif
@@ -2637,7 +2638,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
sbus_dp = op->dev.parent->of_node;
/* We can match PCI devices too, do not accept those here. */
- if (strcmp(sbus_dp->name, "sbus"))
+ if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
return err;
if (is_qfe) {
@@ -2849,7 +2850,7 @@ err_out:
static int is_quattro_p(struct pci_dev *pdev)
{
struct pci_dev *busdev = pdev->bus->self;
- struct list_head *tmp;
+ struct pci_dev *this_pdev;
int n_hmes;
if (busdev == NULL ||
@@ -2858,15 +2859,10 @@ static int is_quattro_p(struct pci_dev *pdev)
return 0;
n_hmes = 0;
- tmp = pdev->bus->devices.next;
- while (tmp != &pdev->bus->devices) {
- struct pci_dev *this_pdev = pci_dev_b(tmp);
-
+ list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
n_hmes++;
-
- tmp = tmp->next;
}
if (n_hmes != 4)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 3a90af6d111..4b19e9b0606 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
* @ndev network device
* @vid VLAN vid to add
*/
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
__bdx_vlan_rx_vid(ndev, vid, 1);
+ return 0;
}
/*
@@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
* @ndev network device
* @vid VLAN vid to kill
*/
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
__bdx_vlan_rx_vid(ndev, vid, 0);
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d3369cd..c97d2f59085 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
/* handle completed packets */
+ spin_unlock_irqrestore(&chan->lock, flags);
do {
ret = __cpdma_chan_process(chan);
if (ret < 0)
break;
} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+ spin_lock_irqsave(&chan->lock, flags);
/* remaining packets haven't been tx/rx'ed, clean them up */
while (chan->head) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 815c7970261..794ac30a577 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -115,6 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_NUM_DESC (128)
+#define EMAC_DEF_TX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -336,6 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
+ atomic_t cur_tx;
const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
@@ -1044,6 +1046,9 @@ static void emac_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ atomic_dec(&priv->cur_tx);
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
@@ -1092,6 +1097,9 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
goto fail_tx;
}
+ if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
fail_tx:
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2..6b75063988e 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
goto done;
/* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
/* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
@@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev)
sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
panic("Failed to stop LIPP/LEPP!\n");
- priv->partly_opened = 0;
+ priv->partly_opened = false;
}
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
info->napi_enabled = true;
/* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
}
@@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev)
priv->network_cpus_count, priv->network_cpus_credits);
#endif
- priv->partly_opened = 1;
+ priv->partly_opened = true;
} else {
/* FIXME: Is this possible? */
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
for (i = 0; i < sh->nr_frags; i++) {
skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(f->page);
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
/* FIXME: Compute "hash_for_home" properly. */
/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
/* FIXME: Hmmm. */
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(f->page));
+ BUG_ON(PageHighMem(skb_frag_page(f)));
finv_buffer_remote(va, f->size, 0);
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a8df7eca095..a9ce01bafd2 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1688,18 +1688,6 @@ static void tsi108_timed_checker(unsigned long dev_ptr)
mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
}
-static int tsi108_ether_init(void)
-{
- int ret;
- ret = platform_driver_register (&tsi_eth_driver);
- if (ret < 0){
- printk("tsi108_ether_init: error initializing ethernet "
- "device\n");
- return ret;
- }
- return 0;
-}
-
static int tsi108_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -1714,13 +1702,7 @@ static int tsi108_ether_remove(struct platform_device *pdev)
return 0;
}
-static void tsi108_ether_exit(void)
-{
- platform_driver_unregister(&tsi_eth_driver);
-}
-
-module_init(tsi108_ether_init);
-module_exit(tsi108_ether_exit);
+module_platform_driver(tsi_eth_driver);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f34dd99fe57..5c4983b2870 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -35,6 +35,7 @@
#define DRV_VERSION "1.5.0"
#define DRV_RELDATE "2010-10-09"
+#include <linux/types.h>
/* A few user-configurable values.
These may be modified when a driver module is loaded. */
@@ -55,7 +56,7 @@ static int rx_copybreak;
/* Work-around for broken BIOSes: they are unable to get the chip back out of
power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
-static int avoid_D3;
+static bool avoid_D3;
/*
* In case you are looking for 'options[]' or 'full_duplex[]', they
@@ -488,8 +489,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
@@ -1261,7 +1262,7 @@ static void rhine_update_vcam(struct net_device *dev)
rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1269,9 +1270,10 @@ static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
spin_unlock_irq(&rp->lock);
+ return 0;
}
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1279,6 +1281,7 @@ static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
spin_unlock_irq(&rp->lock);
+ return 0;
}
static void init_registers(struct net_device *dev)
@@ -2009,9 +2012,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct rhine_private *rp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(rp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2320,7 +2323,7 @@ static int __init rhine_init(void)
#endif
if (dmi_check_system(rhine_dmi_table)) {
/* these BIOSes fail at PXE boot if chip is in D3 */
- avoid_D3 = 1;
+ avoid_D3 = true;
pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
}
else if (avoid_D3)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4535d7cc848..4128d6b8cc2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
clear_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
@@ -3270,9 +3272,9 @@ static int velocity_set_settings(struct net_device *dev,
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct velocity_info *vptr = netdev_priv(dev);
- strcpy(info->driver, VELOCITY_NAME);
- strcpy(info->version, VELOCITY_VERSION);
- strcpy(info->bus_info, pci_name(vptr->pdev));
+ strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+ strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index caf3659e173..f21addb1db9 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -114,6 +114,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
return;
temac_iow(lp, XTE_LSW0_OFFSET, value);
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
+ temac_indirect_busywait(lp);
}
/**
@@ -203,6 +204,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
struct temac_local *lp = netdev_priv(ndev);
int i;
+ /* Reset Local Link (DMA) */
+ lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+
for (i = 0; i < RX_BD_NUM; i++) {
if (!lp->rx_skb[i])
break;
@@ -233,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb) {
dev_err(&ndev->dev,
"can't allocate memory for DMA RX buffer\n");
@@ -860,6 +864,8 @@ static int temac_open(struct net_device *ndev)
phy_start(lp->phy_dev);
}
+ temac_device_reset(ndev);
+
rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
if (rc)
goto err_tx_irq;
@@ -867,7 +873,6 @@ static int temac_open(struct net_device *ndev)
if (rc)
goto err_rx_irq;
- temac_device_reset(ndev);
return 0;
err_rx_irq:
@@ -915,12 +920,26 @@ temac_poll_controller(struct net_device *ndev)
}
#endif
+static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!lp->phy_dev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -1072,7 +1091,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
of_node_put(np); /* Finished with the DMA node; drop the reference */
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if (!lp->rx_irq || !lp->tx_irq) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
goto err_iounmap_2;
@@ -1162,17 +1181,7 @@ static struct platform_driver temac_of_driver = {
},
};
-static int __init temac_init(void)
-{
- return platform_driver_register(&temac_of_driver);
-}
-module_init(temac_init);
-
-static void __exit temac_exit(void)
-{
- platform_driver_unregister(&temac_of_driver);
-}
-module_exit(temac_exit);
+module_platform_driver(temac_of_driver);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 8018d7d045b..79013e5731a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
- bool tx_complete = 0;
+ bool tx_complete = false;
struct net_device *dev = dev_id;
struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
@@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* If there was a Tx interrupt, call the Tx Handler */
@@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
/* Get IRQ for the device */
rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
- if (rc == NO_IRQ) {
+ if (!rc) {
dev_err(dev, "no IRQ found\n");
return rc;
}
@@ -1303,27 +1303,7 @@ static struct platform_driver xemaclite_of_driver = {
.remove = __devexit_p(xemaclite_of_remove),
};
-/**
- * xgpiopss_init - Initial driver registration call
- *
- * Return: 0 upon success, or a negative error upon failure.
- */
-static int __init xemaclite_init(void)
-{
- /* No kernel boot options used, we just need to register the driver */
- return platform_driver_register(&xemaclite_of_driver);
-}
-
-/**
- * xemaclite_cleanup - Driver un-registration call
- */
-static void __exit xemaclite_cleanup(void)
-{
- platform_driver_unregister(&xemaclite_of_driver);
-}
-
-module_init(xemaclite_init);
-module_exit(xemaclite_cleanup);
+module_platform_driver(xemaclite_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bbe8b7dbf3f..33979c3ac94 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1411,7 +1411,7 @@ do_open(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "xirc2ps_cs");
+ strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig
index 7393eb732ee..95eb34fdbba 100644
--- a/drivers/net/hippi/Kconfig
+++ b/drivers/net/hippi/Kconfig
@@ -36,4 +36,4 @@ config ROADRUNNER_LARGE_RINGS
kernel code or by user space programs. Say Y here only if you have
the memory.
-endif /* HIPPI */
+endif # HIPPI
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 46b5f5fd686..e05b645bbc3 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
+#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 9d4ce1aba10..a561ae44a9a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -806,18 +806,7 @@ static struct platform_driver bfin_ir_driver = {
},
};
-static int __init bfin_sir_init(void)
-{
- return platform_driver_register(&bfin_ir_driver);
-}
-
-static void __exit bfin_sir_exit(void)
-{
- platform_driver_unregister(&bfin_ir_driver);
-}
-
-module_init(bfin_sir_init);
-module_exit(bfin_sir_exit);
+module_platform_driver(bfin_ir_driver);
module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b45b2cc4280..64f403da101 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME;
static int max_baud = 4000000;
#ifdef USE_PROBE
-static int do_probe = 0;
+static bool do_probe = false;
#endif
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index d0851dfa037..81d5275a15e 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -966,18 +966,7 @@ static struct platform_driver pxa_ir_driver = {
.resume = pxa_irda_resume,
};
-static int __init pxa_irda_init(void)
-{
- return platform_driver_register(&pxa_ir_driver);
-}
-
-static void __exit pxa_irda_exit(void)
-{
- platform_driver_unregister(&pxa_ir_driver);
-}
-
-module_init(pxa_irda_init);
-module_exit(pxa_irda_exit);
+module_platform_driver(pxa_ir_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index d275e276e74..725d6b36782 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -873,18 +873,7 @@ static struct platform_driver sh_irda_driver = {
},
};
-static int __init sh_irda_init(void)
-{
- return platform_driver_register(&sh_irda_driver);
-}
-
-static void __exit sh_irda_exit(void)
-{
- platform_driver_unregister(&sh_irda_driver);
-}
-
-module_init(sh_irda_init);
-module_exit(sh_irda_exit);
+module_platform_driver(sh_irda_driver);
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index ed7d7d62bf6..e6661b5c1f8 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -808,18 +808,7 @@ static struct platform_driver sh_sir_driver = {
},
};
-static int __init sh_sir_init(void)
-{
- return platform_driver_register(&sh_sir_driver);
-}
-
-static void __exit sh_sir_exit(void)
-{
- platform_driver_unregister(&sh_sir_driver);
-}
-
-module_init(sh_sir_init);
-module_exit(sh_sir_exit);
+module_platform_driver(sh_sir_driver);
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8b1c3484d27..6c95d4087b2 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
MODULE_LICENSE("GPL");
-static int smsc_nopnp = 1;
+static bool smsc_nopnp = true;
module_param_named(nopnp, smsc_nopnp, bool, 0);
MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 4ce9e5f2c06..b71998d0b5b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
| NETIF_F_ALL_TSO
| NETIF_F_UFO
- | NETIF_F_NO_CSUM
+ | NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 74134970b70..f2f820c4b40 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -26,6 +26,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <net/rtnetlink.h>
@@ -520,26 +521,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
return stats;
}
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_add_vid)
- ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+ return vlan_vid_add(lowerdev, vid);
}
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_kill_vid)
- ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+ vlan_vid_del(lowerdev, vid);
+ return 0;
}
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1b7082d08f3..58dc117a8d7 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
if (vlan) {
int index = get_slot(vlan, q);
- rcu_assign_pointer(vlan->taps[index], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[index], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
sock_put(&q->sk);
--vlan->numvtaps;
}
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
if (!numvtaps)
goto out;
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
+
if (likely(skb_rx_queue_recorded(skb))) {
rxq = skb_get_rx_queue(skb);
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
}
- /* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
- if (rxq) {
- tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
- if (tap)
- goto out;
- }
-
/* Everything failed - find first available queue */
for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
tap = rcu_dereference(vlan->taps[rxq]);
@@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev)
lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
- rcu_assign_pointer(vlan->taps[i], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[i], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
vlan->numvtaps--;
}
}
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c62e7816d54..c70c2332d15 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -35,26 +35,11 @@
static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
{
- u32 result = 0;
int advert;
advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
- if (advert & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
- if (advert & ADVERTISE_10HALF)
- result |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- result |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- result |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- result |= ADVERTISED_100baseT_Full;
- if (advert & ADVERTISE_PAUSE_CAP)
- result |= ADVERTISED_Pause;
- if (advert & ADVERTISE_PAUSE_ASYM)
- result |= ADVERTISED_Asym_Pause;
-
- return result;
+
+ return mii_lpa_to_ethtool_lpa_t(advert);
}
/**
@@ -104,19 +89,14 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (ctrl1000 & ADVERTISE_1000HALF)
- ecmd->advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (mii->supports_gmii)
+ ecmd->advertising |=
+ mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmsr & BMSR_ANEGCOMPLETE) {
ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
- if (stat1000 & LPA_1000HALF)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Half;
- if (stat1000 & LPA_1000FULL)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Full;
+ ecmd->lp_advertising |=
+ mii_stat1000_to_ethtool_lpa_t(stat1000);
} else {
ecmd->lp_advertising = 0;
}
@@ -204,20 +184,11 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
}
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
- tmp |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
- tmp |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
- tmp |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
- tmp |= ADVERTISE_100FULL;
- if (mii->supports_gmii) {
- if (ecmd->advertising & ADVERTISED_1000baseT_Half)
- tmp2 |= ADVERTISE_1000HALF;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
- tmp2 |= ADVERTISE_1000FULL;
- }
+ tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);
+
+ if (mii->supports_gmii)
+ tmp2 |=
+ ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
if (advert != tmp) {
mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
mii->advertising = tmp;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bb88e12101c..fbdcdf83cbf 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig PHYLIB
- bool "PHY Device support and infrastructure"
+ tristate "PHY Device support and infrastructure"
depends on !S390
depends on NETDEVICES
help
@@ -131,3 +131,7 @@ config MDIO_OCTEON
If in doubt, say Y.
endif # PHYLIB
+
+config MICREL_KS8995MA
+	tristate "Micrel KS8995MA 5-port 10/100 managed Ethernet switch"
+ depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2333215bbb3..e15c83fecbe 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 65391891d8c..daec9b05d16 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -202,6 +202,14 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
return 0;
}
+static int mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ if (ctrl->reset)
+ ctrl->reset(bus);
+ return 0;
+}
+
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
struct mii_bus *bus;
@@ -214,6 +222,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
bus->read = mdiobb_read;
bus->write = mdiobb_write;
+ bus->reset = mdiobb_reset;
bus->priv = ctrl;
return bus;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 2843c90f712..89c5a3eccc1 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -95,6 +95,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
goto out;
bitbang->ctrl.ops = &mdio_gpio_ops;
+ bitbang->ctrl.reset = pdata->reset;
bitbang->mdc = pdata->mdc;
bitbang->mdio = pdata->mdio;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 83a5a5afec6..f320f466f03 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -563,20 +563,9 @@ static int genphy_config_advert(struct phy_device *phydev)
if (adv < 0)
return adv;
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
- if (advertise & ADVERTISED_10baseT_Half)
- adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
- if (advertise & ADVERTISED_Pause)
- adv |= ADVERTISE_PAUSE_CAP;
- if (advertise & ADVERTISED_Asym_Pause)
- adv |= ADVERTISE_PAUSE_ASYM;
+ adv |= ethtool_adv_to_mii_adv_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -595,10 +584,7 @@ static int genphy_config_advert(struct phy_device *phydev)
return adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (advertise & SUPPORTED_1000baseT_Half)
- adv |= ADVERTISE_1000HALF;
- if (advertise & SUPPORTED_1000baseT_Full)
- adv |= ADVERTISE_1000FULL;
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_CTRL1000, adv);
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
new file mode 100644
index 00000000000..116a2dd7c87
--- /dev/null
+++ b/drivers/net/phy/spi_ks8995.c
@@ -0,0 +1,375 @@
+/*
+ * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ * Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION "0.1.1"
+#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0 0x00 /* Chip ID0 */
+#define KS8995_REG_ID1 0x01 /* Chip ID1 */
+
+#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+#define KS8995_REG_GC1 0x03 /* Global Control 1 */
+#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+#define KS8995_REG_GC3 0x05 /* Global Control 3 */
+#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+#define KS8995_REG_GC5 0x07 /* Global Control 5 */
+#define KS8995_REG_GC6 0x08 /* Global Control 6 */
+#define KS8995_REG_GC7 0x09 /* Global Control 7 */
+#define KS8995_REG_GC8 0x0a /* Global Control 8 */
+#define KS8995_REG_GC9 0x0b /* Global Control 9 */
+
+#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */
+#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */
+
+#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
+#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
+#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
+#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
+#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
+#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
+
+#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+
+#define KS8995_REGS_SIZE 0x80
+
+#define ID1_CHIPID_M 0xf
+#define ID1_CHIPID_S 4
+#define ID1_REVISION_M 0x7
+#define ID1_REVISION_S 1
+#define ID1_START_SW 1 /* start the switch */
+
+#define FAMILY_KS8995 0x95
+#define CHIPID_M 0
+
+#define KS8995_CMD_WRITE 0x02U
+#define KS8995_CMD_READ 0x03U
+
+#define KS8995_RESET_DELAY 10 /* usec */
+
+struct ks8995_pdata {
+ /* not yet implemented */
+};
+
+struct ks8995_switch {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct ks8995_pdata *pdata;
+};
+
+static inline u8 get_chip_id(u8 val)
+{
+ return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+ return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* ------------------------------------------------------------------------ */
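+/*
+ * Register access helpers: each access is a single spi_sync() message made
+ * of a two-byte header (command opcode, register offset) followed by the
+ * data buffer; ks->lock serialises concurrent accesses.
+ */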
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_READ;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_WRITE;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+ return (ks8995_read(ks, buf, addr, 1) != 1);
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+ char buf = val;
+
+ return (ks8995_write(ks, &buf, addr, 1) != 1);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
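+/*
+ * Soft-reset the switch: clear the start bit in ID1, wait KS8995_RESET_DELAY
+ * microseconds for the part to settle, then set the start bit again.
+ */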
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+ int err;
+
+ err = ks8995_stop(ks);
+ if (err)
+ return err;
+
+ udelay(KS8995_RESET_DELAY);
+
+ return ks8995_start(ks);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off > KS8995_REGS_SIZE))
+ return 0;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_read(ks8995, buf, off, count);
+}
+
+
+static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off >= KS8995_REGS_SIZE))
+ return -EFBIG;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_write(ks8995, buf, off, count);
+}
+
+
+static struct bin_attribute ks8995_registers_attr = {
+ .attr = {
+ .name = "registers",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = KS8995_REGS_SIZE,
+ .read = ks8995_registers_read,
+ .write = ks8995_registers_write,
+};
+
+/* ------------------------------------------------------------------------ */
+
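+/*
+ * Probe flow: configure the SPI link (mode 0, 8 bits per word), read the two
+ * chip ID registers, accept only the KS8995 family, soft-reset the switch and
+ * expose its register file through the "registers" sysfs binary attribute.
+ */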
+static int __devinit ks8995_probe(struct spi_device *spi)
+{
+ struct ks8995_switch *ks;
+ struct ks8995_pdata *pdata;
+ u8 ids[2];
+ int err;
+
+ /* Chip description */
+ pdata = spi->dev.platform_data;
+
+ ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks) {
+ dev_err(&spi->dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ks->lock);
+ ks->pdata = pdata;
+ ks->spi = spi_dev_get(spi);
+ dev_set_drvdata(&spi->dev, ks);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+ goto err_drvdata;
+ }
+
+ err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
+ if (err < 0) {
+ dev_err(&spi->dev, "unable to read id registers, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ switch (ids[0]) {
+ case FAMILY_KS8995:
+ break;
+ default:
+ dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
+ err = -ENODEV;
+ goto err_drvdata;
+ }
+
+ err = ks8995_reset(ks);
+ if (err)
+ goto err_drvdata;
+
+ err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+ if (err) {
+ dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
+ "Revision:%01x\n", ids[0],
+ get_chip_id(ids[1]), get_chip_rev(ids[1]));
+
+ return 0;
+
+err_drvdata:
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks);
+ return err;
+}
+
+static int __devexit ks8995_remove(struct spi_device *spi)
+{
+ struct ks8995_switch *ks8995;
+
+ ks8995 = dev_get_drvdata(&spi->dev);
+ sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks8995);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct spi_driver ks8995_driver = {
+ .driver = {
+ .name = "spi-ks8995",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8995_probe,
+ .remove = __devexit_p(ks8995_remove),
+};
+
+static int __init ks8995_init(void)
+{
+ printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+
+ return spi_register_driver(&ks8995_driver);
+}
+module_init(ks8995_init);
+
+static void __exit ks8995_exit(void)
+{
+ spi_unregister_driver(&ks8995_driver);
+}
+module_exit(ks8995_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f72..c1c9293c2bb 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock)
{
spin_lock(&chan_lock);
clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+ RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
spin_unlock(&chan_lock);
synchronize_rcu();
}
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po)) {
- release_sock(sk);
+ if (add_chan(po))
error = -EBUSY;
- }
release_sock(sk);
return error;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644
index 00000000000..248a144033c
--- /dev/null
+++ b/drivers/net/team/Kconfig
@@ -0,0 +1,43 @@
+menuconfig NET_TEAM
+ tristate "Ethernet team driver support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ ---help---
+ This allows one to create virtual interfaces that team together
+ multiple Ethernet devices.
+
+ Team devices can be added using the "ip" command from the
+ iproute2 package:
+
+ "ip link add link [ address MAC ] [ NAME ] type team"
+
+ To compile this driver as a module, choose M here: the module
+ will be called team.
+
+if NET_TEAM
+
+config NET_TEAM_MODE_ROUNDROBIN
+ tristate "Round-robin mode support"
+ depends on NET_TEAM
+ ---help---
+ Basic mode where the port used for transmitting packets is selected
+ in round-robin fashion using a packet counter.
+
+ All added ports are set up to use the team device's MAC address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_roundrobin.
+
+config NET_TEAM_MODE_ACTIVEBACKUP
+ tristate "Active-backup mode support"
+ depends on NET_TEAM
+ ---help---
+ Only one port is active at a time; the rest of the ports are kept
+ as backups.
+
+ MAC addresses of the ports are not modified. Userspace is
+ responsible for doing so.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_activebackup.
+
+endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644
index 00000000000..85f2028a87a
--- /dev/null
+++ b/drivers/net/team/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the network team driver
+#
+
+obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644
index 00000000000..ed2a862b835
--- /dev/null
+++ b/drivers/net/team/team.c
@@ -0,0 +1,1684 @@
+/*
+ * net/drivers/team/team.c - Network team device driver
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/if_team.h>
+
+#define DRV_NAME "team"
+
+
+/**********
+ * Helpers
+ **********/
+
+#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
+
+static struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ struct team_port *port = rcu_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+{
+ struct team_port *port = rtnl_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+/*
+ * Since the ability to change the MAC address of an open port device is
+ * tested in team_port_add, this function can be called without checking
+ * the return value.
+ */
+static int __set_port_mac(struct net_device *port_dev,
+ const unsigned char *dev_addr)
+{
+ struct sockaddr addr;
+
+ memcpy(addr.sa_data, dev_addr, ETH_ALEN);
+ addr.sa_family = ARPHRD_ETHER;
+ return dev_set_mac_address(port_dev, &addr);
+}
+
+int team_port_set_orig_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->orig.dev_addr);
+}
+
+int team_port_set_team_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->team->dev->dev_addr);
+}
+EXPORT_SYMBOL(team_port_set_team_mac);
+
+
+/*******************
+ * Options handling
+ *******************/
+
+struct team_option *__team_find_option(struct team *team, const char *opt_name)
+{
+ struct team_option *option;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ if (strcmp(option->name, opt_name) == 0)
+ return option;
+ }
+ return NULL;
+}
+
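+/*
+ * Register an array of options with the team instance. Each entry is
+ * duplicated with kmemdup() and appended to team->option_list; if any name
+ * is already registered or an allocation fails, the whole batch is rolled
+ * back and an error is returned.
+ */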
+int team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+ struct team_option **dst_opts;
+ int err;
+
+ dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+ GFP_KERNEL);
+ if (!dst_opts)
+ return -ENOMEM;
+ for (i = 0; i < option_count; i++, option++) {
+ if (__team_find_option(team, option->name)) {
+ err = -EEXIST;
+ goto rollback;
+ }
+ dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
+ if (!dst_opts[i]) {
+ err = -ENOMEM;
+ goto rollback;
+ }
+ }
+
+ for (i = 0; i < option_count; i++)
+ list_add_tail(&dst_opts[i]->list, &team->option_list);
+
+ kfree(dst_opts);
+ return 0;
+
+rollback:
+ for (i = 0; i < option_count; i++)
+ kfree(dst_opts[i]);
+
+ kfree(dst_opts);
+ return err;
+}
+
+EXPORT_SYMBOL(team_options_register);
+
+static void __team_options_change_check(struct team *team,
+ struct team_option *changed_option);
+
+static void __team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+
+ for (i = 0; i < option_count; i++, option++) {
+ struct team_option *del_opt;
+
+ del_opt = __team_find_option(team, option->name);
+ if (del_opt) {
+ list_del(&del_opt->list);
+ kfree(del_opt);
+ }
+ }
+}
+
+void team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ __team_options_unregister(team, option, option_count);
+ __team_options_change_check(team, NULL);
+}
+EXPORT_SYMBOL(team_options_unregister);
+
+static int team_option_get(struct team *team, struct team_option *option,
+ void *arg)
+{
+ return option->getter(team, arg);
+}
+
+static int team_option_set(struct team *team, struct team_option *option,
+ void *arg)
+{
+ int err;
+
+ err = option->setter(team, arg);
+ if (err)
+ return err;
+
+ __team_options_change_check(team, option);
+ return err;
+}
+
+/****************
+ * Mode handling
+ ****************/
+
+static LIST_HEAD(mode_list);
+static DEFINE_SPINLOCK(mode_list_lock);
+
+static struct team_mode *__find_mode(const char *kind)
+{
+ struct team_mode *mode;
+
+ list_for_each_entry(mode, &mode_list, list) {
+ if (strcmp(mode->kind, kind) == 0)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool is_good_mode_name(const char *name)
+{
+ while (*name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return false;
+ name++;
+ }
+ return true;
+}
+
+int team_mode_register(struct team_mode *mode)
+{
+ int err = 0;
+
+ if (!is_good_mode_name(mode->kind) ||
+ mode->priv_size > TEAM_MODE_PRIV_SIZE)
+ return -EINVAL;
+ spin_lock(&mode_list_lock);
+ if (__find_mode(mode->kind)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+ list_add_tail(&mode->list, &mode_list);
+unlock:
+ spin_unlock(&mode_list_lock);
+ return err;
+}
+EXPORT_SYMBOL(team_mode_register);
+
+int team_mode_unregister(struct team_mode *mode)
+{
+ spin_lock(&mode_list_lock);
+ list_del_init(&mode->list);
+ spin_unlock(&mode_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(team_mode_unregister);
+
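+/*
+ * Look up a mode by kind. If it is not registered yet, try to load it via
+ * request_module("team-mode-<kind>") and look again; a reference on the
+ * owning module is taken before the mode is returned.
+ */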
+static struct team_mode *team_mode_get(const char *kind)
+{
+ struct team_mode *mode;
+
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ if (!mode) {
+ spin_unlock(&mode_list_lock);
+ request_module("team-mode-%s", kind);
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ }
+ if (mode)
+ if (!try_module_get(mode->owner))
+ mode = NULL;
+
+ spin_unlock(&mode_list_lock);
+ return mode;
+}
+
+static void team_mode_put(const struct team_mode *mode)
+{
+ module_put(mode->owner);
+}
+
+static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static rx_handler_result_t team_dummy_receive(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb)
+{
+ return RX_HANDLER_ANOTHER;
+}
+
+static void team_adjust_ops(struct team *team)
+{
+ /*
+ * To avoid checks in rx/tx skb paths, ensure here that non-null and
+ * correct ops are always set.
+ */
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->transmit)
+ team->ops.transmit = team_dummy_transmit;
+ else
+ team->ops.transmit = team->mode->ops->transmit;
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->receive)
+ team->ops.receive = team_dummy_receive;
+ else
+ team->ops.receive = team->mode->ops->receive;
+}
+
+/*
+ * We can benefit from the fact that it is guaranteed that no port is
+ * present at the time of a mode change. Therefore no packets are in
+ * flight, so there is no need to set the mode operations in any special
+ * way.
+ */
+static int __team_change_mode(struct team *team,
+ const struct team_mode *new_mode)
+{
+ /* Check if mode was previously set and do cleanup if so */
+ if (team->mode) {
+ void (*exit_op)(struct team *team) = team->ops.exit;
+
+ /* Clear ops area so no callback is called any longer */
+ memset(&team->ops, 0, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ if (exit_op)
+ exit_op(team);
+ team_mode_put(team->mode);
+ team->mode = NULL;
+ /* zero private data area */
+ memset(&team->mode_priv, 0,
+ sizeof(struct team) - offsetof(struct team, mode_priv));
+ }
+
+ if (!new_mode)
+ return 0;
+
+ if (new_mode->ops->init) {
+ int err;
+
+ err = new_mode->ops->init(team);
+ if (err)
+ return err;
+ }
+
+ team->mode = new_mode;
+ memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ return 0;
+}
+
+static int team_change_mode(struct team *team, const char *kind)
+{
+ struct team_mode *new_mode;
+ struct net_device *dev = team->dev;
+ int err;
+
+ if (!list_empty(&team->port_list)) {
+ netdev_err(dev, "No ports can be present during mode change\n");
+ return -EBUSY;
+ }
+
+ if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+ netdev_err(dev, "Unable to change to the same mode the team is in\n");
+ return -EINVAL;
+ }
+
+ new_mode = team_mode_get(kind);
+ if (!new_mode) {
+ netdev_err(dev, "Mode \"%s\" not found\n", kind);
+ return -EINVAL;
+ }
+
+ err = __team_change_mode(team, new_mode);
+ if (err) {
+ netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
+ team_mode_put(new_mode);
+ return err;
+ }
+
+ netdev_info(dev, "Mode changed to \"%s\"\n", kind);
+ return 0;
+}
+
+
+/************************
+ * Rx path frame handler
+ ************************/
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct team_port *port;
+ struct team *team;
+ rx_handler_result_t res;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+
+ *pskb = skb;
+
+ port = team_port_get_rcu(skb->dev);
+ team = port->team;
+
+ res = team->ops.receive(team, port, skb);
+ if (res == RX_HANDLER_ANOTHER) {
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ pcpu_stats->rx_multicast++;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->dev = team->dev;
+ } else {
+ this_cpu_inc(team->pcpu_stats->rx_dropped);
+ }
+
+ return res;
+}
+
+
+/****************
+ * Port handling
+ ****************/
+
+static bool team_port_find(const struct team *team,
+ const struct team_port *port)
+{
+ struct team_port *cur;
+
+ list_for_each_entry(cur, &team->port_list, list)
+ if (cur == port)
+ return true;
+ return false;
+}
+
+/*
+ * Add/delete port to the team port list. Write guarded by rtnl_lock.
+ * Takes care of correct port->index setup (might be racy).
+ */
+static void team_port_list_add_port(struct team *team,
+ struct team_port *port)
+{
+ port->index = team->port_count++;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ list_add_tail_rcu(&port->list, &team->port_list);
+}
+
+static void __reconstruct_port_hlist(struct team *team, int rm_index)
+{
+ int i;
+ struct team_port *port;
+
+ for (i = rm_index + 1; i < team->port_count; i++) {
+ port = team_get_port_by_index(team, i);
+ hlist_del_rcu(&port->hlist);
+ port->index--;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ }
+}
+
+static void team_port_list_del_port(struct team *team,
+ struct team_port *port)
+{
+ int rm_index = port->index;
+
+ hlist_del_rcu(&port->hlist);
+ list_del_rcu(&port->list);
+ __reconstruct_port_hlist(team, rm_index);
+ team->port_count--;
+}
+
+#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
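+/*
+ * Recompute the team device's vlan_features (folded over the ports with
+ * netdev_increment_features(), limited to TEAM_VLAN_FEATURES) and
+ * hard_header_len (maximum over all ports), then notify the stack via
+ * netdev_change_features().
+ */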
+static void __team_compute_features(struct team *team)
+{
+ struct team_port *port;
+ u32 vlan_features = TEAM_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ vlan_features = netdev_increment_features(vlan_features,
+ port->dev->vlan_features,
+ TEAM_VLAN_FEATURES);
+
+ if (port->dev->hard_header_len > max_hard_header_len)
+ max_hard_header_len = port->dev->hard_header_len;
+ }
+
+ team->dev->vlan_features = vlan_features;
+ team->dev->hard_header_len = max_hard_header_len;
+
+ netdev_change_features(team->dev);
+}
+
+static void team_compute_features(struct team *team)
+{
+ mutex_lock(&team->lock);
+ __team_compute_features(team);
+ mutex_unlock(&team->lock);
+}
+
+static int team_port_enter(struct team *team, struct team_port *port)
+{
+ int err = 0;
+
+ dev_hold(team->dev);
+ port->dev->priv_flags |= IFF_TEAM_PORT;
+ if (team->ops.port_enter) {
+ err = team->ops.port_enter(team, port);
+ if (err) {
+ netdev_err(team->dev, "Device %s failed to enter team mode\n",
+ port->dev->name);
+ goto err_port_enter;
+ }
+ }
+
+ return 0;
+
+err_port_enter:
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+
+ return err;
+}
+
+static void team_port_leave(struct team *team, struct team_port *port)
+{
+ if (team->ops.port_leave)
+ team->ops.port_leave(team, port);
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+}
+
+static void __team_port_change_check(struct team_port *port, bool linkup);
+
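+/*
+ * Enslave a device: validate it (Ethernet, not loopback, not already a
+ * port, currently down), match its MTU to the team's, save its original
+ * MAC address, enter the current mode, open it, copy the team's VLAN IDs
+ * to it, set the team as master and register the rx handler. Every step
+ * is unwound on failure.
+ */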
+static int team_port_add(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+ int err;
+
+ if (port_dev->flags & IFF_LOOPBACK ||
+ port_dev->type != ARPHRD_ETHER) {
+ netdev_err(dev, "Device %s is of an unsupported type\n",
+ portname);
+ return -EINVAL;
+ }
+
+ if (team_port_exists(port_dev)) {
+ netdev_err(dev, "Device %s is already a port "
+ "of a team device\n", portname);
+ return -EBUSY;
+ }
+
+ if (port_dev->flags & IFF_UP) {
+ netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
+ portname);
+ return -EBUSY;
+ }
+
+ port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = port_dev;
+ port->team = team;
+
+ port->orig.mtu = port_dev->mtu;
+ err = dev_set_mtu(port_dev, dev->mtu);
+ if (err) {
+ netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+ goto err_set_mtu;
+ }
+
+ memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+
+ err = team_port_enter(team, port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to enter team mode\n",
+ portname);
+ goto err_port_enter;
+ }
+
+ err = dev_open(port_dev);
+ if (err) {
+ netdev_dbg(dev, "Device %s opening failed\n",
+ portname);
+ goto err_dev_open;
+ }
+
+ err = vlan_vids_add_by_dev(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Failed to add vlan ids to device %s\n",
+ portname);
+ goto err_vids_add;
+ }
+
+ err = netdev_set_master(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Device %s failed to set master\n", portname);
+ goto err_set_master;
+ }
+
+ err = netdev_rx_handler_register(port_dev, team_handle_frame,
+ port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to register rx_handler\n",
+ portname);
+ goto err_handler_register;
+ }
+
+ team_port_list_add_port(team, port);
+ team_adjust_ops(team);
+ __team_compute_features(team);
+ __team_port_change_check(port, !!netif_carrier_ok(port_dev));
+
+ netdev_info(dev, "Port device %s added\n", portname);
+
+ return 0;
+
+err_handler_register:
+ netdev_set_master(port_dev, NULL);
+
+err_set_master:
+ vlan_vids_del_by_dev(port_dev, dev);
+
+err_vids_add:
+ dev_close(port_dev);
+
+err_dev_open:
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+
+err_port_enter:
+ dev_set_mtu(port_dev, port->orig.mtu);
+
+err_set_mtu:
+ kfree(port);
+
+ return err;
+}
+
+static int team_port_del(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+
+ port = team_port_get_rtnl(port_dev);
+ if (!port || !team_port_find(team, port)) {
+ netdev_err(dev, "Device %s does not act as a port of this team\n",
+ portname);
+ return -ENOENT;
+ }
+
+ __team_port_change_check(port, false);
+ team_port_list_del_port(team, port);
+ team_adjust_ops(team);
+ netdev_rx_handler_unregister(port_dev);
+ netdev_set_master(port_dev, NULL);
+ vlan_vids_del_by_dev(port_dev, dev);
+ dev_close(port_dev);
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+ dev_set_mtu(port_dev, port->orig.mtu);
+ synchronize_rcu();
+ kfree(port);
+ netdev_info(dev, "Port device %s removed\n", portname);
+ __team_compute_features(team);
+
+ return 0;
+}
+
+
+/*****************
+ * Net device ops
+ *****************/
+
+static const char team_no_mode_kind[] = "*NOMODE*";
+
+static int team_mode_option_get(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ *str = team->mode ? team->mode->kind : team_no_mode_kind;
+ return 0;
+}
+
+static int team_mode_option_set(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ return team_change_mode(team, *str);
+}
+
+static const struct team_option team_options[] = {
+ {
+ .name = "mode",
+ .type = TEAM_OPTION_TYPE_STRING,
+ .getter = team_mode_option_get,
+ .setter = team_mode_option_set,
+ },
+};
+
+static int team_init(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ int i;
+ int err;
+
+ team->dev = dev;
+ mutex_init(&team->lock);
+
+ team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ if (!team->pcpu_stats)
+ return -ENOMEM;
+
+ for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
+ INIT_HLIST_HEAD(&team->port_hlist[i]);
+ INIT_LIST_HEAD(&team->port_list);
+
+ team_adjust_ops(team);
+
+ INIT_LIST_HEAD(&team->option_list);
+ err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
+ if (err)
+ goto err_options_register;
+ netif_carrier_off(dev);
+
+ return 0;
+
+err_options_register:
+ free_percpu(team->pcpu_stats);
+
+ return err;
+}
+
+static void team_uninit(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct team_port *tmp;
+
+ mutex_lock(&team->lock);
+ list_for_each_entry_safe(port, tmp, &team->port_list, list)
+ team_port_del(team, port->dev);
+
+ __team_change_mode(team, NULL); /* cleanup */
+ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ mutex_unlock(&team->lock);
+}
+
+static void team_destructor(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+
+ free_percpu(team->pcpu_stats);
+ free_netdev(dev);
+}
+
+static int team_open(struct net_device *dev)
+{
+ netif_carrier_on(dev);
+ return 0;
+}
+
+static int team_close(struct net_device *dev)
+{
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ bool tx_success = false;
+ unsigned int len = skb->len;
+
+ tx_success = team->ops.transmit(team, skb);
+ if (tx_success) {
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(team->pcpu_stats->tx_dropped);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void team_change_rx_flags(struct net_device *dev, int change)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int inc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (change & IFF_PROMISC) {
+ inc = dev->flags & IFF_PROMISC ? 1 : -1;
+ dev_set_promiscuity(port->dev, inc);
+ }
+ if (change & IFF_ALLMULTI) {
+ inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
+ dev_set_allmulti(port->dev, inc);
+ }
+ }
+ rcu_read_unlock();
+}
+
+static void team_set_rx_mode(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ dev_uc_sync(port->dev, dev);
+ dev_mc_sync(port->dev, dev);
+ }
+ rcu_read_unlock();
+}
+
+static int team_set_mac_address(struct net_device *dev, void *p)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ if (team->ops.port_change_mac)
+ team->ops.port_change_mac(team, port);
+ rcu_read_unlock();
+ return 0;
+}
+
+static int team_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it's guarded by the team lock. It's not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = dev_set_mtu(port->dev, new_mtu);
+ if (err) {
+ netdev_err(dev, "Device %s failed to change mtu",
+ port->dev->name);
+ goto unwind;
+ }
+ }
+ mutex_unlock(&team->lock);
+
+ dev->mtu = new_mtu;
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ dev_set_mtu(port->dev, dev->mtu);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(team->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ rx_multicast = p->rx_multicast;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->multicast += rx_multicast;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /*
+ * rx_dropped & tx_dropped are u32, updated
+ * without syncp protection.
+ */
+ rx_dropped += p->rx_dropped;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
+static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it's guarded by the team lock. It's not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = vlan_vid_add(port->dev, vid);
+ if (err)
+ goto unwind;
+ }
+ mutex_unlock(&team->lock);
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
+
+static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_add(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_del(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static netdev_features_t team_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct team_port *port;
+ struct team *team = netdev_priv(dev);
+ netdev_features_t mask;
+
+ mask = features;
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+ }
+ rcu_read_unlock();
+ return features;
+}
+
+static const struct net_device_ops team_netdev_ops = {
+ .ndo_init = team_init,
+ .ndo_uninit = team_uninit,
+ .ndo_open = team_open,
+ .ndo_stop = team_close,
+ .ndo_start_xmit = team_xmit,
+ .ndo_change_rx_flags = team_change_rx_flags,
+ .ndo_set_rx_mode = team_set_rx_mode,
+ .ndo_set_mac_address = team_set_mac_address,
+ .ndo_change_mtu = team_change_mtu,
+ .ndo_get_stats64 = team_get_stats64,
+ .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
+ .ndo_add_slave = team_add_slave,
+ .ndo_del_slave = team_del_slave,
+ .ndo_fix_features = team_fix_features,
+};
+
+
+/***********************
+ * rt netlink interface
+ ***********************/
+
+static void team_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &team_netdev_ops;
+ dev->destructor = team_destructor;
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_MULTICAST;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+
+ /*
+ * Indicate we support unicast address filtering. That way core won't
+ * bring us to promiscuous mode in case a unicast address is added.
+ * Leave this up to the underlying drivers.
+ */
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features = NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ dev->features |= dev->hw_features;
+}
+
+static int team_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int err;
+
+ if (tb[IFLA_ADDRESS] == NULL)
+ random_ether_addr(dev->dev_addr);
+
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int team_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static struct rtnl_link_ops team_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct team),
+ .setup = team_setup,
+ .newlink = team_newlink,
+ .validate = team_validate,
+};
+
+
+/***********************************
+ * Generic netlink custom interface
+ ***********************************/
+
+static struct genl_family team_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = TEAM_GENL_NAME,
+ .version = TEAM_GENL_VERSION,
+ .maxattr = TEAM_ATTR_MAX,
+ .netnsok = true,
+};
+
+static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
+ [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
+ [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
+ [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
+ [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_OPTION_NAME] = {
+ .type = NLA_STRING,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+ [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
+ [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
+ [TEAM_ATTR_OPTION_DATA] = {
+ .type = NLA_BINARY,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+};
+
+static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ &team_nl_family, 0, TEAM_CMD_NOOP);
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto err_msg_put;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_msg_put:
+ nlmsg_free(msg);
+
+ return err;
+}
+
+/*
+ * Netlink cmd functions should be guarded by the following two functions.
+ * Since dev is held here, it is ensured that dev won't disappear in between.
+ */
+static struct team *team_nl_team_get(struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ int ifindex;
+ struct net_device *dev;
+ struct team *team;
+
+ if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
+ return NULL;
+
+ ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev || dev->netdev_ops != &team_netdev_ops) {
+ if (dev)
+ dev_put(dev);
+ return NULL;
+ }
+
+ team = netdev_priv(dev);
+ mutex_lock(&team->lock);
+ return team;
+}
+
+static void team_nl_team_put(struct team *team)
+{
+ mutex_unlock(&team->lock);
+ dev_put(team->dev);
+}
+
+static int team_nl_send_generic(struct genl_info *info, struct team *team,
+ int (*fill_func)(struct sk_buff *skb,
+ struct genl_info *info,
+ int flags, struct team *team))
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = fill_func(skb, info, NLM_F_ACK, team);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_fill_options_get_changed(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team,
+ struct team_option *changed_option)
+{
+ struct nlattr *option_list;
+ void *hdr;
+ struct team_option *option;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_OPTIONS_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+ if (!option_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ struct nlattr *option_item;
+ long arg;
+
+ option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ if (!option_item)
+ goto nla_put_failure;
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
+ if (option == changed_option)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+ switch (option->type) {
+ case TEAM_OPTION_TYPE_U32:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
+ team_option_get(team, option, &arg);
+ NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
+ team_option_get(team, option, &arg);
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
+ (char *) arg);
+ break;
+ default:
+ BUG();
+ }
+ nla_nest_end(skb, option_item);
+ }
+
+ nla_nest_end(skb, option_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_options_get(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_options_get_changed(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, NULL);
+}
+
+static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_options_get);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err = 0;
+ int i;
+ struct nlattr *nl_option;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = -EINVAL;
+ if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+
+ nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
+ struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+ enum team_option_type opt_type;
+ struct team_option *option;
+ char *opt_name;
+ bool opt_found = false;
+
+ if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+ nl_option, team_nl_option_policy);
+ if (err)
+ goto team_put;
+ if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
+ !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
+ !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+ case NLA_U32:
+ opt_type = TEAM_OPTION_TYPE_U32;
+ break;
+ case NLA_STRING:
+ opt_type = TEAM_OPTION_TYPE_STRING;
+ break;
+ default:
+ goto team_put;
+ }
+
+ opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
+ list_for_each_entry(option, &team->option_list, list) {
+ long arg;
+ struct nlattr *opt_data_attr;
+
+ if (option->type != opt_type ||
+ strcmp(option->name, opt_name))
+ continue;
+ opt_found = true;
+ opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+ switch (opt_type) {
+ case TEAM_OPTION_TYPE_U32:
+ arg = nla_get_u32(opt_data_attr);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ arg = (long) nla_data(opt_data_attr);
+ break;
+ default:
+ BUG();
+ }
+ err = team_option_set(team, option, &arg);
+ if (err)
+ goto team_put;
+ }
+ if (!opt_found) {
+ err = -ENOENT;
+ goto team_put;
+ }
+ }
+
+team_put:
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team,
+ struct team_port *changed_port)
+{
+ struct nlattr *port_list;
+ void *hdr;
+ struct team_port *port;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_PORT_LIST_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+ if (!port_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ struct nlattr *port_item;
+
+ port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ if (!port_item)
+ goto nla_put_failure;
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+ if (port == changed_port)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+ if (port->linkup)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
+ NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+ nla_nest_end(skb, port_item);
+ }
+
+ nla_nest_end(skb, port_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, NULL);
+}
+
+static int team_nl_cmd_port_list_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static struct genl_ops team_nl_ops[] = {
+ {
+ .cmd = TEAM_CMD_NOOP,
+ .doit = team_nl_cmd_noop,
+ .policy = team_nl_policy,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_SET,
+ .doit = team_nl_cmd_options_set,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_GET,
+ .doit = team_nl_cmd_options_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_PORT_LIST_GET,
+ .doit = team_nl_cmd_port_list_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+static struct genl_multicast_group team_change_event_mcgrp = {
+ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+};
+
+static int team_nl_send_event_options_get(struct team *team,
+ struct team_option *changed_option)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
+ changed_option);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_send_event_port_list_get(struct team_port *port)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(port->team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
+ port->team, port);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_init(void)
+{
+ int err;
+
+ err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
+ ARRAY_SIZE(team_nl_ops));
+ if (err)
+ return err;
+
+ err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
+ if (err)
+ goto err_change_event_grp_reg;
+
+ return 0;
+
+err_change_event_grp_reg:
+ genl_unregister_family(&team_nl_family);
+
+ return err;
+}
+
+static void team_nl_fini(void)
+{
+ genl_unregister_family(&team_nl_family);
+}
+
+
+/******************
+ * Change checkers
+ ******************/
+
+static void __team_options_change_check(struct team *team,
+ struct team_option *changed_option)
+{
+ int err;
+
+ err = team_nl_send_event_options_get(team, changed_option);
+ if (err)
+ netdev_warn(team->dev, "Failed to send options change via netlink\n");
+}
+
+/* rtnl lock is held */
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+ int err;
+
+ if (port->linkup == linkup)
+ return;
+
+ port->linkup = linkup;
+ if (linkup) {
+ struct ethtool_cmd ecmd;
+
+ err = __ethtool_get_settings(port->dev, &ecmd);
+ if (!err) {
+ port->speed = ethtool_cmd_speed(&ecmd);
+ port->duplex = ecmd.duplex;
+ goto send_event;
+ }
+ }
+ port->speed = 0;
+ port->duplex = 0;
+
+send_event:
+ err = team_nl_send_event_port_list_get(port);
+ if (err)
+ netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
+ port->dev->name);
+
+}
+
+static void team_port_change_check(struct team_port *port, bool linkup)
+{
+ struct team *team = port->team;
+
+ mutex_lock(&team->lock);
+ __team_port_change_check(port, linkup);
+ mutex_unlock(&team->lock);
+}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static int team_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct team_port *port;
+
+ port = team_port_get_rtnl(dev);
+ if (!port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (netif_carrier_ok(dev))
+ team_port_change_check(port, true);
+ case NETDEV_DOWN:
+ team_port_change_check(port, false);
+ case NETDEV_CHANGE:
+ if (netif_running(port->dev))
+ team_port_change_check(port,
+ !!netif_carrier_ok(port->dev));
+ break;
+ case NETDEV_UNREGISTER:
+ team_del_slave(port->team->dev, dev);
+ break;
+ case NETDEV_FEAT_CHANGE:
+ team_compute_features(port->team);
+ break;
+ case NETDEV_CHANGEMTU:
+ /* Forbid changing the MTU of the underlying device */
+ return NOTIFY_BAD;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid changing the type of the underlying device */
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block team_notifier_block __read_mostly = {
+ .notifier_call = team_device_event,
+};
+
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init team_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&team_notifier_block);
+
+ err = rtnl_link_register(&team_link_ops);
+ if (err)
+ goto err_rtnl_reg;
+
+ err = team_nl_init();
+ if (err)
+ goto err_nl_init;
+
+ return 0;
+
+err_nl_init:
+ rtnl_link_unregister(&team_link_ops);
+
+err_rtnl_reg:
+ unregister_netdevice_notifier(&team_notifier_block);
+
+ return err;
+}
+
+static void __exit team_module_exit(void)
+{
+ team_nl_fini();
+ rtnl_link_unregister(&team_link_ops);
+ unregister_netdevice_notifier(&team_notifier_block);
+}
+
+module_init(team_module_init);
+module_exit(team_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Ethernet team device driver");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
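A quick way to see the generic netlink interface above in action is a small userspace query. The sketch below is illustrative only and not part of this patch; it assumes libnl-genl-3 (nl_socket_alloc, genl_connect, genl_ctrl_resolve, nl_send_auto) and that TEAM_GENL_NAME, TEAM_GENL_VERSION and the TEAM_CMD_*/TEAM_ATTR_* constants are visible to userspace via the if_team.h header introduced by this series.

/* Illustrative sketch, not part of this patch: ask the team driver for its
 * option list via TEAM_CMD_OPTIONS_GET using libnl-3. */
#include <errno.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/if_team.h>

int request_team_options(const char *team_ifname)
{
	struct nl_sock *sock;
	struct nl_msg *msg;
	int family, err = -1;

	sock = nl_socket_alloc();
	if (!sock)
		return -ENOMEM;
	if (genl_connect(sock))
		goto out;

	/* Resolve the genl family registered by team_nl_init() above. */
	family = genl_ctrl_resolve(sock, TEAM_GENL_NAME);
	if (family < 0)
		goto out;

	msg = nlmsg_alloc();
	if (!msg)
		goto out;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TEAM_CMD_OPTIONS_GET, TEAM_GENL_VERSION);
	/* The one mandatory attribute: which team device we are asking about. */
	nla_put_u32(msg, TEAM_ATTR_TEAM_IFINDEX, if_nametoindex(team_ifname));

	err = nl_send_auto(sock, msg);
	nlmsg_free(msg);
	if (err < 0)
		goto out;

	/* The kernel unicasts a TEAM_CMD_OPTIONS_GET reply; a real tool would
	 * install a callback here and parse TEAM_ATTR_LIST_OPTION from it. */
	err = nl_recvmsgs_default(sock);
out:
	nl_socket_free(sock);
	return err;
}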
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
new file mode 100644
index 00000000000..f4d960e82e2
--- /dev/null
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -0,0 +1,136 @@
+/*
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_team.h>
+
+struct ab_priv {
+ struct team_port __rcu *active_port;
+};
+
+static struct ab_priv *ab_priv(struct team *team)
+{
+ return (struct ab_priv *) &team->mode_priv;
+}
+
+static rx_handler_result_t ab_receive(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (active_port != port)
+ return RX_HANDLER_EXACT;
+ return RX_HANDLER_ANOTHER;
+}
+
+static bool ab_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (unlikely(!active_port))
+ goto drop;
+ skb->dev = active_port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static void ab_port_leave(struct team *team, struct team_port *port)
+{
+ if (ab_priv(team)->active_port == port)
+ RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+}
+
+static int ab_active_port_get(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+
+ *ifindex = 0;
+ if (ab_priv(team)->active_port)
+ *ifindex = ab_priv(team)->active_port->dev->ifindex;
+ return 0;
+}
+
+static int ab_active_port_set(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+ struct team_port *port;
+
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (port->dev->ifindex == *ifindex) {
+ rcu_assign_pointer(ab_priv(team)->active_port, port);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static const struct team_option ab_options[] = {
+ {
+ .name = "activeport",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = ab_active_port_get,
+ .setter = ab_active_port_set,
+ },
+};
+
+static int ab_init(struct team *team)
+{
+ return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static void ab_exit(struct team *team)
+{
+ team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static const struct team_mode_ops ab_mode_ops = {
+ .init = ab_init,
+ .exit = ab_exit,
+ .receive = ab_receive,
+ .transmit = ab_transmit,
+ .port_leave = ab_port_leave,
+};
+
+static struct team_mode ab_mode = {
+ .kind = "activebackup",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct ab_priv),
+ .ops = &ab_mode_ops,
+};
+
+static int __init ab_init_module(void)
+{
+ return team_mode_register(&ab_mode);
+}
+
+static void __exit ab_cleanup_module(void)
+{
+ team_mode_unregister(&ab_mode);
+}
+
+module_init(ab_init_module);
+module_exit(ab_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Active-backup mode for team");
+MODULE_ALIAS("team-mode-activebackup");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
new file mode 100644
index 00000000000..a0e8f806331
--- /dev/null
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -0,0 +1,107 @@
+/*
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+struct rr_priv {
+ unsigned int sent_packets;
+};
+
+static struct rr_priv *rr_priv(struct team *team)
+{
+ return (struct rr_priv *) &team->mode_priv;
+}
+
+static struct team_port *__get_first_port_up(struct team *team,
+ struct team_port *port)
+{
+ struct team_port *cur;
+
+ if (port->linkup)
+ return port;
+ cur = port;
+ list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+ if (cur->linkup)
+ return cur;
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (cur == port)
+ break;
+ if (cur->linkup)
+ return cur;
+ }
+ return NULL;
+}
+
+static bool rr_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *port;
+ int port_index;
+
+ port_index = rr_priv(team)->sent_packets++ % team->port_count;
+ port = team_get_port_by_index_rcu(team, port_index);
+ port = __get_first_port_up(team, port);
+ if (unlikely(!port))
+ goto drop;
+ skb->dev = port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static int rr_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_mac(port);
+}
+
+static void rr_port_change_mac(struct team *team, struct team_port *port)
+{
+ team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops rr_mode_ops = {
+ .transmit = rr_transmit,
+ .port_enter = rr_port_enter,
+ .port_change_mac = rr_port_change_mac,
+};
+
+static struct team_mode rr_mode = {
+ .kind = "roundrobin",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct rr_priv),
+ .ops = &rr_mode_ops,
+};
+
+static int __init rr_init_module(void)
+{
+ return team_mode_register(&rr_mode);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+ team_mode_unregister(&rr_mode);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Round-robin mode for team");
+MODULE_ALIAS("team-mode-roundrobin");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7bea9c65119..93c5d72711b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,7 +123,7 @@ struct tun_struct {
gid_t group;
struct net_device *dev;
- u32 set_features;
+ netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
struct fasync_struct *fasync;
@@ -454,7 +454,8 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tun_net_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -1196,7 +1197,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
- u32 features = 0;
+ netdev_features_t features = 0;
if (arg & TUN_F_CSUM) {
features |= NETIF_F_HW_CSUM;
@@ -1589,16 +1590,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tun_struct *tun = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
case TUN_TUN_DEV:
- strcpy(info->bus_info, "tun");
+ strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
case TUN_TAP_DEV:
- strcpy(info->bus_info, "tap");
+ strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
}
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e81e22e3d1d..dbdca225b84 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "26-Sep-2011"
+#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -163,7 +163,7 @@
#define MARVELL_CTRL_TXDELAY 0x0002
#define MARVELL_CTRL_RXDELAY 0x0080
-#define PHY_MODE_RTL8211CL 0x0004
+#define PHY_MODE_RTL8211CL 0x000C
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
@@ -652,9 +652,17 @@ static u32 asix_get_phyid(struct usbnet *dev)
{
int phy_reg;
u32 phy_id;
+ int i;
- phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
- if (phy_reg < 0)
+ /* Poll for the rare case the FW or phy isn't ready yet. */
+ for (i = 0; i < 100; i++) {
+ phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+ if (phy_reg != 0 && phy_reg != 0xFFFF)
+ break;
+ mdelay(1);
+ }
+
+ if (phy_reg <= 0 || phy_reg == 0xFFFF)
return 0;
phy_id = (phy_reg & 0xffff) << 16;
@@ -681,6 +689,10 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
}
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = 0;
+ if (opt & AX_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & AX_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
}
static int
@@ -1075,7 +1087,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret;
+ int ret, embd_phy;
struct asix_data *data = (struct asix_data *)&dev->data;
u8 buf[ETH_ALEN];
u32 phyid;
@@ -1100,16 +1112,36 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = asix_get_phy_addr(dev);
- phyid = asix_get_phyid(dev);
- dbg("PHYID=0x%08x", phyid);
-
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
- ret = ax88772_reset(dev);
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+
+ /* Reset the PHY to normal operation mode */
+ ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ dbg("Select PHY #1 failed: %d", ret);
+ return ret;
+ }
+
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
+ if (ret < 0)
+ return ret;
+
+ msleep(150);
+
+ ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
if (ret < 0)
return ret;
+ msleep(150);
+
+ ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+
+ /* Read PHYID register *AFTER* the PHY was reset properly */
+ phyid = asix_get_phyid(dev);
+ dbg("PHYID=0x%08x", phyid);
+
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
/* hard_mtu is still the default - the device does not support
@@ -1220,6 +1252,7 @@ static int ax88178_reset(struct usbnet *dev)
__le16 eeprom;
u8 status;
int gpio0 = 0;
+ u32 phyid;
asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
dbg("GPIO Status: 0x%04x", status);
@@ -1235,12 +1268,13 @@ static int ax88178_reset(struct usbnet *dev)
data->ledmode = 0;
gpio0 = 1;
} else {
- data->phymode = le16_to_cpu(eeprom) & 7;
+ data->phymode = le16_to_cpu(eeprom) & 0x7F;
data->ledmode = le16_to_cpu(eeprom) >> 8;
gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
}
dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
+ /* Power up external GigaPHY through AX88178 GPIO pin */
asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
if ((le16_to_cpu(eeprom) >> 8) != 1) {
asix_write_gpio(dev, 0x003c, 30);
@@ -1252,6 +1286,13 @@ static int ax88178_reset(struct usbnet *dev)
asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
}
+ /* Read PHYID register *AFTER* powering up PHY */
+ phyid = asix_get_phyid(dev);
+ dbg("PHYID=0x%08x", phyid);
+
+ /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */
+ asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL);
+
asix_sw_reset(dev, 0);
msleep(150);
@@ -1396,7 +1437,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
u8 buf[ETH_ALEN];
- u32 phyid;
struct asix_data *data = (struct asix_data *)&dev->data;
data->eeprom_len = AX88772_EEPROM_LEN;
@@ -1423,12 +1463,12 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88178_netdev_ops;
dev->net->ethtool_ops = &ax88178_ethtool_ops;
- phyid = asix_get_phyid(dev);
- dbg("PHYID=0x%08x", phyid);
+ /* Blink LEDs so users know the driver saw the dongle */
+ asix_sw_reset(dev, 0);
+ msleep(150);
- ret = ax88178_reset(dev);
- if (ret < 0)
- return ret;
+ asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
+ msleep(150);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
@@ -1619,6 +1659,10 @@ static const struct usb_device_id products [] = {
// ASIX 88772a
USB_DEVICE(0x0db0, 0xa877),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ // Asus USB Ethernet Adapter
+ USB_DEVICE (0x0b95, 0x7e2b),
+ .driver_info = (unsigned long) &ax88772_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index a60d0069cc4..331e44056f5 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
err = usb_submit_urb(req, gfp_flags);
if (unlikely(err)) {
dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
- netdev_free_page(dev, page);
+ put_page(page);
}
return err;
}
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
dev->stats.rx_errors++;
resubmit:
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- rx_submit(pnd, req, GFP_ATOMIC);
+ rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
}
static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
for (i = 0; i < rxq_size; i++) {
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
- if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+ if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
usbpn_close(dev);
return -ENOMEM;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c924ea2bce0..99ed6eb4dfa 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = {
{
USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&wwan_info,
+ .driver_info = 0,
},
/*
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f06fb78383a..009dd0f1853 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -465,12 +465,10 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
int temp;
u8 iface_no;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return -ENODEV;
- memset(ctx, 0, sizeof(*ctx));
-
init_timer(&ctx->tx_timer);
spin_lock_init(&ctx->mtx);
ctx->netdev = dev->net;
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index d43db32f947..9c26c6390d6 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -144,10 +144,11 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
frame = (struct vl600_frame_hdr *) buf->data;
- /* NOTE: Should check that frame->magic == 0x53544448?
- * Otherwise if we receive garbage at the beginning of the frame
- * we may end up allocating a huge buffer and saving all the
- * future incoming data into it. */
+ /* Yes, check that frame->magic == 0x53544448 (or 0x44544d48),
+ * otherwise we may run out of memory with a bad packet */
+ if (ntohl(frame->magic) != 0x53544448 &&
+ ntohl(frame->magic) != 0x44544d48)
+ goto error;
if (buf->len < sizeof(*frame) ||
buf->len != le32_to_cpup(&frame->len)) {
@@ -296,6 +297,11 @@ encapsulate:
* overwrite the remaining fields.
*/
packet = (struct vl600_pkt_hdr *) skb->data;
+ /* The VL600 wants IPv6 packets to have an IPv4 ethertype
+ * Since this modem only supports IPv4 and IPv6, just set all
+ * frames to 0x0800 (ETH_P_IP)
+ */
+ packet->h_proto = htons(ETH_P_IP);
memset(&packet->dummy, 0, sizeof(packet->dummy));
packet->len = cpu_to_le32(orig_len);
@@ -308,21 +314,12 @@ encapsulate:
if (skb->len < full_len) /* Pad */
skb_put(skb, full_len - skb->len);
- /* The VL600 wants IPv6 packets to have an IPv4 ethertype
- * Check if this is an IPv6 packet, and set the ethertype
- * to 0x800
- */
- if ((skb->data[sizeof(struct vl600_pkt_hdr *) + 0x22] & 0xf0) == 0x60) {
- skb->data[sizeof(struct vl600_pkt_hdr *) + 0x20] = 0x08;
- skb->data[sizeof(struct vl600_pkt_hdr *) + 0x21] = 0;
- }
-
return skb;
}
static const struct driver_info vl600_info = {
.description = "LG VL600 modem",
- .flags = FLAG_ETHER | FLAG_RX_ASSEMBLE,
+ .flags = FLAG_RX_ASSEMBLE | FLAG_WWAN,
.bind = vl600_bind,
.unbind = vl600_unbind,
.status = usbnet_cdc_status,
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 769f5090bda..5d99b8cacd7 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -55,8 +55,8 @@ static const char driver_name[] = "pegasus";
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
-static int loopback;
-static int mii_mode;
+static bool loopback;
+static bool mii_mode;
static char *devid;
static struct usb_eth_dev usb_dev_id[] = {
@@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus)
for (i = 0; i < REG_TIMEOUT; i++) {
get_registers(pegasus, EthCtrl1, 1, &data);
if (~data & 0x08) {
- if (loopback & 1)
+ if (loopback)
break;
if (mii_mode && (pegasus->features & HAS_HOME_PNA))
set_register(pegasus, Gpio1, 0x34);
@@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
data[1] |= 0x10; /* set 100 Mbps */
if (mii_mode)
data[1] = 0;
- data[2] = (loopback & 1) ? 0x09 : 0x01;
+ data[2] = loopback ? 0x09 : 0x01;
memcpy(pegasus->eth_regs, data, sizeof(data));
ret = set_registers(pegasus, EthCtrl0, 3, data);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 22a7cf951e7..0d5da82f0ff 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -51,6 +51,7 @@
#define USB_VENDOR_ID_SMSC (0x0424)
#define USB_PRODUCT_ID_LAN7500 (0x7500)
#define USB_PRODUCT_ID_LAN7505 (0x7505)
+#define RXW_PADDING 2
#define check_warn(ret, fmt, args...) \
({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); })
@@ -75,7 +76,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -727,7 +728,8 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+static int smsc75xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -1088,13 +1090,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
le32_to_cpus(&rx_cmd_b);
- skb_pull(skb, 4 + NET_IP_ALIGN);
+ skb_pull(skb, 4 + RXW_PADDING);
packet = skb->data;
/* get the packet length */
- size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN;
- align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+ size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
+ align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
netif_dbg(dev, rx_err, dev->net,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index eff67678c5a..db217ad66f2 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -59,7 +59,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -516,7 +516,8 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
}
/* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_features(struct net_device *netdev, u32 features)
+static int smsc95xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
u32 read_buf;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ef883e97cee..49f4667e1fa 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,8 +27,8 @@
struct veth_net_stats {
u64 rx_packets;
- u64 tx_packets;
u64 rx_bytes;
+ u64 tx_packets;
u64 tx_bytes;
u64 rx_dropped;
struct u64_stats_sync syncp;
@@ -66,9 +66,8 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -271,7 +270,7 @@ static void veth_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->destructor = veth_dev_free;
- dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6ee8410443c..2055386eda5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,7 +30,7 @@
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
@@ -39,6 +39,7 @@ module_param(gso, bool, 0444);
#define GOOD_COPY_LEN 128
#define VIRTNET_SEND_COMMAND_SG_MAX 2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
struct u64_stats_sync syncp;
@@ -439,7 +440,13 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
int err;
@@ -501,7 +508,7 @@ static void refill_work(struct work_struct *work)
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
+ queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -520,7 +527,7 @@ again:
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
}
/* Out of packets? */
@@ -699,6 +706,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
}
tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
@@ -719,6 +727,10 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure we have some buffers: if OOM, use the workqueue. */
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
virtnet_napi_enable(vi);
return 0;
}
@@ -772,6 +784,8 @@ static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&vi->refill);
napi_disable(&vi->napi);
return 0;
@@ -853,7 +867,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
kfree(buf);
}
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -863,9 +877,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+ return 0;
}
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -875,6 +890,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+ return 0;
}
static void virtnet_get_ringparam(struct net_device *dev,
@@ -889,7 +905,21 @@ static void virtnet_get_ringparam(struct net_device *dev,
}
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtio_device *vdev = vi->vdev;
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
+ .get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
};
@@ -1082,7 +1112,6 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
- cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
free_stats:
@@ -1121,9 +1150,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
-
unregister_netdev(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d96bfb1ac20..de7fc345148 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
}
-static void
+static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1943,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1965,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
@@ -2163,7 +2167,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = i % adapter->num_rx_queues;
+ rssConf->indTable[i] = ethtool_rxfh_indir_default(
+ i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = sizeof(*rssConf);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e662cbc8bfb..a3eb75a62ea 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -202,14 +202,9 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
- drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
- drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
-
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
ETHTOOL_BUSINFO_LEN);
@@ -262,11 +257,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
-int vmxnet3_set_features(struct net_device *netdev, u32 features)
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
if (features & NETIF_F_RXCSUM)
@@ -570,44 +565,38 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
}
#ifdef VMXNET3_RSS
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ return rssConf->indTableSize;
+}
+
static int
-vmxnet3_get_rss_indir(struct net_device *netdev,
- struct ethtool_rxfh_indir *p)
+vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+ unsigned int n = rssConf->indTableSize;
- p->size = rssConf->indTableSize;
while (n--)
- p->ring_index[n] = rssConf->indTable[n];
+ p[n] = rssConf->indTable[n];
return 0;
}
static int
-vmxnet3_set_rss_indir(struct net_device *netdev,
- const struct ethtool_rxfh_indir *p)
+vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
{
unsigned int i;
unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- if (p->size != rssConf->indTableSize)
- return -EINVAL;
- for (i = 0; i < rssConf->indTableSize; i++) {
- /*
- * Return with error code if any of the queue indices
- * is out of range
- */
- if (p->ring_index[i] < 0 ||
- p->ring_index[i] >= adapter->num_rx_queues)
- return -EINVAL;
- }
-
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = p->ring_index[i];
+ rssConf->indTable[i] = p[i];
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -634,6 +623,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.set_ringparam = vmxnet3_set_ringparam,
.get_rxnfc = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
+ .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh_indir = vmxnet3_get_rss_indir,
.set_rxfh_indir = vmxnet3_set_rss_indir,
#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index b18eac1dcca..ed54797db19 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -401,7 +401,7 @@ void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
-vmxnet3_set_features(struct net_device *netdev, u32 features);
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 783168cce07..d43f4efd3e0 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -155,7 +155,7 @@ static int emancipate( struct net_device * );
static const char version[] =
"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
-static int skip_pci_probe __initdata = 0;
+static bool skip_pci_probe __initdata = false;
static int scandone __initdata = 0;
static int num __initdata = 0;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0b4fd05e150..4f774847898 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -362,7 +362,7 @@ static int io=0x238;
static int txdma=1;
static int rxdma=3;
static int irq=5;
-static int slow=0;
+static bool slow=false;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 4b9ecb20dee..f20886ade1c 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
- bool try_head = 0;
+ bool try_head = false;
BUG_ON(i2400m->tx_msg != NULL);
/*
* In certain situations, TX queue might have enough space to
@@ -580,7 +580,7 @@ try_head:
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
- try_head = 1;
+ try_head = true;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
unsigned long flags;
size_t padded_len;
void *ptr;
- bool try_head = 0;
+ bool try_head = false;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
@@ -771,7 +771,7 @@ try_new:
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
- try_head = 1;
+ try_head = true;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index ac357acfb3e..99ef81b3d5a 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -177,7 +177,6 @@ retry:
static
int i2400mu_txd(void *_i2400mu)
{
- int result = 0;
struct i2400mu *i2400mu = _i2400mu;
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = &i2400mu->usb_iface->dev;
@@ -208,16 +207,14 @@ int i2400mu_txd(void *_i2400mu)
/* Yeah, we ignore errors ... not much we can do */
i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */
- if (result < 0)
- break;
}
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400mu->tx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
- d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
- return result;
+ d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+ return 0;
}
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ac1176a4f46..1c008c61b95 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+ emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
emmh32_final(&context->seed, (u8*)&mic->mic);
/* New Type/length ?????????? */
@@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
- emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);
+ emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);
//Calculate MIC
emmh32_final(&context->seed, digest);
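
Both MIC hunks replace open-coded offsets with pointer-past-the-struct notation: (u8 *)(frame + 1) is the first byte after the two-address Ethernet header, the same location as frame->da + ETH_ALEN * 2 but independent of the field layout. Illustratively, with a stand-in struct:

struct two_addr_hdr {
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
};

static const u8 *payload_of(const struct two_addr_hdr *hdr)
{
	/* points just past the header, i.e. the same address as
	 * hdr->da + 2 * ETH_ALEN */
	return (const u8 *)(hdr + 1);
}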
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index e564e585b22..c2b2518c2ec 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -914,7 +914,7 @@ enum ath5k_dmasize {
*/
#define AR5K_KEYCACHE_SIZE 8
-extern int ath5k_modparam_nohwcrypt;
+extern bool ath5k_modparam_nohwcrypt;
/***********************\
HW RELATED DEFINITIONS
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 178a4dd1031..d366dadcf86 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -68,15 +68,15 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-int ath5k_modparam_nohwcrypt;
+bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_all_channels;
+static bool modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
-static int modparam_fastchanswitch;
+static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 4aed3a3ab10..250db40b751 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -1159,7 +1159,7 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
*/
if (fast && (ah->ah_radio != AR5K_RF2413) &&
(ah->ah_radio != AR5K_RF5413))
- fast = 0;
+ fast = false;
/* Disable sleep clock operation
* to avoid register access delay on certain
@@ -1185,7 +1185,7 @@ ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret && fast) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"DMA didn't stop, falling back to normal reset\n");
- fast = 0;
+ fast = false;
/* Non fatal, just continue with
* normal reset */
ret = 0;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 368ecbd172a..7f55be3092d 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -430,7 +430,7 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
"capabilities (%d) - assuming P2P not "
"supported\n", ret);
- ar->p2p = 0;
+ ar->p2p = false;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index f57084ec49e..619b95d764f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -383,7 +383,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
((eep->baseEepHeader.version & 0xff) > 0x0a) &&
(eep->baseEepHeader.pwdclkind == 0))
- ah->need_an_top2_fixup = 1;
+ ah->need_an_top2_fixup = true;
if ((common->bus_ops->ath_bus_type == ATH_USB) &&
(AR_SREV_9280(ah)))
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index b092523caed..9aa01997b1e 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1961,7 +1961,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
ieee80211_stop_queue(sc->hw, q);
- txq->stopped = 1;
+ txq->stopped = true;
}
ath_tx_start_dma(sc, skb, txctl);
@@ -2017,7 +2017,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
ieee80211_wake_queue(sc->hw, q);
- txq->stopped = 0;
+ txq->stopped = false;
}
}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f06e0695d41..db774212161 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -48,7 +48,7 @@
#include "carl9170.h"
#include "cmd.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
@@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw)
mutex_lock(&ar->mutex);
if (IS_ACCEPTING_CMD(ar)) {
- rcu_assign_pointer(ar->beacon_iter, NULL);
+ RCU_INIT_POINTER(ar->beacon_iter, NULL);
carl9170_led_set_state(ar, 0);
@@ -678,7 +678,7 @@ unlock:
vif_priv->active = false;
bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
ar->vifs--;
- rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
list_del_rcu(&vif_priv->list);
mutex_unlock(&ar->mutex);
synchronize_rcu();
@@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
WARN_ON(vif_priv->enable_beacon);
vif_priv->enable_beacon = false;
list_del_rcu(&vif_priv->list);
- rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
if (vif == main_vif) {
rcu_read_unlock();
@@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
}
for (i = 0; i < CARL9170_NUM_TID; i++)
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
sta_info->ht_sta = true;
@@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
struct carl9170_sta_tid *tid_info;
tid_info = rcu_dereference(sta_info->agg[i]);
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
if (!tid_info)
continue;
@@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->tx_ampdu_list_lock);
}
- rcu_assign_pointer(sta_info->agg[tid], NULL);
+ RCU_INIT_POINTER(sta_info->agg[tid], NULL);
rcu_read_unlock();
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
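
The carl9170 hunks swap rcu_assign_pointer() for RCU_INIT_POINTER() only where the pointer is being cleared: storing NULL publishes nothing that readers could dereference, so the write barrier rcu_assign_pointer() implies is unnecessary. A sketch of the rule, assuming an RCU-protected field named item on a hypothetical foo_table:

struct foo_table {
	struct foo_item __rcu *item;
};

static void foo_clear_item(struct foo_table *tbl, struct foo_item *old)
{
	/* NULL is not a published object, so no memory barrier is needed;
	 * the initializer form is sufficient here. */
	RCU_INIT_POINTER(tbl->item, NULL);
	synchronize_rcu();	/* let existing readers drain */
	kfree(old);
}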
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 56d37dc967a..b5f1b91002b 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
else
ring->ops = &dma32_ops;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev)
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43err(dev->wl, "The machine/kernel does not support "
@@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
priv_info->bouncebuffer = NULL;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1468,7 +1468,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
unsigned int skb_mapping = skb_get_queue_mapping(skb);
ieee80211_stop_queue(dev->wl->hw, skb_mapping);
dev->wl->tx_queue_stopped[skb_mapping] = 1;
- ring->stopped = 1;
+ ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
}
@@ -1586,7 +1586,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
- ring->stopped = 0;
+ ring->stopped = false;
}
if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
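
The b43 code above keeps its existing mask-fallback loop and merely records the fallback in a bool. The general shape of that loop — request the widest DMA mask and step down until the platform accepts one — can be sketched as follows (treat the exact helper choice as an assumption of this sketch):

static int foo_pick_dma_mask(struct device *dev)
{
	u64 mask = DMA_BIT_MASK(64);

	while (dma_set_mask(dev, mask)) {
		if (mask == DMA_BIT_MASK(64))
			mask = DMA_BIT_MASK(32);	/* 64-bit refused */
		else if (mask == DMA_BIT_MASK(32))
			mask = DMA_BIT_MASK(30);	/* 32-bit refused */
		else
			return -EOPNOTSUPP;		/* nothing workable */
	}
	return dma_set_coherent_mask(dev, mask);
}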
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index a38c1c6446a..d79ab2a227e 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev,
if (radio_enabled)
turn_on = atomic_read(&led->state) != LED_OFF;
else
- turn_on = 0;
+ turn_on = false;
if (turn_on == led->hw_state)
return;
led->hw_state = turn_on;
@@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
if (sprom[led_index] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- *activelow = 0;
+ *activelow = false;
switch (led_index) {
case 0:
*behaviour = B43_LED_ACTIVITY;
- *activelow = 1;
+ *activelow = true;
if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
*behaviour = B43_LED_RADIO_ALL;
break;
@@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev)
if (led->wl) {
if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
b43_led_turn_on(dev, led->index, led->activelow);
- led->hw_state = 1;
+ led->hw_state = true;
atomic_set(&led->state, 1);
} else {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
}
@@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev)
led = &dev->wl->leds.led_tx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_rx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_assoc;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 4c82d582a52..916123a3d74 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
const struct b43_rfatt *rfatt;
const struct b43_bbatt *bbatt;
u64 power_vector;
- bool table_changed = 0;
+ bool table_changed = false;
BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
@@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
| (val & 0x00FF);
}
- table_changed = 1;
+ table_changed = true;
}
if (table_changed) {
/* The table changed in memory. Update the hardware table. */
@@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
unsigned long now;
unsigned long expire;
struct b43_lo_calib *cal, *tmp;
- bool current_item_expired = 0;
+ bool current_item_expired = false;
bool hwpctl;
if (!lo)
@@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
B43_WARN_ON(current_item_expired);
- current_item_expired = 1;
+ current_item_expired = true;
}
if (b43_debug(dev, B43_DBG_LO)) {
b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 989f654de00..1c6f19393ef 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1122,17 +1122,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP));
if (ps_flags & B43_PS_ENABLED) {
- hwps = 1;
+ hwps = true;
} else if (ps_flags & B43_PS_DISABLED) {
- hwps = 0;
+ hwps = false;
} else {
//TODO: If powersave is not off and FIXME is not set and we are not in adhoc
// and thus is not an AP and we are associated, set bit 25
}
if (ps_flags & B43_PS_AWAKE) {
- awake = 1;
+ awake = true;
} else if (ps_flags & B43_PS_ASLEEP) {
- awake = 0;
+ awake = false;
} else {
//TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
// or we are associated, or FIXME, or the latest PS-Poll packet sent was
@@ -1140,8 +1140,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
}
/* FIXME: For now we force awake-on and hwps-off */
- hwps = 0;
- awake = 1;
+ hwps = false;
+ awake = true;
macctl = b43_read32(dev, B43_MMIO_MACCTL);
if (hwps)
@@ -1339,7 +1339,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
return;
if (dev->noisecalc.calculation_running)
return;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43_generate_noise_sample(dev);
@@ -1408,7 +1408,7 @@ static void handle_irq_noise(struct b43_wldev *dev)
average -= 48;
dev->stats.link_noise = average;
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -1424,7 +1424,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43_wldev *dev)
@@ -1433,7 +1433,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev)
b43_write32(dev, B43_MMIO_MACCMD,
b43_read32(dev, B43_MMIO_MACCMD)
| B43_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -1539,7 +1539,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1588,7 +1588,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable);
@@ -1625,7 +1625,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev)
if (wl->beacon0_uploaded)
return;
b43_write_beacon_template(dev, 0x68, 0x18);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43_upload_beacon1(struct b43_wldev *dev)
@@ -1635,7 +1635,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43_wldev *dev)
@@ -1667,7 +1667,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43_upload_beacon0(dev);
b43_upload_beacon1(dev);
cmd = b43_read32(dev, B43_MMIO_MACCMD);
@@ -1755,8 +1755,8 @@ static void b43_update_templates(struct b43_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -1913,7 +1913,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
b43err(dev->wl, "This device does not support DMA "
"on your system. It will now be switched to PIO.\n");
/* Fall back to PIO transfers if we get fatal DMA errors! */
- dev->use_pio = 1;
+ dev->use_pio = true;
b43_controller_restart(dev, "DMA error");
return;
}
@@ -2240,12 +2240,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
filename = NULL;
else
goto err_no_pcm;
- fw->pcm_request_failed = 0;
+ fw->pcm_request_failed = false;
err = b43_do_request_fw(ctx, filename, &fw->pcm);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
- fw->pcm_request_failed = 1;
+ fw->pcm_request_failed = true;
} else if (err)
goto err_load;
@@ -2535,7 +2535,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
dev->qos_enabled = !!modparam_qos;
/* Default to firmware/hardware crypto acceleration. */
- dev->hwcrypto_enabled = 1;
+ dev->hwcrypto_enabled = true;
if (dev->fw.opensource) {
u16 fwcapa;
@@ -2549,7 +2549,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
/* Disable hardware crypto and fall back to software crypto. */
- dev->hwcrypto_enabled = 0;
+ dev->hwcrypto_enabled = false;
}
if (!(fwcapa & B43_FWCAPA_QOS)) {
b43info(dev->wl, "QoS not supported by firmware\n");
@@ -2557,7 +2557,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
* ieee80211_unregister to make sure the networking core can
* properly free possible resources. */
dev->wl->hw->queues = 1;
- dev->qos_enabled = 0;
+ dev->qos_enabled = false;
}
} else {
b43info(dev->wl, "Loading firmware version %u.%u "
@@ -3361,10 +3361,10 @@ static int b43_rng_init(struct b43_wl *wl)
wl->rng.name = wl->rng_name;
wl->rng.data_read = b43_rng_read;
wl->rng.priv = (unsigned long)wl;
- wl->rng_initialized = 1;
+ wl->rng_initialized = true;
err = hwrng_register(&wl->rng);
if (err) {
- wl->rng_initialized = 0;
+ wl->rng_initialized = false;
b43err(wl, "Failed to register the random "
"number generator (%d)\n", err);
}
@@ -3718,13 +3718,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
case IEEE80211_BAND_5GHZ:
if (d->phy.supports_5ghz) {
up_dev = d;
- gmode = 0;
+ gmode = false;
}
break;
case IEEE80211_BAND_2GHZ:
if (d->phy.supports_2ghz) {
up_dev = d;
- gmode = 1;
+ gmode = true;
}
break;
default:
@@ -4444,18 +4444,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
#if B43_DEBUG
- phy->phy_locked = 0;
- phy->radio_locked = 0;
+ phy->phy_locked = false;
+ phy->radio_locked = false;
#endif
}
static void setup_struct_wldev_for_init(struct b43_wldev *dev)
{
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -4689,16 +4689,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if (b43_bus_host_is_pcmcia(dev->dev) ||
b43_bus_host_is_sdio(dev->dev)) {
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else if (dev->use_pio) {
b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
"This should not be needed and will result in lower "
"performance.\n");
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else {
- dev->__using_pio_transfers = 0;
+ dev->__using_pio_transfers = false;
err = b43_dma_init(dev);
}
if (err)
@@ -4752,7 +4752,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -4786,7 +4786,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
b43_adjust_opmode(dev);
memset(wl->mac_addr, 0, ETH_ALEN);
@@ -4808,12 +4808,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->radiotap_enabled = 0;
+ wl->radiotap_enabled = false;
b43_qos_clear(wl);
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -4859,7 +4859,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
goto out_unlock;
}
b43_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
out_unlock:
mutex_unlock(&wl->mutex);
@@ -5047,7 +5047,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
struct pci_dev *pdev = NULL;
int err;
u32 tmp;
- bool have_2ghz_phy = 0, have_5ghz_phy = 0;
+ bool have_2ghz_phy = false, have_5ghz_phy = false;
/* Do NOT do any device initialization here.
* Do it in wireless_core_init() instead.
@@ -5090,7 +5090,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_phy_versioning(dev);
@@ -5101,11 +5101,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
(pdev->device != 0x4312 &&
pdev->device != 0x4319 && pdev->device != 0x4324)) {
/* No multiband support. */
- have_2ghz_phy = 0;
- have_5ghz_phy = 0;
+ have_2ghz_phy = false;
+ have_5ghz_phy = false;
switch (dev->phy.type) {
case B43_PHYTYPE_A:
- have_5ghz_phy = 1;
+ have_5ghz_phy = true;
break;
case B43_PHYTYPE_LP: //FIXME not always!
#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
@@ -5115,7 +5115,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
case B43_PHYTYPE_N:
case B43_PHYTYPE_HT:
case B43_PHYTYPE_LCN:
- have_2ghz_phy = 1;
+ have_2ghz_phy = true;
break;
default:
B43_WARN_ON(1);
@@ -5131,8 +5131,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
/* FIXME: For now we disable the A-PHY on multi-PHY devices. */
if (dev->phy.type != B43_PHYTYPE_N &&
dev->phy.type != B43_PHYTYPE_LP) {
- have_2ghz_phy = 1;
- have_5ghz_phy = 0;
+ have_2ghz_phy = true;
+ have_5ghz_phy = false;
}
}
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3ea44bb0368..3f8883b14d9 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(dev->phy.radio_locked);
- dev->phy.radio_locked = 1;
+ dev->phy.radio_locked = true;
#endif
macctl = b43_read32(dev, B43_MMIO_MACCTL);
@@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(!dev->phy.radio_locked);
- dev->phy.radio_locked = 0;
+ dev->phy.radio_locked = false;
#endif
/* Commit any write */
@@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(dev->phy.phy_locked);
- dev->phy.phy_locked = 1;
+ dev->phy.phy_locked = true;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
@@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(!dev->phy.phy_locked);
- dev->phy.phy_locked = 0;
+ dev->phy.phy_locked = false;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 8e157bc213f..12f467b8d56 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
if (b43_phy_read(dev, 0x0033) & 0x0800)
break;
- gphy->aci_enable = 1;
+ gphy->aci_enable = true;
phy_stacksave(B43_PHY_RADIO_BITFIELD);
phy_stacksave(B43_PHY_G_CRS);
@@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
if (!(b43_phy_read(dev, 0x0033) & 0x0800))
break;
- gphy->aci_enable = 0;
+ gphy->aci_enable = false;
phy_stackrestore(B43_PHY_RADIO_BITFIELD);
phy_stackrestore(B43_PHY_G_CRS);
@@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
bbatt.att = 11;
if (phy->radio_rev == 8) {
rfatt.att = 15;
- rfatt.with_padmix = 1;
+ rfatt.with_padmix = true;
} else {
rfatt.att = 9;
- rfatt.with_padmix = 0;
+ rfatt.with_padmix = false;
}
b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
}
@@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
struct b43_bus_dev *bdev = dev->dev;
struct b43_phy *phy = &dev->phy;
- rf->with_padmix = 0;
+ rf->with_padmix = false;
if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
dev->dev->board_type == SSB_BOARD_BCM4309G) {
@@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
return;
case 8:
rf->att = 0xA;
- rf->with_padmix = 1;
+ rf->with_padmix = true;
return;
case 9:
default:
@@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
(phy->radio_ver != 0x2050)); /* Not supported anymore */
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
pab0 != -1 && pab1 != -1 && pab2 != -1) {
@@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
pab1, pab2);
if (!gphy->tssi2dbm)
return -ENOMEM;
- gphy->dyn_tssi_tbl = 1;
+ gphy->dyn_tssi_tbl = true;
} else {
/* pabX values not set in SPROM. */
gphy->tgt_idle_tssi = 52;
@@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev)
if (gphy->dyn_tssi_tbl)
kfree(gphy->tssi2dbm);
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
gphy->tssi2dbm = NULL;
kfree(gphy);
@@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
if (phy->rev == 1) {
/* Workaround: Temporarily disable gmode through the early init
* phase, as the gmode stuff is not needed for phy rev 1 */
- phy->gmode = 0;
+ phy->gmode = false;
b43_wireless_core_reset(dev, 0);
b43_phy_initg(dev);
- phy->gmode = 1;
+ phy->gmode = true;
b43_wireless_core_reset(dev, 1);
}
@@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
gphy->radio_off_context.rfover);
b43_phy_write(dev, B43_PHY_RFOVERVAL,
gphy->radio_off_context.rfoverval);
- gphy->radio_off_context.valid = 0;
+ gphy->radio_off_context.valid = false;
}
channel = phy->channel;
b43_gphy_channel_switch(dev, 6, 1);
@@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
gphy->radio_off_context.rfover = rfover;
gphy->radio_off_context.rfoverval = rfoverval;
- gphy->radio_off_context.valid = 1;
+ gphy->radio_off_context.valid = true;
b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
}
@@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
if ((phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- gphy->aci_wlan_automatic = 0;
+ gphy->aci_wlan_automatic = false;
switch (mode) {
case B43_INTERFMODE_AUTOWLAN:
- gphy->aci_wlan_automatic = 1;
+ gphy->aci_wlan_automatic = true;
if (gphy->aci_enable)
mode = B43_INTERFMODE_MANUALWLAN;
else
@@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
b43_radio_interference_mitigation_disable(dev, currentmode);
if (mode == B43_INTERFMODE_NONE) {
- gphy->aci_enable = 0;
- gphy->aci_hw_rssi = 0;
+ gphy->aci_enable = false;
+ gphy->aci_hw_rssi = false;
} else
b43_radio_interference_mitigation_enable(dev, mode);
gphy->interfmode = mode;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index f93d66b1817..3ae28561f7a 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 1;
+ lpphy->crs_usr_disable = true;
else
- lpphy->crs_sys_disable = 1;
+ lpphy->crs_sys_disable = true;
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
}
@@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 0;
+ lpphy->crs_usr_disable = false;
else
- lpphy->crs_sys_disable = 0;
+ lpphy->crs_sys_disable = false;
if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index e89b04bcd1f..0d25fe4069b 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -3289,7 +3289,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (dev->phy.rev >= 4) {
avoid = nphy->hang_avoid;
- nphy->hang_avoid = 0;
+ nphy->hang_avoid = false;
}
b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
@@ -3399,7 +3399,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (phy6or5x && updated[core] == 0) {
b43_nphy_update_tx_cal_ladder(dev, core);
- updated[core] = 1;
+ updated[core] = true;
}
tmp = (params[core].ncorr[type] << 8) | 0x66;
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index fcff923b3c1..d07b412a32c 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* Not enough memory on the queue. */
err = -EBUSY;
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
goto out;
}
@@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
(q->free_packet_slots == 0)) {
/* The queue is full. */
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
}
out:
@@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
if (q->stopped) {
ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
- q->stopped = 0;
+ q->stopped = false;
}
}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 5f77cbe0b6a..2c5367884b3 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
struct ieee80211_tx_info *report,
const struct b43_txstatus *status)
{
- bool frame_success = 1;
+ bool frame_success = true;
int retry_limit;
/* preserve the configured retry limit before clearing the status
@@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
/* The frame was not ACKed... */
if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ...but we expected an ACK. */
- frame_success = 0;
+ frame_success = false;
}
}
if (status->frame_count == 0) {
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c5535adf699..1ee31c55c7b 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -806,7 +806,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -820,12 +820,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43legacyerr(dev->wl, "The machine/kernel does not support "
@@ -858,7 +858,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
#ifdef CONFIG_B43LEGACY_PIO
b43legacywarn(dev->wl, "DMA for this device not supported. "
"Falling back to PIO\n");
- dev->__using_pio = 1;
+ dev->__using_pio = true;
return -EAGAIN;
#else
b43legacyerr(dev->wl, "DMA for this device not supported and "
@@ -1068,7 +1068,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
@@ -1187,7 +1187,7 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
should_inject_overflow(ring)) {
/* This TX ring is full. */
ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 1;
+ ring->stopped = true;
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Stopped TX ring %d\n",
ring->index);
@@ -1286,7 +1286,7 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
if (ring->stopped) {
B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 0;
+ ring->stopped = false;
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Woke up TX ring %d\n",
ring->index);
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 2f1bfdc44f9..fd4565389c7 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev)
if (sprom[i] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- activelow = 0;
+ activelow = false;
switch (i) {
case 0:
behaviour = B43legacy_LED_ACTIVITY;
- activelow = 1;
+ activelow = true;
if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
behaviour = B43legacy_LED_RADIO_ALL;
break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 20f02437af8..200138cdb03 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags)
macctl &= ~B43legacy_MACCTL_GMODE;
if (flags & B43legacy_TMSLOW_GMODE) {
macctl |= B43legacy_MACCTL_GMODE;
- dev->phy.gmode = 1;
+ dev->phy.gmode = true;
} else
- dev->phy.gmode = 0;
+ dev->phy.gmode = false;
macctl |= B43legacy_MACCTL_IHR_ENABLED;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
}
@@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev)
if (dev->noisecalc.calculation_running)
return;
dev->noisecalc.channel_at_start = dev->phy.channel;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43legacy_generate_noise_sample(dev);
@@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
dev->stats.link_noise = average;
drop_calculation:
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43legacy_wldev *dev)
@@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev)
b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
| B43legacy_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43legacy_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt,
@@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
* but we don't use that feature anyway. */
b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
&__b43legacy_ratetable[3]);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
@@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43legacy_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43legacy_wldev *dev)
@@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43legacy_upload_beacon0(dev);
b43legacy_upload_beacon1(dev);
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
@@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -2510,7 +2510,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl,
if (d->phy.possible_phymodes & phymode) {
/* Ok, this device supports the PHY-mode.
* Set the gmode bit. */
- *gmode = 1;
+ *gmode = true;
*dev = d;
return 0;
@@ -2546,7 +2546,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
struct b43legacy_wldev *uninitialized_var(up_dev);
struct b43legacy_wldev *down_dev;
int err;
- bool gmode = 0;
+ bool gmode = false;
int prev_status;
err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
@@ -3044,12 +3044,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
phy->savedpctlreg = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
lo = phy->_lo_pairs;
if (lo)
@@ -3081,7 +3081,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
{
/* Flags */
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -3187,9 +3187,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev)
phy->lofcal = 0xFFFF;
phy->initval = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
phy->antenna_diversity = 0xFFFF;
memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
@@ -3355,7 +3355,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -3389,7 +3389,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3413,10 +3413,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -3455,7 +3455,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
mutex_unlock(&wl->mutex);
}
@@ -3614,7 +3614,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
have_bphy = 1;
dev->phy.gmode = (have_gphy || have_bphy);
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
@@ -3705,7 +3705,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
(void (*)(unsigned long))b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
- wldev->__using_pio = 1;
+ wldev->__using_pio = true;
INIT_LIST_HEAD(&wldev->list);
err = b43legacy_wireless_core_attach(wldev);
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 475eb14e665..fcbafcd603c 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev,
if (b43legacy_phy_read(dev, 0x0033) & 0x0800)
break;
- phy->aci_enable = 1;
+ phy->aci_enable = true;
phy_stacksave(B43legacy_PHY_RADIO_BITFIELD);
phy_stacksave(B43legacy_PHY_G_CRS);
@@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev,
if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800))
break;
- phy->aci_enable = 0;
+ phy->aci_enable = false;
phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD);
phy_stackrestore(B43legacy_PHY_G_CRS);
@@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
(phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- phy->aci_wlan_automatic = 0;
+ phy->aci_wlan_automatic = false;
switch (mode) {
case B43legacy_RADIO_INTERFMODE_AUTOWLAN:
- phy->aci_wlan_automatic = 1;
+ phy->aci_wlan_automatic = true;
if (phy->aci_enable)
mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN;
else
@@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
currentmode);
if (mode == B43legacy_RADIO_INTERFMODE_NONE) {
- phy->aci_enable = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_hw_rssi = false;
} else
b43legacy_radio_interference_mitigation_enable(dev, mode);
phy->interfmode = mode;
@@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
phy->radio_off_context.rfover);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
phy->radio_off_context.rfoverval);
- phy->radio_off_context.valid = 0;
+ phy->radio_off_context.valid = false;
}
channel = phy->channel;
err = b43legacy_radio_selectchannel(dev,
@@ -2113,7 +2113,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
default:
B43legacy_BUG_ON(1);
}
- phy->radio_on = 1;
+ phy->radio_on = true;
}
void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
@@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
if (!force) {
phy->radio_off_context.rfover = rfover;
phy->radio_off_context.rfoverval = rfoverval;
- phy->radio_off_context.valid = 1;
+ phy->radio_off_context.valid = true;
}
b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
rfoverval & 0xFF73);
} else
b43legacy_phy_write(dev, 0x0015, 0xAA00);
- phy->radio_on = 0;
+ phy->radio_on = false;
b43legacydbg(dev->wl, "Radio initialized\n");
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 72bee2c0495..eb9eb766ac2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -784,7 +784,7 @@ static int brcmf_netdev_stop(struct net_device *ndev)
return 0;
/* Set state and stop OS transmissions */
- drvr->bus_if->drvr_up = 0;
+ drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
return 0;
@@ -821,7 +821,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
}
/* Allow transmit calls */
netif_start_queue(ndev);
- drvr->bus_if->drvr_up = 1;
+ drvr->bus_if->drvr_up = true;
if (brcmf_cfg80211_up(drvr->config)) {
brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
return -1;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index b4cf617276c..2e90a9a16ed 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -641,10 +641,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
if ((core->id.id == SDIOD_CORE_ID)
&& ((rev > 0) && (rev <= 2)))
- di->addrext = 0;
+ di->addrext = false;
else if ((core->id.id == I2S_CORE_ID) &&
((rev == 0) || (rev == 1)))
- di->addrext = 0;
+ di->addrext = false;
else
di->addrext = _dma_isaddrext(di);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 77fdc45b43e..d106576ce33 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1265,7 +1265,7 @@ uint brcms_reset(struct brcms_info *wl)
brcms_c_reset(wl->wlc);
/* dpc will not be rescheduled */
- wl->resched = 0;
+ wl->resched = false;
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index efa0142bdad..ce8562aa5db 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
si_pmu_pllupd(pi->sh->sih);
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
write_phy_reg(pi, 0x425, 0x5907);
@@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
write_phy_reg(pi, 0x425, 0x590a);
@@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
{
s8 index, delta_brd, delta_temp, new_index, tempcorrx;
s16 manp, meas_temp, temp_diff;
- bool neg = 0;
+ bool neg = false;
u16 temp;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
@@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
temp_diff = manp - meas_temp;
if (temp_diff < 0) {
- neg = 1;
+ neg = true;
temp_diff = -temp_diff;
}
@@ -3682,8 +3682,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
udelay(20);
for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
- phy_c23 = 1;
- phy_c22 = 0;
+ phy_c23 = true;
+ phy_c22 = false;
switch (cal_type) {
case 0:
phy_c10 = 511;
@@ -3701,18 +3701,18 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c9 = read_phy_reg(pi, 0x93d);
phy_c9 = 2 * phy_c9;
- phy_c24 = 0;
+ phy_c24 = false;
phy_c5 = 7;
- phy_c25 = 1;
+ phy_c25 = true;
while (1) {
write_radio_reg(pi, RADIO_2064_REG026,
(phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
udelay(50);
- phy_c22 = 0;
+ phy_c22 = false;
ptr[130] = 0;
wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
if (ptr[130] == 1)
- phy_c22 = 1;
+ phy_c22 = true;
if (phy_c22)
phy_c5 -= 1;
if ((phy_c22 != phy_c24) && (!phy_c25))
@@ -3722,7 +3722,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
if (phy_c5 <= 0 || phy_c5 >= 7)
break;
phy_c24 = phy_c22;
- phy_c25 = 0;
+ phy_c25 = false;
}
if (phy_c5 < 0)
@@ -3773,10 +3773,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c13 = phy_c11;
phy_c14 = phy_c12;
}
- phy_c23 = 0;
+ phy_c23 = false;
}
}
- phy_c23 = 1;
+ phy_c23 = true;
phy_c15 = phy_c13;
phy_c16 = phy_c14;
phy_c7 = phy_c7 >> 1;
@@ -3966,7 +3966,7 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s16 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
suspend = (0 == (bcma_read32(pi->d11core,
@@ -4008,7 +4008,7 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
@@ -4076,7 +4076,7 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
{
u16 vbatsenseval;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
suspend = (0 == (bcma_read32(pi->d11core,
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 409faea66b8..cb9da25bd81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1188,9 +1188,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
- if (is_agg)
- iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
- le16_to_cpu(tx_cmd->len));
+ iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
DMA_BIDIRECTIONAL);
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 98a179f98ea..1f868b166d1 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = {
.mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
};
-static int modparam_reset;
+static bool modparam_reset;
module_param_named(reset, modparam_reset, bool, 0644);
MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
-static int modparam_wimax_enable = 1;
+static bool modparam_wimax_enable = true;
module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
@@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work)
iwm_invalidate_mlme_profile(iwm);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a414768f40f..7d708f4395f 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
@@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
umac_sta->mac_addr,
umac_sta->flags & UMAC_STA_FLAG_QOS);
- sta->valid = 1;
+ sta->valid = true;
sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
@@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
- sta->valid = 0;
+ sta->valid = false;
break;
case UMAC_OPCODE_CLEAR_ALL:
for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- iwm->sta_table[i].valid = 0;
+ iwm->sta_table[i].valid = false;
break;
default:
@@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
switch (hdr->oid) {
case UMAC_WIFI_IF_CMD_SET_PROFILE:
- iwm->umac_profile_active = 1;
+ iwm->umac_profile_active = true;
break;
default:
break;
@@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
*/
list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
- cmd->resp_received = 1;
+ cmd->resp_received = true;
cmd->buf.len = buf_size;
memcpy(cmd->buf.hdr, buf, buf_size);
wake_up_interruptible(&iwm->nonwifi_queue);
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index e2693517986..3f7bf4d912b 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
* Most of the libertas cards can do unaligned register access, but some
* weird ones cannot. That's especially true for the CF8305 card.
*/
- card->align_regs = 0;
+ card->align_regs = false;
card->model = get_model(p_dev->manf_id, p_dev->card_id);
if (card->model == MODEL_UNKNOWN) {
@@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Check if we have a current silicon */
prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
if (card->model == MODEL_8305) {
- card->align_regs = 1;
+ card->align_regs = true;
if (prod_id < IF_CS_CF8305_B1_REV) {
pr_err("8305 rev B0 and older are not supported\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ceb51b6e670..a03457292c8 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
return;
if (skb_queue_empty(&priv->bc_ps_buf)) {
- bool tx_buff_bc = 0;
+ bool tx_buff_bc = false;
while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
skb_queue_tail(&priv->bc_ps_buf, skb);
- tx_buff_bc = 1;
+ tx_buff_bc = true;
}
if (tx_buff_bc) {
ieee80211_stop_queues(priv->hw);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 52bcdf40d5b..4b9e730d2c8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -708,7 +708,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
wiphy_debug(hw->wiphy, "%s\n", __func__);
- data->started = 1;
+ data->started = true;
return 0;
}
@@ -716,7 +716,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
- data->started = 0;
+ data->started = false;
del_timer(&data->beacon_timer);
wiphy_debug(hw->wiphy, "%s\n", __func__);
}
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 901cd79a061..036491f08e9 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -31,7 +31,7 @@
#define MWL8K_VERSION "0.12"
/* Module parameters */
-static unsigned ap_mode_default;
+static bool ap_mode_default;
module_param(ap_mode_default, bool, 0);
MODULE_PARM_DESC(ap_mode_default,
"Set to 1 to make ap mode the default instead of sta mode");
@@ -738,10 +738,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
if (ready_code == MWL8K_FWAP_READY) {
- priv->ap_fw = 1;
+ priv->ap_fw = true;
break;
} else if (ready_code == MWL8K_FWSTA_READY) {
- priv->ap_fw = 0;
+ priv->ap_fw = false;
break;
}
@@ -5517,8 +5517,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
- priv->radio_on = 0;
- priv->radio_short_preamble = 0;
+ priv->radio_on = false;
+ priv->radio_short_preamble = false;
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index b52acc4b408..9fb77d0319f 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644);
MODULE_PARM_DESC(orinoco_debug, "Debug level");
#endif
-static int suppress_linkstatus; /* = 0 */
+static bool suppress_linkstatus; /* = 0 */
module_param(suppress_linkstatus, bool, 0644);
MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index db4d9a02f26..af2ca1a9c7d 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -27,7 +27,7 @@
#include "p54.h"
#include "lmac.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0021e494851..04fec1fa6e0 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
del_timer(&local->timer);
@@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
/* UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
*/
pr_debug("Deauthentication frame received\n");
local->authentication_state = UNAUTHENTICATED;
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index d7646f299bd..3c3b98b152c 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -566,9 +566,9 @@ struct phy_header {
UCHAR hdr_3;
UCHAR hdr_4;
};
-struct rx_msg {
+struct ray_rx_msg {
struct mac_header mac;
- UCHAR var[1];
+ UCHAR var[0];
};
struct tx_msg {
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 53c5f878f61..de7d41f21a6 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index da48c8ac27b..4941a1a2321 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -50,7 +50,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 7d1b6e4ef07..ee01d2e883a 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index bf55b4a311e..e0c6d117429 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -41,7 +41,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index cfb19dbb0a6..1c69c737086 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
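The run of hunks above all makes the same conversion: a module parameter registered with the bool parameter type now backs onto a bool variable rather than an int. A minimal sketch of the resulting pattern, assuming nothing beyond what the hunks themselves show (the parameter name is reused from them purely for illustration):

/* Sketch only -- the shape each of these drivers ends up with. */
static bool modparam_nohwcrypt;	/* was: static int modparam_nohwcrypt; */
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");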
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 7c1d82d8d71..8d6eb0f56c0 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
u8 valid = 0;
/* set init state to on */
- rtlpriv->rfkill.rfkill_state = 1;
+ rtlpriv->rfkill.rfkill_state = true;
wiphy_rfkill_set_hw_state(hw->wiphy, 0);
radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0d4d242849b..39e0907a3c4 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
u8 init_aspm;
ppsc->reg_rfps_level = 0;
- ppsc->support_aspm = 0;
+ ppsc->support_aspm = false;
/*Update PCI ASPM setting */
ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (ieee80211_is_nullfunc(fc)) {
if (ieee80211_has_pm(fc)) {
rtlpriv->mac80211.offchan_delay = true;
- rtlpriv->psc.state_inap = 1;
+ rtlpriv->psc.state_inap = true;
} else {
- rtlpriv->psc.state_inap = 0;
+ rtlpriv->psc.state_inap = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index f2aa33dc4d7..89ef6982ce5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtl8192ce_bt_reg_init(hw);
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 4ed973a3aa1..124cf633861 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -2436,7 +2436,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
"%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
}
if (actuallyset) {
- ppsc->hwradiooff = 1;
+ ppsc->hwradiooff = true;
if (e_rfpowerstate_toset == ERFON) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 94a3e170615..3527c7957b4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
const struct firmware *firmware;
int err;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 149493f4c25..7911c9c8708 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
- rtlpriv->dm.useramask = 1;
+ rtlpriv->dm.useramask = true;
/* dual mac */
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 92f49d522c5..78723cf5949 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
int err = 0;
u16 earlyrxthreshold = 7;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dm.useramask = true;
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 115969df0c9..cdaf1429fa0 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1488,7 +1488,7 @@ struct rtl_intf_ops {
struct rtl_mod_params {
/* default: 0 = using hardware encryption */
- int sw_crypto;
+ bool sw_crypto;
/* default: 0 = DBG_EMERG (0)*/
int debug;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 182562952c7..0b5c18feb30 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,7 +165,8 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
return 0;
}
-static u32 xenvif_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xenvif_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct xenvif *vif = netdev_priv(dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0cb594c8609..639cf8ab62b 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct netbk_rx_meta *meta;
/*
- * These variables a used iff get_page_ext returns true,
+ * These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
*/
unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
if (!page)
return NULL;
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
gop->source.offset = txp->offset;
@@ -1021,7 +1019,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
@@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
continue;
}
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
@@ -1668,7 +1664,7 @@ static int __init netback_init(void)
"netback/%u", group);
if (IS_ERR(netbk->task)) {
- printk(KERN_ALERT "kthread_run() fails at netback\n");
+ printk(KERN_ALERT "kthread_create() fails at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 226faab2360..0a59c57864f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev);
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
-static int xennet_can_sg(struct net_device *dev)
+static bool xennet_can_sg(struct net_device *dev)
{
return dev->features & NETIF_F_SG;
}
@@ -1190,7 +1190,8 @@ static void xennet_uninit(struct net_device *dev)
gnttab_free_grant_references(np->gref_rx_head);
}
-static u32 xennet_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xennet_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netfront_info *np = netdev_priv(dev);
int val;
@@ -1216,7 +1217,8 @@ static u32 xennet_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int xennet_set_features(struct net_device *dev, u32 features)
+static int xennet_set_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
netdev_info(dev, "Reducing MTU because no SG offload");
@@ -1707,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
- case XenbusStateConnected:
case XenbusStateUnknown:
case XenbusStateClosed:
break;
@@ -1718,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev,
if (xennet_connect(netdev) != 0)
break;
xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateConnected:
netif_notify_peers(netdev);
break;
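The xen-netback and xen-netfront hunks above move the fix_features/set_features callbacks from u32 to the netdev_features_t type. A hedged sketch of the callback shape assumed here; the SG/MTU policy inside is illustrative only, loosely modelled on the xennet code in the hunk, not taken from either driver:

/* Sketch: the feature mask in and out is now netdev_features_t. */
static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	if (dev->mtu > ETH_DATA_LEN)
		features &= ~NETIF_F_SG;	/* illustrative policy */
	return features;
}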
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6d3dd3988d0..0f0cfa3bca3 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
#include <linux/string.h>
#include <linux/slab.h>
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
struct of_irq oirq;
if (of_irq_map_one(dev, index, &oirq))
- return NO_IRQ;
+ return 0;
return irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -60,27 +55,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
*/
struct device_node *of_irq_find_parent(struct device_node *child)
{
- struct device_node *p, *c = child;
+ struct device_node *p;
const __be32 *parp;
- if (!of_node_get(c))
+ if (!of_node_get(child))
return NULL;
do {
- parp = of_get_property(c, "interrupt-parent", NULL);
+ parp = of_get_property(child, "interrupt-parent", NULL);
if (parp == NULL)
- p = of_get_parent(c);
+ p = of_get_parent(child);
else {
if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
p = of_node_get(of_irq_dflt_pic);
else
p = of_find_node_by_phandle(be32_to_cpup(parp));
}
- of_node_put(c);
- c = p;
+ of_node_put(child);
+ child = p;
} while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
- return (p == child) ? NULL : p;
+ return p;
}
/**
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
/* Only dereference the resource if both the
* resource and the irq are valid. */
- if (r && irq != NO_IRQ) {
+ if (r && irq) {
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
{
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+ while (of_irq_to_resource(dev, nr, NULL))
nr++;
return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+ if (!of_irq_to_resource(dev, i, res))
break;
return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
desc->dev = np;
desc->interrupt_parent = of_irq_find_parent(np);
+ if (desc->interrupt_parent == np)
+ desc->interrupt_parent = NULL;
list_add_tail(&desc->list, &intc_desc_list);
}
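With the NO_IRQ fallback removed in the of/irq.c hunk above, irq_of_parse_and_map() signals failure as 0 on every architecture. A minimal caller sketch under that convention; the device node variable and the error value returned are illustrative:

/* Sketch only: 0 now uniformly means "no interrupt mapped". */
unsigned int virq = irq_of_parse_and_map(np, 0);
if (!virq)
	return -EINVAL;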
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index cbd5d701c7e..63b3ec48c20 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
if (!lookup)
return NULL;
- for(; lookup->name != NULL; lookup++) {
+ for(; lookup->compatible != NULL; lookup++) {
if (!of_device_is_compatible(np, lookup->compatible))
continue;
if (of_address_to_resource(np, 0, &res))
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095..f8c752e408a 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
return err;
}
+static int timer_mode;
+
static int __init oprofile_init(void)
{
int err;
+ /* always init architecture to set up backtrace support */
err = oprofile_arch_init(&oprofile_ops);
- if (err < 0 || timer) {
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+ timer_mode = err || timer; /* fall back to timer mode on errors */
+ if (timer_mode) {
+ if (!err)
+ oprofile_arch_exit();
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
- return oprofilefs_register();
+
+ err = oprofilefs_register();
+ if (!err)
+ return 0;
+
+ /* failed */
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
+
+ return err;
}
static void __exit oprofile_exit(void)
{
- oprofile_timer_exit();
oprofilefs_unregister();
- oprofile_arch_exit();
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
}
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 89f63456646..84a208dbed9 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
+ retval = 0;
if (val)
retval = oprofile_start();
else
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index d0de6cc2d7a..2f0aa0f700e 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
}
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
raw_spin_lock_irqsave(&oprofilefs_lock, flags);
*val = simple_strtoul(tmpbuf, NULL, 0);
raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return 0;
+ return count;
}
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
return -EINVAL;
retval = oprofilefs_ulong_from_user(&value, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
retval = oprofile_set_ulong(file->private_data, value);
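The oprofilefs hunks change oprofilefs_ulong_from_user() to return the number of bytes consumed on success, 0 when count is zero (leaving *val untouched), and a negative errno on failure. A minimal sketch of the caller convention the patch adopts throughout oprofile_files.c and oprofilefs.c:

retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval <= 0)	/* 0: zero-length write, val unchanged; < 0: error */
	return retval;
/* retval == count here and val holds the parsed value. */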
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f51..878fba12658 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
ops->start = oprofile_hrtimer_start;
ops->stop = oprofile_hrtimer_stop;
ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
return 0;
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index b6f9749b4fa..f02b5235056 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -76,6 +76,7 @@ config PCI_IOV
config PCI_PRI
bool "PCI PRI support"
+ depends on PCI
select PCI_ATS
help
PRI is the PCI Page Request Interface. It allows PCI devices that are
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb0bd7..b0dd08e6a9d 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "pci.h"
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 596172b4ae9..9ddf69e3bbe 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (tmp) {
+ struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+ if (root && (root->osc_control_set &
+ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return AE_OK;
+ }
+ }
+
acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
- pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
@@ -1378,11 +1389,13 @@ find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *count = (int *)context;
- if (acpi_is_root_bridge(handle)) {
- acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge, NULL);
- (*count)++;
- }
+ if (!acpi_is_root_bridge(handle))
+ return AE_OK;
+
+ (*count)++;
+ acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ handle_hotplug_event_bridge, NULL);
+
return AE_OK ;
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 1e9c9aacc3a..085dbb5fc16 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot)
goto err_exit;
}
- /* Wait for 1 second after checking link training status */
- msleep(1000);
-
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 96dc4734e4a..7b1414810ae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl)
else
msleep(1000);
+ /*
+ * Need to wait for 1000 ms after the Data Link Layer Link Active
+ * (DLLLA) bit reads 1b before sending a configuration request.
+ * We need it before checking the Link Training (LT) bit because
+ * LT is still set even after the DLLLA bit is set on some platforms.
+ */
+ msleep(1000);
+
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
if (retval) {
ctrl_err(ctrl, "Cannot read LNKSTATUS register\n");
@@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl)
return retval;
}
+ /*
+ * If the port supports Link speeds greater than 5.0 GT/s, we
+ * must wait for 100 ms after Link training completes before
+ * sending a configuration request.
+ */
+ if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT)
+ msleep(100);
+
+ pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
return retval;
}
@@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot)
u16 slot_cmd;
u16 cmd_mask;
u16 slot_status;
- u16 lnk_status;
int retval = 0;
/* Clear sticky power-fault bit from previous power failures */
@@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
- __func__);
- return retval;
- }
- pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
-
return retval;
}
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index aca972bbfb4..dd7e0c51a33 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
static int is_shpc_capable(struct pci_dev *dev)
{
- if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450))
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
return 1;
if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
return 0;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 36547f0ce30..75ba2311b54 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
ctrl_dbg(ctrl, "Hotplug Controller:\n");
- if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
- PCI_DEVICE_ID_AMD_GOLAM_7450)) {
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
/* amd shpc driver doesn't use Base Offset; assume 0 */
ctrl->mmio_base = pci_resource_start(pdev, 0);
ctrl->mmio_size = pci_resource_len(pdev, 0);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155d7b3..1969a3ee305 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
if (!nr_virtfn)
return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73c6e9..6d4a5319148 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
+ /* Fall back to PCI_D0 if native PM is not supported */
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ /* skip only the SR-IOV resources */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a43cfd906c6..d93e962f261 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -589,14 +589,14 @@ static const struct backlight_ops dell_ops = {
.update_status = dell_send_intensity,
};
-static void touchpad_led_on()
+static void touchpad_led_on(void)
{
int command = 0x97;
char data = 1;
i8042_command(&data, command | 1 << 12);
}
-static void touchpad_led_off()
+static void touchpad_led_off(void)
{
int command = 0x97;
char data = 2;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471..dcdc1f4a462 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
int illumination_supported:1;
int video_supported:1;
int fan_supported:1;
+ int system_event_supported:1;
struct mutex mutex;
};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
u32 hci_result;
u32 value;
- if (!dev->key_event_valid) {
+ if (!dev->key_event_valid && dev->system_event_supported) {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
/* enable event fifo */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ dev->system_event_supported = 1;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
u32 hci_result, value;
+ int retries = 3;
- if (event != 0x80)
+ if (!dev->system_event_supported || event != 0x80)
return;
+
do {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ switch (hci_result) {
+ case HCI_SUCCESS:
if (value == 0x100)
continue;
/* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
pr_info("Unknown key %x\n",
value);
}
- } else if (hci_result == HCI_NOT_SUPPORTED) {
+ break;
+ case HCI_NOT_SUPPORTED:
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
}
- } while (hci_result != HCI_EMPTY);
+ } while (retries && hci_result != HCI_EMPTY);
}
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b0..01fa671ec97 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK 0x86
+
#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
batt_exception = 1;
- } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
- batt_exception = 1;
} else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
batt_exception = 1;
} else {
pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+ /* PMIC will change charging current automatically */
+ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+ }
}
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546..10451a15e82 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
- return 1; /* always round timer functions to one nanosecond */
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
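The ptp_clock_getres() change above reports the clock resolution through the timespec argument and returns 0, instead of abusing the return value. A hedged userspace sketch of what a caller now observes; clkid stands for a previously obtained PTP clock id and the printf is illustrative:

struct timespec res;

if (clock_getres(clkid, &res) == 0)
	/* res.tv_sec == 0 and res.tv_nsec == 1: one-nanosecond resolution */
	printf("resolution: %ld ns\n", res.tv_nsec);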
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10c..691b1ab1a3d 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
&priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
- memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
priv->idb_base, (unsigned long long)priv->idb_dma);
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
*/
/* Allocate space for DMA descriptors */
- bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].bd_phys = bd_phys;
priv->bdma[chnum].bd_base = bd_ptr;
- memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].sts_base = sts_ptr;
priv->bdma[chnum].sts_size = sts_size;
- memset(sts_ptr, 0, sts_size);
-
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
priv->omsg_ring[mbox].sts_size *
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out_desc;
}
- memset(priv->omsg_ring[mbox].sts_base, 0,
- entries * sizeof(struct tsi721_dma_sts));
-
/*
* Configure Outbound Messaging Engine
*/
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
INIT_LIST_HEAD(&mport->dbells);
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
strcpy(mport->name, "Tsi721 mport");
/* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i;
+ int i, cap;
int err;
u32 regval;
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
}
- /* Clear "no snoop" and "relaxed ordering" bits. */
- pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
- regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
- pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+ cap = pci_pcie_cap(pdev);
+ BUG_ON(cap == 0);
+
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+ regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+ /* Adjust PCIe completion timeout. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+ regval &= ~(0x0f);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb140..822e54c394d 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
+#define MAX_READ_REQUEST_SZ_SHIFT 12
+
/*
* Event Management Registers
*/
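The tsi721.c hunks above replace each dma_alloc_coherent() plus memset() pair with dma_zalloc_coherent(), which hands back already-zeroed coherent memory. A sketch of the equivalence, with illustrative variable names:

/* Before: allocate, then clear by hand. */
buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
if (buf)
	memset(buf, 0, size);

/* After: the zeroing variant used by these hunks. */
buf = dma_zalloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);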
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8..298c6c6a279 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
break;
}
- if (!ri)
+ if (i == ARRAY_SIZE(aat2870_regulators))
return NULL;
ri->enable_addr = AAT2870_LDO_EN;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d0216022..938398f3e86 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
list_del(&rdev->list);
if (rdev->supply)
regulator_put(rdev->supply);
- device_unregister(&rdev->dev);
kfree(rdev->constraints);
+ device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 66d2d60b436..b552aae55b4 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
switch (id) {
case TPS65910_REG_VDD1:
- dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
if (dcdc_mult == 1)
dcdc_mult--;
- vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+ vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
tps65910_modify_bits(pmic, TPS65910_VDD1,
(dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
@@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
- dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
if (dcdc_mult == 1)
dcdc_mult--;
- vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+ vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
tps65910_modify_bits(pmic, TPS65910_VDD2,
(dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
@@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
switch (id) {
case TPS65910_REG_VDD1:
case TPS65910_REG_VDD2:
- mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+ mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
volt = VDD1_2_MIN_VOLT +
- (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+ (selector % VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET;
break;
case TPS65911_REG_VDDCTRL:
volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
@@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
pmic->desc[i].ops = &tps65910_ops_dcdc;
+ pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
+ VDD1_2_NUM_VOLT_COARSE;
} else if (i == TPS65910_REG_VDD3) {
if (tps65910_chip_id(tps65910) == TPS65910)
pmic->desc[i].ops = &tps65910_ops_vdd3;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa0..11cc308d66e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
/* TWL6030 register offsets */
#define VREG_TRANS 1
#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
.get_status = twl4030reg_get_status,
};
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+ vsel);
+ return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE_SMPS_4030);
+
+ return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+ .set_voltage = twl4030smps_set_voltage,
+ .get_voltage = twl4030smps_get_voltage,
+};
+
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+ { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL4030_REG_##label, \
+ .ops = &twl4030smps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
.min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2..dc4c2748bbc 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
*/
delta = timespec_sub(old_system, old_rtc);
delta_delta = timespec_sub(delta, old_delta);
- if (abs(delta_delta.tv_sec) >= 2) {
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
* has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
rtc_tm_to_time(&tm, &new_rtc.tv_sec);
new_rtc.tv_nsec = 0;
- if (new_rtc.tv_sec <= old_rtc.tv_sec) {
- if (new_rtc.tv_sec < old_rtc.tv_sec)
- pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
sleep_time = timespec_sub(sleep_time,
timespec_sub(new_system, old_system));
- timekeeping_inject_sleeptime(&sleep_time);
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a00..3bcc7cfcaba 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -73,6 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
err = -EINVAL;
mutex_unlock(&rtc->ops_lock);
+ /* A timer might have just expired */
+ schedule_work(&rtc->irqwork);
return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -112,6 +114,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
err = -EINVAL;
mutex_unlock(&rtc->ops_lock);
+ /* A timer might have just expired */
+ schedule_work(&rtc->irqwork);
return err;
}
@@ -319,6 +323,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ return err;
+}
+
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
@@ -342,14 +360,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
* over right here, before we set the alarm.
*/
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
- return err;
+ return ___rtc_set_alarm(rtc, alarm);
}
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -396,6 +407,8 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
}
mutex_unlock(&rtc->ops_lock);
+ /* Maybe that was in the past. */
+ schedule_work(&rtc->irqwork);
return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
@@ -763,6 +776,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
return 0;
}
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+
+ __rtc_read_time(rtc, &tm);
+
+ alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+ ktime_set(300, 0)));
+ alarm.enabled = 0;
+
+ ___rtc_set_alarm(rtc, &alarm);
+}
+
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
@@ -784,8 +811,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
struct rtc_wkalrm alarm;
int err;
next = timerqueue_getnext(&rtc->timerqueue);
- if (!next)
+ if (!next) {
+ rtc_alarm_disable(rtc);
return;
+ }
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +876,8 @@ again:
err = __rtc_set_alarm(rtc, &alarm);
if (err == -ETIME)
goto again;
- }
+ } else
+ rtc_alarm_disable(rtc);
mutex_unlock(&rtc->ops_lock);
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index eda128fc1d3..64aedd8cc09 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
+ /*
+ * XXX - m41t80 alarm functionality is reported broken.
+ * until it is fixed, don't register alarm functions.
+ *
.read_alarm = m41t80_rtc_read_alarm,
.set_alarm = m41t80_rtc_set_alarm,
+ */
.proc = m41t80_rtc_proc,
+ /*
+ * See above comment on broken alarm
+ *
.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+ */
};
#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index b3eba3cddd4..e4b6880aabd 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -220,7 +220,7 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
}
}
-static int puv3_rtc_remove(struct platform_device *dev)
+static int __devexit puv3_rtc_remove(struct platform_device *dev)
{
struct rtc_device *rtc = platform_get_drvdata(dev);
@@ -236,7 +236,7 @@ static int puv3_rtc_remove(struct platform_device *dev)
return 0;
}
-static int puv3_rtc_probe(struct platform_device *pdev)
+static int __devinit puv3_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f0..5b979d9cc33 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 43068fbd0ba..1b6d9247fdc 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -641,6 +641,8 @@ static int __init zcore_init(void)
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return -ENODATA;
+ if (OLDMEM_BASE)
+ return -ENODATA;
zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
debug_register_view(zcore_dbf, &debug_sprintf_view);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd4..a84631a7391 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
- struct chp_link link;
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/* Try to update the channel path descriptor. */
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
+ __s390_vary_chpid_on, &chpid);
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e..4a1ff5c2eb8 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todos with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1..21908e67bf6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(cio_work_q, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0eca..47269858ecb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
cdev->private->flags.resuming = 1;
cdev->private->path_new_mask = LPM_ANYPATH;
- css_schedule_eval(sch->schid);
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
spin_unlock_irq(sch->lock);
- css_complete_work();
+ css_wait_for_slow_path();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b1..1b853513c89 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
cdev->private->pgid_reset_mask = 0;
}
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -520,12 +538,8 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735..ec7fb6d3b47 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c1..76253dfcc1b 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
CDEV_TODO_UNREG_EVAL,
};
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int fake_irb:2; /* deliver faked irb */
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b77ae519d79..96bbe9d12a7 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1271,18 +1271,16 @@ ap_config_timeout(unsigned long ptr)
}
/**
- * ap_schedule_poll_timer(): Schedule poll timer.
+ * __ap_schedule_poll_timer(): Schedule poll timer.
*
* Set up the timer to run the poll tasklet
*/
-static inline void ap_schedule_poll_timer(void)
+static inline void __ap_schedule_poll_timer(void)
{
ktime_t hr_time;
spin_lock_bh(&ap_poll_timer_lock);
- if (ap_using_interrupts() || ap_suspend_flag)
- goto out;
- if (hrtimer_is_queued(&ap_poll_timer))
+ if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
goto out;
if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
hr_time = ktime_set(0, poll_timeout);
@@ -1294,6 +1292,18 @@ out:
}
/**
+ * ap_schedule_poll_timer(): Schedule poll timer.
+ *
+ * Set up the timer to run the poll tasklet
+ */
+static inline void ap_schedule_poll_timer(void)
+{
+ if (ap_using_interrupts())
+ return;
+ __ap_schedule_poll_timer();
+}
+
+/**
* ap_poll_read(): Receive pending reply messages from an AP device.
* @ap_dev: pointer to the AP device
* @flags: pointer to control flags, bit 2^0 is set if another poll is
@@ -1374,8 +1384,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
*flags |= 1;
*flags |= 2;
break;
- case AP_RESPONSE_Q_FULL:
case AP_RESPONSE_RESET_IN_PROGRESS:
+ __ap_schedule_poll_timer();
+ case AP_RESPONSE_Q_FULL:
*flags |= 2;
break;
case AP_RESPONSE_MESSAGE_TOO_BIG:
@@ -1541,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
rc = ap_init_queue(ap_dev->qid);
if (rc == -ENODEV)
ap_dev->unregistered = 1;
+ else
+ __ap_schedule_poll_timer();
}
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 94f49ffa70b..8af868bab20 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -263,6 +263,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/*
* The config ops structure as defined by virtio config
*/
@@ -276,6 +281,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
.reset = kvm_reset,
.find_vqs = kvm_find_vqs,
.del_vqs = kvm_del_vqs,
+ .bus_name = kvm_bus_name,
};
/*
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index fa80ba1f034..9b66d2d1809 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -4,7 +4,7 @@ menu "S/390 network device drivers"
config LCS
def_tristate m
prompt "Lan Channel Station Interface"
- depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
+ depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
help
Select this option if you want to use LCS networking on IBM System z.
This device driver supports Token Ring (IEEE 802.5),
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c28713da1ec..863fc219715 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -50,7 +50,7 @@
#include "lcs.h"
-#if !defined(CONFIG_NET_ETHERNET) && \
+#if !defined(CONFIG_ETHERNET) && \
!defined(CONFIG_TR) && !defined(CONFIG_FDDI)
#error Cannot compile lcs.c without some net devices switched on.
#endif
@@ -1634,7 +1634,7 @@ lcs_startlan_auto(struct lcs_card *card)
int rc;
LCS_DBF_TEXT(2, trace, "strtauto");
-#ifdef CONFIG_NET_ETHERNET
+#ifdef CONFIG_ETHERNET
card->lan_type = LCS_FRAME_TYPE_ENET;
rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
if (rc == 0)
@@ -2166,7 +2166,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
goto netdev_out;
}
switch (card->lan_type) {
-#ifdef CONFIG_NET_ETHERNET
+#ifdef CONFIG_ETHERNET
case LCS_FRAME_TYPE_ENET:
card->lan_type_trans = eth_type_trans;
dev = alloc_etherdev(0);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3251333a23d..8160591913f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -63,6 +63,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include "fsm.h"
@@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
* Debug Facility stuff
*/
#define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3
@@ -226,6 +227,7 @@ struct iucv_connection {
struct net_device *netdev;
struct connection_profile prof;
char userid[9];
+ char userdata[17];
};
/**
@@ -263,7 +265,7 @@ struct ll_header {
};
#define NETIUCV_HDRLEN (sizeof(struct ll_header))
-#define NETIUCV_BUFSIZE_MAX 32768
+#define NETIUCV_BUFSIZE_MAX 65537
#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT 9216
@@ -288,7 +290,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev)
return test_and_set_bit(0, &priv->tbusy);
}
-static u8 iucvMagic[16] = {
+static u8 iucvMagic_ascii[16] = {
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
@@ -301,18 +308,38 @@ static u8 iucvMagic[16] = {
*
* @returns The printable string (static data!!)
*/
-static char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name, int len)
{
- static char tmp[9];
+ static char tmp[17];
char *p = tmp;
- memcpy(tmp, name, 8);
- tmp[8] = '\0';
- while (*p && (!isspace(*p)))
+ memcpy(tmp, name, len);
+ tmp[len] = '\0';
+ while (*p && ((p - tmp) < len) && (!isspace(*p)))
p++;
*p = '\0';
return tmp;
}
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+ static char tmp_uid[9];
+ static char tmp_udat[17];
+ static char buf[100];
+
+ if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+ tmp_uid[8] = '\0';
+ tmp_udat[16] = '\0';
+ memcpy(tmp_uid, conn->userid, 8);
+ memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+ memcpy(tmp_udat, conn->userdata, 16);
+ EBCASC(tmp_udat, 16);
+ memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+ sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+ return buf;
+ } else
+ return netiucv_printname(conn->userid, 8);
+}
+
/**
* States of the interface statemachine.
*/
@@ -563,15 +590,18 @@ static int netiucv_callback_connreq(struct iucv_path *path,
{
struct iucv_connection *conn = path->private;
struct iucv_event ev;
+ static char tmp_user[9];
+ static char tmp_udat[17];
int rc;
- if (memcmp(iucvMagic, ipuser, 16))
- /* ipuser must match iucvMagic. */
- return -EINVAL;
rc = -EINVAL;
+ memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+ memcpy(tmp_udat, ipuser, 16);
+ EBCASC(tmp_udat, 16);
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(conn, &iucv_connection_list, list) {
- if (strncmp(ipvmid, conn->userid, 8))
+ if (strncmp(ipvmid, conn->userid, 8) ||
+ strncmp(ipuser, conn->userdata, 16))
continue;
/* Found a matching connection for this path. */
conn->path = path;
@@ -580,6 +610,8 @@ static int netiucv_callback_connreq(struct iucv_path *path,
fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
rc = 0;
}
+ IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+ tmp_user, netiucv_printname(tmp_udat, 16));
read_unlock_bh(&iucv_connection_rwlock);
return rc;
}
@@ -816,7 +848,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
conn->path = path;
path->msglim = NETIUCV_QUEUELEN_DEFAULT;
path->flags = 0;
- rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
+ rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
if (rc) {
IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
@@ -854,7 +886,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
+ iucv_path_sever(conn->path, conn->userdata);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
@@ -867,9 +899,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
- dev_info(privptr->dev, "The peer interface of the IUCV device"
- " has closed the connection\n");
+ iucv_path_sever(conn->path, conn->userdata);
+ dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+ "connection\n", netiucv_printuser(conn));
IUCV_DBF_TEXT(data, 2,
"conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -886,8 +918,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
- IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
- netdev->name, conn->userid);
/*
* We must set the state before calling iucv_connect because the
@@ -897,8 +927,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+ IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+ netdev->name, netiucv_printuser(conn));
+
rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
- NULL, iucvMagic, conn);
+ NULL, conn->userdata, conn);
switch (rc) {
case 0:
netdev->tx_queue_len = conn->path->msglim;
@@ -908,13 +941,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
case 11:
dev_warn(privptr->dev,
"The IUCV device failed to connect to z/VM guest %s\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 12:
dev_warn(privptr->dev,
"The IUCV device failed to connect to the peer on z/VM"
- " guest %s\n", netiucv_printname(conn->userid));
+ " guest %s\n", netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 13:
@@ -927,7 +960,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
dev_err(privptr->dev,
"z/VM guest %s has too many IUCV connections"
" to connect with the IUCV device\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
@@ -972,7 +1005,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1090,7 +1123,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_RUNNING);
dev_info(privptr->dev,
"The IUCV device has been connected"
- " successfully to %s\n", privptr->conn->userid);
+ " successfully to %s\n",
+ netiucv_printuser(privptr->conn));
IUCV_DBF_TEXT(setup, 3,
"connection is up and running\n");
break;
@@ -1452,45 +1486,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+ return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
-static ssize_t user_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+ char *userdata)
{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = priv->conn->netdev;
- char *p;
- char *tmp;
- char username[9];
- int i;
- struct iucv_connection *cp;
+ const char *p;
+ int i;
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (count > 9) {
- IUCV_DBF_TEXT_(setup, 2,
- "%d is length of username\n", (int) count);
+ p = strchr(buf, '.');
+ if ((p && ((count > 26) ||
+ ((p - buf) > 8) ||
+ (buf + count - p > 18))) ||
+ (!p && (count > 9))) {
+ IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
return -EINVAL;
}
- tmp = strsep((char **) &buf, "\n");
- for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || (*p == '$')) {
- username[i]= toupper(*p);
+ for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+ if (isalnum(*p) || *p == '$') {
+ username[i] = toupper(*p);
continue;
}
- if (*p == '\n') {
+ if (*p == '\n')
/* trailing lf, grr */
break;
- }
IUCV_DBF_TEXT_(setup, 2,
- "username: invalid character %c\n", *p);
+ "conn_write: invalid character %02x\n", *p);
return -EINVAL;
}
while (i < 8)
username[i++] = ' ';
username[8] = '\0';
+ if (*p == '.') {
+ p++;
+ for (i = 0; i < 16 && *p; i++, p++) {
+ if (*p == '\n')
+ break;
+ userdata[i] = toupper(*p);
+ }
+ while (i > 0 && i < 16)
+ userdata[i++] = ' ';
+ } else
+ memcpy(userdata, iucvMagic_ascii, 16);
+ userdata[16] = '\0';
+ ASCEBC(userdata, 16);
+
+ return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->conn->netdev;
+ char username[9];
+ char userdata[17];
+ int rc;
+ struct iucv_connection *cp;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
+
if (memcmp(username, priv->conn->userid, 9) &&
(ndev->flags & (IFF_UP | IFF_RUNNING))) {
/* username changed while the interface is active. */
@@ -1499,15 +1560,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
}
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
memcpy(priv->conn->userid, username, 9);
+ memcpy(priv->conn->userdata, userdata, 17);
return count;
}
@@ -1537,7 +1600,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
bs1 = simple_strtoul(buf, &e, 0);
if (e && (!isspace(*e))) {
- IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+ IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+ *e);
return -EINVAL;
}
if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1864,7 +1928,8 @@ static void netiucv_unregister_device(struct device *dev)
* Add it to the list of netiucv connections;
*/
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
- char *username)
+ char *username,
+ char *userdata)
{
struct iucv_connection *conn;
@@ -1893,6 +1958,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
fsm_settimer(conn->fsm, &conn->timer);
fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+ if (userdata)
+ memcpy(conn->userdata, userdata, 17);
if (username) {
memcpy(conn->userid, username, 9);
fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1919,6 +1986,7 @@ out:
*/
static void netiucv_remove_connection(struct iucv_connection *conn)
{
+
IUCV_DBF_TEXT(trace, 3, __func__);
write_lock_bh(&iucv_connection_rwlock);
list_del_init(&conn->list);
@@ -1926,7 +1994,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
fsm_deltimer(&conn->timer);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1985,7 +2053,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
/**
* Allocate and initialize everything of a net device.
*/
-static struct net_device *netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
struct netiucv_priv *privptr;
struct net_device *dev;
@@ -1994,6 +2062,8 @@ static struct net_device *netiucv_init_netdevice(char *username)
netiucv_setup_netdevice);
if (!dev)
return NULL;
+ if (dev_alloc_name(dev, dev->name) < 0)
+ goto out_netdev;
privptr = netdev_priv(dev);
privptr->fsm = init_fsm("netiucvdev", dev_state_names,
@@ -2002,7 +2072,7 @@ static struct net_device *netiucv_init_netdevice(char *username)
if (!privptr->fsm)
goto out_netdev;
- privptr->conn = netiucv_new_connection(dev, username);
+ privptr->conn = netiucv_new_connection(dev, username, userdata);
if (!privptr->conn) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
goto out_fsm;
@@ -2020,47 +2090,31 @@ out_netdev:
static ssize_t conn_write(struct device_driver *drv,
const char *buf, size_t count)
{
- const char *p;
char username[9];
- int i, rc;
+ char userdata[17];
+ int rc;
struct net_device *dev;
struct netiucv_priv *priv;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __func__);
- if (count>9) {
- IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
- return -EINVAL;
- }
-
- for (i = 0, p = buf; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || *p == '$') {
- username[i] = toupper(*p);
- continue;
- }
- if (*p == '\n')
- /* trailing lf, grr */
- break;
- IUCV_DBF_TEXT_(setup, 2,
- "conn_write: invalid character %c\n", *p);
- return -EINVAL;
- }
- while (i < 8)
- username[i++] = ' ';
- username[8] = '\0';
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9)) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17)) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
- dev = netiucv_init_netdevice(username);
+ dev = netiucv_init_netdevice(username, userdata);
if (!dev) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
@@ -2081,8 +2135,9 @@ static ssize_t conn_write(struct device_driver *drv,
if (rc)
goto out_unreg;
- dev_info(priv->dev, "The IUCV interface to %s has been"
- " established successfully\n", netiucv_printname(username));
+ dev_info(priv->dev, "The IUCV interface to %s has been established "
+ "successfully\n",
+ netiucv_printuser(priv->conn));
return count;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b77c65ed138..4abc79d3963 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -236,8 +236,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_IN_BUF_COUNT_MAX 128
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
- ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 1 : \
- ((card)->qdio.in_buf_pool.buf_count / 2))
+ ((card)->qdio.in_buf_pool.buf_count / 2)
/* buffers we have to be behind before we get a PCI */
#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 81534437373..4fae1dc1995 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum qeth_qdio_buffer_states newbufstate);
-
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
@@ -363,6 +363,9 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
int bidx, int forced_cleanup)
{
+ if (q->card->options.cq != QETH_CQ_ENABLED)
+ return;
+
if (q->bufs[bidx]->next_pending != NULL) {
struct qeth_qdio_out_buffer *head = q->bufs[bidx];
struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
@@ -390,6 +393,13 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
}
}
+ if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+ QETH_QDIO_BUF_HANDLED_DELAYED)) {
+ /* for recovery situations */
+ q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+ qeth_init_qdio_out_buf(q, bidx);
+ QETH_CARD_TEXT(q->card, 2, "clprecov");
+ }
}
@@ -412,7 +422,6 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
notification = TX_NOTIFY_OK;
} else {
BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
-
atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
notification = TX_NOTIFY_DELAYED_OK;
}
@@ -425,7 +434,8 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
buffer->aob = NULL;
qeth_clear_output_buffer(buffer->q, buffer,
- QETH_QDIO_BUF_HANDLED_DELAYED);
+ QETH_QDIO_BUF_HANDLED_DELAYED);
+
/* from here on: do not touch buffer anymore */
qdio_release_aob(aob);
}
@@ -881,7 +891,6 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread);
void qeth_schedule_recovery(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "startrec");
- WARN_ON(1);
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
@@ -1114,11 +1123,25 @@ out:
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
struct sk_buff *skb;
+ struct iucv_sock *iucv;
+ int notify_general_error = 0;
+
+ if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ notify_general_error = 1;
+
+ /* release may never happen from within CQ tasklet scope */
+ BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
skb = skb_dequeue(&buf->skb_list);
while (skb) {
QETH_CARD_TEXT(buf->q->card, 5, "skbr");
QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+ if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+ if (skb->sk) {
+ iucv = iucv_sk(skb->sk);
+ iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+ }
+ }
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
@@ -1161,7 +1184,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
- qeth_cleanup_handled_pending(q, j, free);
+ qeth_cleanup_handled_pending(q, j, 1);
qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1208,7 +1231,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
qeth_free_cq(card);
cancel_delayed_work_sync(&card->buffer_reclaim_work);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
+ dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
kfree(card->qdio.in_q);
card->qdio.in_q = NULL;
/* inbound buffer pool */
@@ -1330,6 +1353,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
static void qeth_start_kernel_thread(struct work_struct *work)
{
+ struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1337,9 +1361,15 @@ static void qeth_start_kernel_thread(struct work_struct *work)
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
return;
- if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
- kthread_run(card->discipline.recover, (void *) card,
+ if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+ ts = kthread_run(card->discipline.recover, (void *)card,
"qeth_recover");
+ if (IS_ERR(ts)) {
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card,
+ QETH_RECOVER_THREAD);
+ }
+ }
}
static int qeth_setup_card(struct qeth_card *card)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a21ae3d549d..c1296713311 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
spin_unlock_bh(&card->vlanlock);
}
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
- return;
+ return 0;
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "aidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "aidREC");
- return;
+ return 0;
}
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
@@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
+ } else {
+ return -ENOMEM;
}
+ return 0;
}
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "kidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
+ return 0;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -1169,6 +1173,7 @@ static void __exit qeth_l2_exit(void)
static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e4c1176ee25..9648e4e6833 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
qeth_l3_free_vlan_addresses6(card, vid);
}
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
set_bit(vid, card->active_vlans);
- return;
+ return 0;
}
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
@@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, card->active_vlans);
spin_unlock_irqrestore(&card->vlanlock, flags);
qeth_l3_set_multicast_list(card->dev);
+ return 0;
}
static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2756,11 +2757,13 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
struct neighbour *n = NULL;
struct dst_entry *dst;
+ rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n) {
cast_type = n->type;
+ rcu_read_unlock();
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
@@ -2768,6 +2771,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
else
return RTN_UNSPEC;
}
+ rcu_read_unlock();
+
/* try something else */
if (skb->protocol == ETH_P_IPV6)
return (skb_network_header(skb)[24] == 0xff) ?
@@ -2847,9 +2852,11 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+
+ rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (ipv == 4) {
/* IPv4 */
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -2893,6 +2900,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
}
}
+ rcu_read_unlock();
}
static inline void qeth_l3_hdr_csum(struct qeth_card *card,
@@ -3202,7 +3210,8 @@ static int qeth_l3_stop(struct net_device *dev)
return 0;
}
-static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
@@ -3216,7 +3225,8 @@ static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int qeth_l3_set_features(struct net_device *dev, u32 features)
+static int qeth_l3_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
u32 changed = dev->features ^ features;
@@ -3482,14 +3492,13 @@ contin:
else
netif_carrier_off(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
+ rtnl_lock();
if (recovery_mode)
__qeth_l3_open(card->dev);
- else {
- rtnl_lock();
+ else
dev_open(card->dev);
- rtnl_unlock();
- }
qeth_l3_set_multicast_list(card->dev);
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3535,6 +3544,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
card->info.hwtrap = 1;
}
qeth_l3_stop_card(card, recovery_mode);
+ if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+ rtnl_unlock();
+ }
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3589,6 +3603,7 @@ static int qeth_l3_recover(void *ptr)
static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0ea2fbfe0e9..d979bb26522 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -335,10 +335,10 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
QETH_IN_BUF_COUNT_MAX)
qeth_realloc_buffer_pool(card,
QETH_IN_BUF_COUNT_MAX);
- break;
} else
rc = -EPERM;
- default: /* fall through */
+ break;
+ default:
rc = -EINVAL;
}
out:
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f88822..b79576b64f4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ /* if previous slave_alloc returned early, there is nothing to do */
+ if (!zfcp_sdev->port)
+ return;
+
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491..54266829290 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_writeb(client, *buf, off);
-
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_writeb(client, *buf, off);
+ if (ret < 0)
break;
- }
-
len--;
buf++;
off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_readb(client, buf, off);
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_readb(client, buf, off);
+ if (ret < 0)
break;
- }
len--;
buf++;
off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
.remove = __devexit_p(bbc_i2c_remove),
};
-static int __init bbc_i2c_init(void)
-{
- return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
- platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66..4b9939726c3 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
.remove = __devexit_p(d7s_remove),
};
-static int __init d7s_init(void)
-{
- return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
- platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154..339fd6f65ed 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
.remove = __devexit_p(envctrl_remove),
};
-static int __init envctrl_init(void)
-{
- return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
- platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
-module_init(envctrl_init);
-module_exit(envctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaa..826157f3869 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
.remove = __devexit_p(flash_remove),
};
-static int __init flash_init(void)
-{
- return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
- platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
-module_init(flash_init);
-module_exit(flash_cleanup);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26..0b31658ccde 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
};
-static int __init uctrl_init(void)
-{
- return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
- platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
-module_init(uctrl_init);
-module_exit(uctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4aa76d6f11d..705e13e470a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
@@ -1109,6 +1110,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
unique_id++;
}
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
error = pci_enable_device(pdev);
if (error)
goto out;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4e6a1..1ad0b822556 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_lock(&session->lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
- if (!task) {
+ if (!task || !task->sc) {
spin_unlock(&session->lock);
return -EINVAL;
}
sc = task->sc;
- spin_unlock(&session->lock);
if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;
+ spin_unlock(&session->lock);
+
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 000294a9df8..36739da8bc1 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ac7a9b1e3e2..5a4a3bfc60c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct port_info *pi = netdev_priv(ndev);
struct sk_buff *skb = NULL;
+ struct neighbour *n;
unsigned int step;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+ n = dst_get_neighbour_noref(csk->dst);
+ if (!n) {
+ pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+ goto rel_resource;
+ }
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c10f74a566f..1d25a87aa47 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
struct net_device *ndev;
struct cxgbi_device *cdev;
struct rtable *rt = NULL;
+ struct neighbour *n;
struct flowi4 fl4;
struct cxgbi_sock *csk = NULL;
unsigned int mtu = 0;
@@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- ndev = dst_get_neighbour(dst)->dev;
+ n = dst_get_neighbour_noref(dst);
+ if (!n) {
+ err = -ENODEV;
+ goto rel_rt;
+ }
+ ndev = n->dev;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
mtu = ndev->mtu;
pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
- dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+ n->dev->name, ndev->name, mtu);
}
cdev = cxgbi_device_find_by_netdev(ndev, &port);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44bb84..8d67467dd9c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+ .notifier_call = fcoe_dcb_app_notification,
+};
+
static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
+ skb->priority = port->priority;
+
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ put_cpu();
return -EINVAL;
}
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
*/
static void fcoe_dev_setup(void)
{
+ register_dcbevent_notifier(&dcb_notifier);
register_netdevice_notifier(&fcoe_notifier);
}
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
*/
static void fcoe_dev_cleanup(void)
{
+ unregister_dcbevent_notifier(&dcb_notifier);
unregister_netdevice_notifier(&fcoe_notifier);
}
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *real_dev;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ real_dev = fcoe->netdev;
+
+ if (netdev == real_dev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct dcb_app_type *entry = ptr;
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ struct fcoe_port *port;
+ int prio;
+
+ if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+ return NOTIFY_OK;
+
+ netdev = dev_get_by_index(&init_net, entry->ifindex);
+ if (!netdev)
+ return NOTIFY_OK;
+
+ fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+ dev_put(netdev);
+ if (!fcoe)
+ return NOTIFY_OK;
+
+ if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+ prio = ffs(entry->app.priority) - 1;
+ else
+ prio = entry->app.priority;
+
+ if (prio < 0)
+ return NOTIFY_OK;
+
+ if (entry->app.protocol == ETH_P_FIP ||
+ entry->app.protocol == ETH_P_FCOE)
+ fcoe->ctlr.priority = prio;
+
+ if (entry->app.protocol == ETH_P_FCOE) {
+ port = lport_priv(fcoe->ctlr.lp);
+ port->priority = prio;
+ }
+
+ return NOTIFY_OK;
+}
+
/**
* fcoe_device_notification() - Handler for net device events
* @notifier: The context of the notification
@@ -1965,6 +2038,46 @@ static bool fcoe_match(struct net_device *netdev)
}
/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @netdev: The net_device object of the L2 link that should be queried
+ * @port: The fcoe_port to bind FCoE APP priority with
+ * @
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+ int dcbx;
+ u8 fup, up;
+ struct net_device *netdev = fcoe->realdev;
+ struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct dcb_app app = {
+ .priority = 0,
+ .protocol = ETH_P_FCOE
+ };
+
+ /* setup DCB priority attributes. */
+ if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+ if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ app.selector = DCB_APP_IDTYPE_ETHTYPE;
+ up = dcb_getapp(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_getapp(netdev, &app);
+ }
+
+ port->priority = ffs(up) ? ffs(up) - 1 : 0;
+ fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ }
+#endif
+}
+
+/**
* fcoe_create() - Create a fcoe interface
* @netdev : The net_device object the Ethernet interface to create on
* @fip_mode: The FIP mode for this creation
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;
+ /* setup DCB priority attributes. */
+ fcoe_dcb_create(fcoe);
+
/* add to lports list */
fcoe_hostlist_add(lport);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71e..e7522dcc296 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index e76107b2ade..865d452542b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -3922,6 +3923,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
return -ENODEV;
}
+
+ pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
+
err = pci_enable_device(h->pdev);
if (err) {
dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 8889b1babca..d570573b796 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2802,6 +2802,11 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
if (ioc->is_driver_loading)
return;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+
fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
@@ -4330,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c41e93..6465dae5883 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
- if (!base_vha->flags.online)
+ if (!base_vha->flags.online) {
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
- else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
- fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
- else
+ return;
+ }
+
+ switch (atomic_read(&base_vha->loop_state)) {
+ case LOOP_UPDATE:
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ break;
+ case LOOP_DOWN:
+ if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_DEAD:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787715c..f3cddd5800c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,17 +12,17 @@
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0116 | |
- * | Mailbox commands | 0x1129 | |
+ * | Mailbox commands | 0x112b | |
* | Device Discovery | 0x2083 | |
* | Queue Command and IO tracing | 0x302e | 0x3008 |
* | DPC Thread | 0x401c | |
* | Async Events | 0x5059 | |
- * | Timer Routines | 0x600d | |
+ * | Timer Routines | 0x6010 | 0x600e,0x600f |
* | User Space Interactions | 0x709d | |
- * | Task Management | 0x8041 | |
+ * | Task Management | 0x8041 | 0x800b |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb051 | |
+ * | ISP82XX Specific | 0xb052 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d8135c9..c0c11afb685 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f187..54ea68cec4c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
+ if (!fw_major_version && ql2xallocfwdump
+ && !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
}
} else {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec89622a0..a4b267e60a3 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
- struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf1aee..7b91b290ffd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
- break;
+ goto check_scsi_status;
}
if (!lscsi_status &&
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9f6ac..82a33533ed2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, base_vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
+ return QLA_FUNCTION_TIMEOUT;
}
/*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112a,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101c,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112b,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101e,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort.\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5ddce..03554934b0a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3817,6 +3817,20 @@ exit:
return rval;
}
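+/*
+ * Helper factored out of the watchdog, chip-reset-cleanup and AER paths:
+ * if a mailbox command is in flight while the firmware is hung, mark it
+ * interrupted and complete it so the waiter is not blocked indefinitely.
+ */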
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ ha->flags.mbox_busy = 0;
+ ql_log(ql_log_warn, vha, 0x6010,
+ "Doing premature completion of mbx command.\n");
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
+ ql_dbg(ql_dbg_timer, vha, 0x6011,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- ql_dbg(ql_dbg_timer, vha, 0x6005,
+ ql_log(ql_log_info, vha, 0x6005,
"dumping hw/fw registers:.\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql_log(ql_log_warn, vha, 0xb052,
+ "Firmware aborted with "
+ "error code 0x00006700. Device is "
+ "being reset.\n");
if (halt_status & HALT_STATUS_UNRECOVERABLE) {
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_log(ql_log_warn, vha, 0x6007,
- "Due to FW hung, doing "
- "premature completion of mbx "
- "command.\n");
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
}
}
}
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
msleep(1000);
if (qla82xx_check_fw_alive(vha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- complete(&ha->mbx_intr_comp);
- }
+ qla82xx_clear_pending_mbx(vha);
break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c199bc..57a226be339 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
0x410000B8, 0x410000BC };
+
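+/*
+ * NIU XG pause-control bits for ports 0 and 1; written to
+ * QLA82XX_CRB_NIU + 0x98 to disable pause transmit when the firmware
+ * is detected as hung (see qla_mbx.c and qla_nx.c).
+ */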
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7bfc62..f9e5b85e84d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
"Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
-int ql2xmdenable;
+int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
- "0 (Default) - MiniDump disabled. "
- "1 - MiniDump enabled.");
+ "0 - MiniDump disabled. "
+ "1 (Default) - MiniDump enabled.");
/*
* SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
+ vha->req = ha->req_q_map[0];
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-/*
- * qla2x00_wait_for_loop_ready
- * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- * to be in LOOP_READY state.
- * Input:
- * ha - pointer to host adapter structure
- *
- * Note:
- * Does context switching-Release SPIN_LOCK
- * (if any) before calling this routine.
- *
- *
- * Return:
- * Success (LOOP_READY) : 0
- * Failed (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
- int return_status = QLA_SUCCESS;
- unsigned long loop_timeout ;
- struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
- /* wait for 5 min at the max for loop to be ready */
- loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
- while ((!atomic_read(&base_vha->loop_down_timer) &&
- atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
- atomic_read(&base_vha->loop_state) != LOOP_READY) {
- if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- msleep(1000);
- if (time_after_eq(jiffies, loop_timeout)) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- }
- return (return_status);
-}
-
static void
sp_get(struct srb *sp)
{
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
- err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x800b,
- "Wait for loop ready failed for cmd=%p.\n", cmd);
- goto eh_reset_failed;
- }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
!= QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
}
- if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
- if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
- ret = SUCCESS;
- }
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+ ret = SUCCESS;
+
if (ret == FAILED)
goto eh_bus_reset_done;
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
- /*
- * Fixme-may be dpc thread is active and processing
- * loop_resync,so wait a while for it to
- * be completed and then issue big hammer.Otherwise
- * it may cause I/O failure as big hammer marks the
- * devices as lost kicking of the port_down_timer
- * while dpc is stuck for the mailbox to complete.
- */
- qla2x00_wait_for_loop_ready(vha);
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha, 0);
- qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
- if (ret != QLA_SUCCESS) {
+ if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
- } else
- qla2x00_wait_for_loop_ready(vha);
}
/* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_dbg(ql_dbg_aer, vha, 0x9001,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command.\n");
- complete(&ha->mbx_intr_comp);
- }
+ ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+ qla82xx_clear_pending_mbx(vha);
}
qla2x00_free_irqs(vha);
pci_disable_device(pdev);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357c1fa..23f33a6d52d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.07-k"
+#define QLA2XXX_VERSION "8.03.07.12-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637bf254..fd5edc6e166 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,7 +147,7 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
-#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
#define LSDW(x) ((u32)((u64)(x)))
@@ -173,6 +173,8 @@
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define DISABLE_ACB_TOV 30
+#define IP_CONFIG_TOV 30
+#define LOGIN_TOV 12
#define MAX_RESET_HA_RETRIES 2
@@ -240,6 +242,45 @@ struct ddb_entry {
uint16_t fw_ddb_index; /* DDB firmware index */
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
+ uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+ struct dev_db_entry fw_ddb_entry;
+ int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+ int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+
+ /* Driver Re-login */
+ unsigned long flags; /* DDB Flags */
+ uint16_t default_relogin_timeout; /* Max time to wait for
+ * relogin to complete */
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+ atomic_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+ struct list_head list;
+ uint16_t fw_ddb_idx;
+ struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+ int port;
+ int tpgt;
+ char ip_addr[DDB_IPADDR_LEN];
+ char iscsi_name[ISCSI_NAME_SIZE];
+ uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
};
/*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
+ uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
#define QLFLASH_WAITING 0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
uint16_t iscsi_pci_func_cnt;
uint8_t model_name[16];
struct completion disable_acb_comp;
+ struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+ uint16_t pri_ddb_idx;
+ uint16_t sec_ddb_idx;
+ int is_reset;
};
struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER 0
+#define RESET_ADAPTER 1
+
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20dbbd..4ac07f88252 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
+#define MAX_DEV_DB_ENTRIES_40XX 256
/*************************************************************************
*
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
uint8_t res14[140]; /* 274-2FF */
};
+#define IP_ADDR_COUNT 4 /* Total of 4 IP addresses supported per interface:
+ * one IPv4, one IPv6 link-local and two IPv6
+ */
+
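+/* Per-interface IP address state, extracted from mailbox status word 1 */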
+#define IP_STATE_MASK 0x0F000000
+#define IP_STATE_SHIFT 24
+
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
/* struct addr_ctrl_blk sec;*/
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d5ea2..d0dd4b33020 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fbaef55..1bdfa8120ac 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
- if (ret == QLA_ERROR)
+ if (ret == QLA_ERROR) {
+ next_idx++;
continue;
+ }
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
}
}
-
/**
* qla4xxx_initialize_adapter - initializes hba
* @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
* This routine performs all of the steps necessary to initialize the adapter.
*
**/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
if (status == QLA_ERROR)
goto exit_init_hba;
- qla4xxx_free_ddb_index(ha);
+ if (is_reset == RESET_ADAPTER)
+ qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
return status;
}
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
- uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
{
- struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
- /* check for out of range index */
- if (fw_ddb_index >= MAX_DDB_ENTRIES)
- goto exit_ddb_event;
-
- /* Get the corresponging ddb entry */
- ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
- /* Device does not currently exist in our database. */
- if (ddb_entry == NULL) {
- ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
- __func__, fw_ddb_index);
-
- if (state == DDB_DS_NO_CONNECTION_ACTIVE)
- clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
- goto exit_ddb_event;
- }
-
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
__func__));
break;
}
+ return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+ /*
+ * This triggers a relogin. After the relogin_timer
+ * expires, the relogin gets scheduled. We must wait a
+ * minimum amount of time since receiving an 0x8014 AEN
+ * with failed device_state or a logout response before
+ * we can issue another relogin.
+ *
+ * Firmware pads this timeout: (time2wait + 1).
+ * The driver's retry of the login should be longer than F/W's,
+ * otherwise F/W will fail the
+ * set_ddb() mbx cmd with 0x4005 since it is still
+ * counting down its time2wait.
+ */
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
+{
+ struct ddb_entry *ddb_entry;
+ int status = QLA_ERROR;
+
+ /* check for out of range index */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES)
+ goto exit_ddb_event;
+
+ /* Get the corresponding ddb entry */
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+ /* Device does not currently exist in our database. */
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
+ }
+
+ ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
return status;
}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues setddb and conn open mbx
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t mbx_sts = 0;
+ int ret;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags))
+ return;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Skipping login to non FLASH DB"));
+ goto exit_login;
+ }
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_login;
+ }
+
+ if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+ ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login;
+
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+ }
+
+ memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+ ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+ ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_dma, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+ goto exit_login;
+ }
+
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+ ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ goto exit_login;
+ }
+
+exit_login:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b8487039..c2593782fbb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}
+ if (is_qla40XX(ha)) {
+ if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as "
+ "adapter removal detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ }
+
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
+ ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b127f3..4169c8baa11 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+ "Set to disable exporting boot targets to sysfs\n"
+ " 0 - Export boot targets\n"
+ " 1 - Do not export boot targets (Default)");
+
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 30 sec.");
+ " Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
- if (adapter_up(ha))
+ if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
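+/*
+ * Reserve a free DDB index: pick the first unset bit in the driver's
+ * ddb_idx_map and request it from the firmware, retrying with the next
+ * free index if the firmware rejects the request.
+ */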
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+ uint32_t mbx_sts = 0;
+ uint16_t tmp_ddb_index;
+ int ret;
+
+get_ddb_index:
+ tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ ret = QLA_ERROR;
+ goto exit_get_ddb_index;
+ }
+
+ if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", tmp_ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+ "DDB index = %d not available trying next\n",
+ tmp_ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ }
+
+ *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+ return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ char *existing_ipaddr,
+ char *user_ipaddr)
+{
+ uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+ char formatted_ipaddr[DDB_IPADDR_LEN];
+ int status = QLA_SUCCESS, ret = 0;
+
+ if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+ ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+ } else {
+ ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+ }
+
+ if (strcmp(existing_ipaddr, formatted_ipaddr))
+ status = QLA_ERROR;
+
+out_match:
+ return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int idx = 0, max_ddbs, rval;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess, *existing_sess;
+ struct iscsi_conn *conn, *existing_conn;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ if (sess->targetname == NULL ||
+ conn->persistent_address == NULL ||
+ conn->persistent_port == 0)
+ return QLA_ERROR;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ existing_sess = ddb_entry->sess->dd_data;
+ existing_conn = ddb_entry->conn->dd_data;
+
+ if (existing_sess->targetname == NULL ||
+ existing_conn->persistent_address == NULL ||
+ existing_conn->persistent_port == 0)
+ continue;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IQN = %s User IQN = %s\n",
+ existing_sess->targetname,
+ sess->targetname));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IP = %s User IP = %s\n",
+ existing_conn->persistent_address,
+ conn->persistent_address));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port = %d User Port = %d\n",
+ existing_conn->persistent_port,
+ conn->persistent_port));
+
+ if (strcmp(existing_sess->targetname, sess->targetname))
+ continue;
+ rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+ existing_conn->persistent_address,
+ conn->persistent_address);
+ if (rval == QLA_ERROR)
+ continue;
+ if (existing_conn->persistent_port != conn->persistent_port)
+ continue;
+ break;
+ }
+
+ if (idx == max_ddbs)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match found in fwdb sessions\n"));
+ return QLA_SUCCESS;
+}
+
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct scsi_qla_host *ha;
struct qla_endpoint *qla_ep;
struct ddb_entry *ddb_entry;
- uint32_t ddb_index;
- uint32_t mbx_sts = 0;
+ uint16_t ddb_index;
struct iscsi_session *sess;
struct sockaddr *dst_addr;
int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
-get_ddb_index:
- ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
- if (ddb_index >= MAX_DDB_ENTRIES) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free DDB index not available\n"));
- return NULL;
- }
-
- if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
- goto get_ddb_index;
-
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Found a free DDB index at %d\n", ddb_index));
- ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
- if (ret == QLA_ERROR) {
- if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
- ql4_printk(KERN_INFO, ha,
- "DDB index = %d not available trying next\n",
- ddb_index);
- goto get_ddb_index;
- }
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free FW DDB not available\n"));
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
return NULL;
- }
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->sess = cls_sess;
+ ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+ ddb_entry->ddb_change = qla4xxx_ddb_change;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
+ if (!cls_conn)
+ return NULL;
+
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- struct dev_db_entry *fw_ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t mbx_sts = 0;
int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ /* Check if we have a matching FW DDB; if so, do not log in to this
+ * target, as that could cause the target to log out the previous
+ * connection.
+ */
+ ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+ if (ret == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "Session already exist in FW.\n");
+ ret = -EEXIST;
+ goto exit_conn_start;
+ }
+
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto exit_conn_start;
}
ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
if (mbx_sts)
if (ddb_entry->fw_ddb_device_state ==
DDB_DS_SESSION_ACTIVE) {
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
goto exit_set_param;
}
@@ -1167,8 +1311,9 @@ exit_set_param:
ret = 0;
exit_conn_start:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- fw_ddb_entry, fw_ddb_entry_dma);
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
return -ENOSYS;
}
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ struct iscsi_cls_session *cls_sess,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int buflen = 0;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ char ip_addr[DDB_IPADDR_LEN];
+ uint16_t options = 0;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+ sess->initial_r2t_en =
+ (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+ sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+ conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+ (char *)fw_ddb_entry->iscsi_name, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+ (char *)ha->name_string, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_fwddb_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_fwddb_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+
+ cls_conn = ddb_entry->conn;
+
+ /* Update params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return;
+ goto exit_session_conn_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
- return;
+ goto exit_session_conn_param;
}
cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
+ /* Update timers after login */
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
/* Update params */
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
/*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
vfree(ha->chap_list);
ha->chap_list = NULL;
+ if (ha->fw_ddb_dma_pool)
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
+
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
goto mem_alloc_error_exit;
}
+ ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+ DDB_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->fw_ddb_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: fw_ddb_dma_pool allocation failed..\n",
+ __func__);
+ goto mem_alloc_error_exit;
+ }
+
return QLA_SUCCESS;
mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+ INVALID_ENTRY) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+ 0) {
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ INVALID_ENTRY);
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index [%d] login device\n",
+ __func__, ddb_entry->fw_ddb_index));
+ } else
+ atomic_dec(&ddb_entry->retry_relogin_timer);
+ }
+ }
+
+ /* Wait for relogin to timeout */
+ if (atomic_read(&ddb_entry->relogin_timer) &&
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ /*
+ * If the relogin times out and the device is
+ * still NOT ONLINE then try and relogin again.
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+ atomic_inc(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+ }
+ }
+}
+
/**
* qla4xxx_timer - checks every second for work to do.
* @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
int start_dpc = 0;
uint16_t w;
+ iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+ if (ddb_entry->ddb_type == FLASH_DDB)
+ iscsi_block_session(ddb_entry->sess);
+ else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
/**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
}
/* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
iscsi_unblock_session(ddb_entry->sess);
} else {
/* Trigger relogin */
- iscsi_session_failure(cls_session->dd_data,
- ISCSI_ERR_CONN_FAILED);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ } else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
}
}
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+
+ /* Start scan target */
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " start scan\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock user space session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+
+ return QLA_SUCCESS;
+}
+
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ uint16_t relogin_timer;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ relogin_timer = max(ddb_entry->default_relogin_timeout,
+ (uint16_t)RELOGIN_TOV);
+ atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+ ddb_entry->fw_ddb_index, relogin_timer));
+
+ qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!(ddb_entry->ddb_type == FLASH_DDB))
+ return;
+
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "relogin issued\n"));
+ qla4xxx_relogin_flash_ddb(cls_sess);
+ }
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- relogin device? --- */
+ if (adapter_up(ha) &&
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+ }
+
/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
* fatal error recovery. Therefore, the driver must
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
-
- qla4xxx_relogin_all_devices(ha);
+ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+ qla4xxx_build_ddb_list(ha, ha->is_reset);
+ iscsi_host_for_each_session(ha->host,
+ qla4xxx_login_flash_ddb);
+ } else
+ qla4xxx_relogin_all_devices(ha);
}
}
}
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
+
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
return ret;
}
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+
if (ddb_index[0] == 0xffff)
goto sec_target;
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
struct iscsi_boot_kobj *boot_kobj;
if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
- return 0;
+ return QLA_ERROR;
+
+ if (ql4xdisablesysfsboot) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: syfsboot disabled - driver will trigger login"
+ "and publish session for discovery .\n", __func__);
+ return QLA_SUCCESS;
+ }
+
ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
if (!boot_kobj)
goto put_host;
- return 0;
+ return QLA_SUCCESS;
put_host:
scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
exit_chap_list:
dma_free_coherent(&ha->pdev->dev, chap_size,
chap_flash_data, chap_dma);
- return;
}
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = ddb_entry->ha;
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ tddb->tpgt = sess->tpgt;
+ tddb->port = conn->persistent_port;
+ strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ uint16_t options = 0;
+
+ tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+ min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ return QLA_ERROR;
+
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+ return QLA_ERROR;
+
+ if (old_tddb->port != new_tddb->port)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+ old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+ old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+ new_tddb->ip_addr, new_tddb->iscsi_name));
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct ddb_entry *ddb_entry;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int idx;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+ /* Free up the normaltargets list */
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ list_del_init(&nt_ddb_idx->list);
+ vfree(nt_ddb_idx);
+ }
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr *dst_addr;
+ char *ip;
+
+ /* TODO: need to destroy the iscsi_endpoint on unload */
+ dst_addr = vmalloc(sizeof(*dst_addr));
+ if (!dst_addr)
+ return NULL;
+
+ if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ dst_addr->sa_family = AF_INET6;
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+ } else {
+ dst_addr->sa_family = AF_INET;
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+ addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+ }
+
+ ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ vfree(dst_addr);
+ return ep;
+}
+
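+/*
+ * When boot targets are exported through sysfs (ql4xdisablesysfsboot == 0),
+ * skip the primary/secondary boot DDB indices while building the flash
+ * DDB list.
+ */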
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+ if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+ return QLA_ERROR;
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ ddb_entry->ddb_type = FLASH_DDB;
+ ddb_entry->fw_ddb_index = INVALID_ENTRY;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+ ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
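+/*
+ * Poll the state of all four IP interfaces until each reaches a settled
+ * state or IP_CONFIG_TOV expires; conn open mailbox commands fail while
+ * addresses are still being configured.
+ */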
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+ uint32_t idx = 0;
+ uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+ uint32_t sts[MBOX_REG_COUNT];
+ uint32_t ip_state;
+ unsigned long wtime;
+ int ret;
+
+ wtime = jiffies + (HZ * IP_CONFIG_TOV);
+ do {
+ for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+ if (ip_idx[idx] == -1)
+ continue;
+
+ ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+ if (ret == QLA_ERROR) {
+ ip_idx[idx] = -1;
+ continue;
+ }
+
+ ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Waiting for IP state for idx = %d, state = 0x%x\n",
+ ip_idx[idx], ip_state));
+ if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+ ip_state == IP_ADDRSTATE_INVALID ||
+ ip_state == IP_ADDRSTATE_PREFERRED ||
+ ip_state == IP_ADDRSTATE_DEPRICATED ||
+ ip_state == IP_ADDRSTATE_DISABLING)
+ ip_idx[idx] = -1;
+
+ }
+
+ /* Break if all IP states checked */
+ if ((ip_idx[0] == -1) &&
+ (ip_idx[1] == -1) &&
+ (ip_idx[2] == -1) &&
+ (ip_idx[3] == -1))
+ break;
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ int max_ddbs;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id;
+ struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32, tmo = 0;
+ uint32_t initial_cmdsn = 0;
+ struct list_head list_st, list_nt; /* List of sendtargets */
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ int fw_idx_size;
+ unsigned long wtime;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_ddb_list;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_st;
+
+ /* Check if ST, add to the list_st */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+ goto continue_next_st;
+
+ st_ddb_idx = vzalloc(fw_idx_size);
+ if (!st_ddb_idx)
+ break;
+
+ st_ddb_idx->fw_ddb_idx = idx;
+
+ list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+ if (next_idx == 0)
+ break;
+ }
+
+ /* Before issuing the conn open mbox, ensure all IP states are configured.
+ * Note: conn open fails if the IPs are not configured.
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait for all sendtargets to complete (minimum 12 second wait) */
+ tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+ list) {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ st_ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx,
+ &state, &conn_err, NULL,
+ NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+ }
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_nt;
+
+ /* Check if NT, then add it to the list */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_nt;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n",
+ idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
+ }
+ list_add_tail(&nt_ddb_idx->list, &list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha,
+ fw_ddb_entry) == QLA_SUCCESS)
+ goto continue_next_nt;
+ }
+
+ /* Create the session object with INVALID_ENTRY;
+ * the target_id gets set when we issue the login
+ */
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+ ha->host, cmds_max,
+ sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess)
+ goto exit_ddb_list;
+
+ /*
+ * iscsi_session_setup increments the driver reference
+ * count, which would prevent the driver from being
+ * unloaded, so call module_put() to decrement the
+ * reference count.
+ */
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess,
+ sizeof(struct qla_conn),
+ conn_id);
+ if (!cls_conn)
+ goto exit_ddb_list;
+
+ ddb_entry->conn = cls_conn;
+
+ /* Set up ep for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "Unable to get ep\n"));
+ }
+
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+ cls_conn);
+
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices
+ * by short-circuiting the logic of setting a
+ * relogin timer - instead set the flags to
+ * initiate login right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ }
+ }
+continue_next_nt:
+ if (next_idx == 0)
+ break;
+ }
+exit_ddb_list:
+ qla4xxx_free_nt_list(&list_nt);
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+ qla4xxx_free_ddb_index(ha);
+}
+
+
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* firmware
* NOTE: interrupts enabled upon successful completion
*/
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
- qla4xxx_create_chap_list(ha);
-
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
__func__);
+ /* Perform the build ddb list and login to each */
+ qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+ iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+ qla4xxx_create_chap_list(ha);
+
qla4xxx_create_ifaces(ha);
return 0;
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ int options;
+ int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if ((ddb_entry != NULL) &&
+ (ddb_entry->ddb_type == FLASH_DDB)) {
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+ == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+ __func__);
+
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ /*
+ * We decremented the driver reference count when we set up
+ * the session so that driver unload would be seamless
+ * without actually destroying the session.
+ */
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ iscsi_session_teardown(ddb_entry->sess);
+ }
+ }
+}
/**
* qla4xxx_remove_adapter - callback function to remove adapter.
* @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
/* destroy iface from sysfs */
qla4xxx_destroy_ifaces(ha);
- if (ha->boot_kset)
+ if ((!ql4xdisablesysfsboot) && ha->boot_kset)
iscsi_boot_destroy_kset(ha->boot_kset);
+ qla4xxx_destroy_fw_ddb_session(ha);
+
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d3f53..5254e57968f 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06bc26554a6..f85cfa6c47b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1409,6 +1409,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
blk_start_request(req);
+ scmd_printk(KERN_INFO, cmd, "killing request\n");
+
sdev = cmd->device;
starget = scsi_target(sdev);
shost = sdev->host;
@@ -1490,7 +1492,6 @@ static void scsi_request_fn(struct request_queue *q)
struct request *req;
if (!sdev) {
- printk("scsi: killing requests for dead queue\n");
while ((req = blk_peek_request(q)) != NULL)
scsi_kill_request(req, q);
return;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 72273a0e566..b3c6d957fbd 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -319,11 +319,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
return sdev;
out_device_destroy:
- scsi_device_set_state(sdev, SDEV_DEL);
- transport_destroy_device(&sdev->sdev_gendev);
- put_device(&sdev->sdev_dev);
- scsi_free_queue(sdev->request_queue);
- put_device(&sdev->sdev_gendev);
+ __scsi_remove_device(sdev);
out:
if (display_failure_msg)
printk(ALLOC_FAILURE_MSG, __func__);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df541..8ba4510a951 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
depends on FSL_SOC
config SPI_FSL_SPI
- tristate "Freescale SPI controller"
+ bool "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
config SPI_FSL_ESPI
- tristate "Freescale eSPI controller"
+ bool "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5c..acc88b4d286 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41b..0094c645ff0 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __init
+static int __devinit
spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
u16 *res_flags)
{
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c..182e9c87382 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
goto err_clk;
}
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
nuc900_init_spi(hw);
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f103e470cb6..5559b229919 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2184,6 +2184,12 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_clk_prep;
}
+ status = clk_enable(pl022->clk);
+ if (status) {
+ dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
+ goto err_no_clk_en;
+ }
+
/* Disable SSP */
writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
SSP_CR1(pl022->virtbase));
@@ -2237,6 +2243,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
free_irq(adev->irq[0], pl022);
err_no_irq:
+ clk_disable(pl022->clk);
+ err_no_clk_en:
clk_unprepare(pl022->clk);
err_clk_prep:
clk_put(pl022->clk);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 21d8c1c16cd..5e78c77d5a0 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
insns =
- kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+ kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
if (!insns) {
DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+ struct comedi_async *async;
+ struct comedi_device *dev;
+
+ async = area->vm_private_data;
+ dev = async->subdevice->device;
+
+ mutex_lock(&dev->mutex);
+ async->mmap_count++;
+ mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
}
static struct vm_operations_struct comedi_vm_ops = {
- .close = comedi_unmap,
+ .open = comedi_vm_open,
+ .close = comedi_vm_close,
};
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_async *async = NULL;
unsigned long start = vma->vm_start;
unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
int i;
int retval;
struct comedi_subdevice *s;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+
+ dev_file_info = comedi_get_device_file_info(minor);
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *read_subdev;
struct comedi_subdevice *write_subdev;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy)
break;
if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy) {
retval = 0;
break;
@@ -1885,11 +1924,17 @@ ok:
static int comedi_close(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *s = NULL;
int i;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
static int comedi_fasync(int fd, struct file *file, int on)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
return fasync_helper(fd, file, on, &dev->async_queue);
}
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a8fea9a9173..6144afb8cba 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
/*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
Description: University of Stirling USB DAQ & INCITE Technology Limited
Devices: [ITL] USB-DUX (usbduxsigma.o)
Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
Status: testing
*/
/*
@@ -44,6 +44,7 @@ Status: testing
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
* 0.5: various bug fixes, health check at startup
+ * 0.6: corrected wrong input range
*/
/* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
/* comedi constants */
static const struct comedi_lrange range_usbdux_ai_range = { 1, {
BIP_RANGE
- (2.65)
+ (2.65/2.0)
}
};
diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig
index 9e1864c6dfd..8190f2aaf53 100644
--- a/drivers/staging/et131x/Kconfig
+++ b/drivers/staging/et131x/Kconfig
@@ -1,6 +1,7 @@
config ET131X
tristate "Agere ET-1310 Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && NET && NETDEVICES
+ select PHYLIB
default n
---help---
This driver supports Agere ET-1310 ethernet adapters.
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index f5f44a02456..0c1c6ca8c37 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
+#define ET131X_PM_OPS (&et131x_pm_ops)
+#else
+#define ET131X_PM_OPS NULL
+#endif
+
/* ISR functions */
/**
@@ -5470,12 +5476,6 @@ err_out:
return result;
}
-static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
-#define ET131X_PM_OPS (&et131x_pm_ops)
-#else
-#define ET131X_PM_OPS NULL
-#endif
-
static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 326e967d54e..aec9311b108 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = {
static int iio_event_getfd(struct iio_dev *indio_dev)
{
- if (indio_dev->event_interface == NULL)
+ struct iio_event_interface *ev_int = indio_dev->event_interface;
+ int fd;
+
+ if (ev_int == NULL)
return -ENODEV;
- mutex_lock(&indio_dev->event_interface->event_list_lock);
- if (test_and_set_bit(IIO_BUSY_BIT_POS,
- &indio_dev->event_interface->flags)) {
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
+ mutex_lock(&ev_int->event_list_lock);
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ mutex_unlock(&ev_int->event_list_lock);
return -EBUSY;
}
- mutex_unlock(&indio_dev->event_interface->event_list_lock);
- return anon_inode_getfd("iio:event",
- &iio_event_chrdev_fileops,
- indio_dev->event_interface, O_RDONLY);
+ mutex_unlock(&ev_int->event_list_lock);
+ fd = anon_inode_getfd("iio:event",
+ &iio_event_chrdev_fileops, ev_int, O_RDONLY);
+ if (fd < 0) {
+ mutex_lock(&ev_int->event_list_lock);
+ clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+ mutex_unlock(&ev_int->event_list_lock);
+ }
+ return fd;
}
static int __init iio_init(void)
diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c
index d335c7d6fa0..828526d4c28 100644
--- a/drivers/staging/media/as102/as102_drv.c
+++ b/drivers/staging/media/as102/as102_drv.c
@@ -32,8 +32,8 @@
#include "as102_fw.h"
#include "dvbdev.h"
-int debug;
-module_param_named(debug, debug, int, 0644);
+int as102_debug;
+module_param_named(debug, as102_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)");
int dual_tuner;
diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h
index bcda635b5a9..fd33f5a12dc 100644
--- a/drivers/staging/media/as102/as102_drv.h
+++ b/drivers/staging/media/as102/as102_drv.h
@@ -37,7 +37,8 @@ extern struct spi_driver as102_spi_driver;
#define DRIVER_FULL_NAME "Abilis Systems as10x usb driver"
#define DRIVER_NAME "as10x_usb"
-extern int debug;
+extern int as102_debug;
+#define debug as102_debug
#define dprintk(debug, args...) \
do { if (debug) { \
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index b445cd63f90..2542c374390 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -275,7 +275,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
- hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
+ hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset));
hw_buffer.s.size = fs->size;
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056..5385da2e9cd 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 480b0ed2e4d..115635f9502 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
index 5cde96b2e6e..5c2a15b42df 100644
--- a/drivers/staging/slicoss/Kconfig
+++ b/drivers/staging/slicoss/Kconfig
@@ -1,6 +1,6 @@
config SLICOSS
tristate "Alacritech Gigabit IS-NIC support"
- depends on PCI && X86
+ depends on PCI && X86 && NET
default n
help
This driver supports Alacritech's IS-NIC gigabit ethernet cards.
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a..7eb56178fb6 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS 4
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
*/
void dsp_clk_exit(void)
{
+ int i;
+
dsp_clock_disable_all(dsp_clocks);
+ for (i = 0; i < DM_TIMER_CLOCKS; i++)
+ omap_dm_timer_free(timer[i]);
+
clk_put(iva2_clk);
clk_put(ssi.sst_fck);
clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
void dsp_clk_init(void)
{
static struct platform_device dspbridge_device;
+ int i, id;
dspbridge_device.dev.bus = &platform_bus_type;
+ for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+ timer[i] = omap_dm_timer_request_specific(id);
+
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
clk_enable(iva2_clk);
break;
case GPT_CLK:
- timer[clk_id - 1] =
- omap_dm_timer_request_specific(DMT_ID(clk_id));
+ status = omap_dm_timer_start(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
clk_disable(iva2_clk);
break;
case GPT_CLK:
- omap_dm_timer_free(timer[clk_id - 1]);
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c..76cfc6edecd 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-
-#ifdef MODULE
#include <linux/module.h>
-#endif
-
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09c44abb89e..3872b8cccdc 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ unsigned long flags;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
{
struct vhci_unlink *unlink;
struct urb *urb;
+ unsigned long flags;
usbip_dump_header(pdu);
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271..8599545cdf9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
- if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
-
send_check_condition = 1;
goto attach_cmd;
}
@@ -1044,6 +1037,8 @@ done:
*/
send_check_condition = 1;
} else {
+ cmd->data_length = cmd->se_cmd.data_length;
+
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
- if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f..1cd6ce373b8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int j = DIV_ROUND_UP(len, 2);
+ int j = DIV_ROUND_UP(len, 2), rc;
- hex2bin(dst, src, j);
+ rc = hex2bin(dst, src, j);
+ if (rc < 0)
+ pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae..f1a02dad05a 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
- u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
- atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
- atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e50..101b1beb3bc 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->se_cmd_flags &
- SCF_SCSI_RESERVATION_CONFLICT) {
+ if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93c..d734bdec24f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
- return -1;
+ return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9..98936cb7c29 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
- login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
- memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6..81d5832fbbd 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
- */
if (scsi_bidi_cmnd(sc))
- se_cmd->t_tasks_bidi = 1;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret == -ENOMEM) {
- /* Out of Resources */
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -EINVAL) {
- /*
- * Handle case for SAM_STAT_RESERVATION_CONFLICT
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- /*
- * Otherwise, return SAM_STAT_CHECK_CONDITION and return
- * sense data.
- */
- return PYX_TRANSPORT_USE_SENSE_REASON;
- }
-
+ if (ret != 0)
+ return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
- if (se_cmd->t_tasks_bidi) {
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
- ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- return 0;
}
/*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- int host_no = tl_hba->sh->host_no;
+
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
-
- pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8..1dcbef499d6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+ if (!l_port) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
/*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
} else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
}
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
- atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
- atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b824..831468b3163 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[2] = 0x3c;
+ buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
- return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ return -EINVAL;
}
offset += length;
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+ struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
dev->transport->do_sync_cache(task);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4..93d4f6a1b79 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_device_lock);
- list_add_tail(&se_dev->se_dev_node, &se_dev_list);
- spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_device_lock);
- list_del(&se_dev->se_dev_node);
- spin_unlock(&se_device_lock);
-
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f..9b863942547 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -708,7 +705,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == 0) {
+ if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == 0) {
+ if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bf..b4864fba4ef 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- cmd->t_tasks_fua) {
+ (cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
}
- if (ret < 0)
+ if (ret < 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+ }
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe2926..4aa99220443 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
+ (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ if (!bio) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
+ }
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
blk_finish_plug(&plug);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54..95dee7074ae 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out_unlock;
}
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
if (!spec_i_pt) {
/*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
} else {
/*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
}
/*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* The provided reservation key much match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3553,7 +3615,8 @@ after_iport_check:
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3585,7 +3649,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3603,7 +3668,8 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe42..8b15e56b038 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
- int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return ret;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (ret < 0) {
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return ret;
+ }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f1..02e51faa2f4 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-/* rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
+ struct scatterlist *rd_sg;
+ struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
+ u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = task->task_sg;
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
- pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
- " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- src_offset = rd_offset;
+ pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ dev->rd_dev_id, read_rd ? "Read" : "Write",
+ task->task_lba, req->rd_size, req->rd_page,
+ rd_offset);
+ src_len = PAGE_SIZE - rd_offset;
+ sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+ read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
- if ((sg_d[i].length - dst_offset) <
- (sg_s[j].length - src_offset)) {
- length = (sg_d[i].length - dst_offset);
-
- pr_debug("Step 1 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
- sg_s[j].length);
- pr_debug("Step 1 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i++]) + dst_offset;
- BUG_ON(!dst);
-
- src = sg_virt(&sg_s[j]) + src_offset;
- BUG_ON(!src);
-
- dst_offset = 0;
- src_offset = length;
- page_end = 0;
- } else {
- length = (sg_s[j].length - src_offset);
-
- pr_debug("Step 2 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset,
- j, sg_s[j].length);
- pr_debug("Step 2 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i]) + dst_offset;
- BUG_ON(!dst);
-
- if (sg_d[i].length == length) {
- i++;
- dst_offset = 0;
- } else
- dst_offset = length;
-
- src = sg_virt(&sg_s[j++]) + src_offset;
- BUG_ON(!src);
-
- src_offset = 0;
- page_end = 1;
- }
+ u32 len;
+ void *rd_addr;
- memcpy(dst, src, length);
+ sg_miter_next(&m);
+ len = min((u32)m.length, src_len);
+ m.consumed = len;
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
+ rd_addr = sg_virt(rd_sg) + rd_offset;
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
+ if (read_rd)
+ memcpy(m.addr, rd_addr, len);
+ else
+ memcpy(rd_addr, m.addr, len);
- if (!page_end)
+ req->rd_size -= len;
+ if (!req->rd_size)
continue;
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ src_len -= len;
+ if (src_len) {
+ rd_offset += len;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- sg_s = &table->sg_table[j = 0];
- }
-
- return 0;
-}
-
-/* rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
- struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
- u32 rd_offset = req->rd_offset;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
- sg_s = task->task_sg;
-
- pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
- " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- dst_offset = rd_offset;
-
- while (req->rd_size) {
- if ((sg_s[i].length - src_offset) <
- (sg_d[j].length - dst_offset)) {
- length = (sg_s[i].length - src_offset);
-
- pr_debug("Step 1 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 1 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i++]) + src_offset;
- BUG_ON(!src);
-
- dst = sg_virt(&sg_d[j]) + dst_offset;
- BUG_ON(!dst);
-
- src_offset = 0;
- dst_offset = length;
- page_end = 0;
- } else {
- length = (sg_d[j].length - dst_offset);
-
- pr_debug("Step 2 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 2 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i]) + src_offset;
- BUG_ON(!src);
-
- if (sg_s[i].length == length) {
- i++;
- src_offset = 0;
- } else
- src_offset = length;
-
- dst = sg_virt(&sg_d[j++]) + dst_offset;
- BUG_ON(!dst);
-
- dst_offset = 0;
- page_end = 1;
- }
-
- memcpy(dst, src, length);
-
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
-
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
-
- if (!page_end)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ /* rd page completed, next one please */
+ req->rd_page++;
+ rd_offset = 0;
+ src_len = PAGE_SIZE;
+ if (req->rd_page <= table->page_end_offset) {
+ rd_sg++;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
+ if (!table) {
+ sg_miter_stop(&m);
return -EINVAL;
+ }
- sg_d = &table->sg_table[j = 0];
+ /* since we increment, the first sg entry is correct */
+ rd_sg = table->sg_table;
}
-
+ sg_miter_stop(&m);
return 0;
}
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
- unsigned long long lba;
+ u64 tmp;
int ret;
- req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ req->rd_offset = do_div(tmp, PAGE_SIZE);
+ req->rd_page = tmp;
req->rd_size = task->task_size;
- if (task->task_data_direction == DMA_FROM_DEVICE)
- ret = rd_MEMCPY_read(req);
- else
- ret = rd_MEMCPY_write(req);
-
+ ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df629..684522805a1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
- /*
- * Signal that the command has failed via cmd->se_cmd_flags,
- */
- transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f..0257658e2e3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_cmd_cache = kmem_cache_create("se_cmd_cache",
- sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!se_cmd_cache) {
- pr_err("kmem_cache_create for struct se_cmd failed\n");
- goto out;
- }
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out_free_cmd_cache;
+ goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
- kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
@@ -191,7 +182,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_ILLEGAL_REQUEST;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
}
transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd, 1, 1);
+ transport_generic_request_failure(cmd);
}
/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, 0,
- (cmd->data_direction != DMA_TO_DEVICE));
- }
+ if (ret < 0)
+ transport_generic_request_failure(cmd);
+
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(
- struct se_cmd *cmd,
- int complete,
- int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state,
- cmd->transport_error_status);
+ cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
- if (complete) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
- switch (cmd->transport_error_status) {
- case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- break;
- case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- break;
- case PYX_TRANSPORT_INVALID_CDB_FIELD:
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- break;
- case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- break;
- case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
- if (!sc)
- transport_new_cmd_failure(cmd);
- /*
- * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
- * we force this session to fall back to session
- * recovery.
- */
- cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
- cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
- goto check_stop;
- case PYX_TRANSPORT_LU_COMM_FAILURE:
- case PYX_TRANSPORT_ILLEGAL_REQUEST:
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
- case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- break;
- case PYX_TRANSPORT_WRITE_PROTECTED:
- cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ switch (cmd->scsi_sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_INVALID_CDB_FIELD:
+ case TCM_INVALID_PARAMETER_LIST:
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
break;
- case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
- case PYX_TRANSPORT_USE_SENSE_REASON:
- /*
- * struct se_cmd->scsi_sense_reason already set
- */
- break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0],
- cmd->transport_error_status);
+ cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
* transport_send_check_condition_and_sense() after handling
* possible unsoliticied write data payloads.
*/
- if (!sc && !cmd->se_tfo->new_cmd_map)
- transport_new_cmd_failure(cmd);
- else {
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- }
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, 0, 1);
+ if (se_dev_check_online(cmd->se_dev) != 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ transport_generic_request_failure(cmd);
return 0;
}
@@ -2163,14 +2102,13 @@ check_depth:
else
error = dev->transport->do_task(task);
if (error != 0) {
- cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
+ transport_generic_request_failure(cmd);
}
goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
return 0;
}
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Any unsolicited data will get dumped for failed command inside of
- * the fabric plugin
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
/*
* Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- /*
- * For UA Interlock Code 11b, a RESERVATION CONFLICT will
- * establish a UNIT ATTENTION with PREVIOUS RESERVATION
- * CONFLICT STATUS.
- *
- * See spc4r17, section 7.4.6 Control Mode Page, Table 349
- */
- if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -EINVAL;
-}
-
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0)
- return transport_handle_reservation_conflict(cmd);
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+ if (cdb[10] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- return ret;
+ goto out_fail;
}
/*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
else if (ret < 0)
return ret;
- return PYX_TRANSPORT_WRITE_PENDING;
+ return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- transport_new_cmd_failure(cmd);
- return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- set_user_nice(current, -20);
-
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
+ break;
}
break;
case TRANSPORT_PROCESS_WRITE:
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c61..71fc9cea5dc 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca4..9402b7387ca 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- pr_debug("del lport %s\n",
- config_item_name(&wwn->wwn_group.cg_item));
+ pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 435f6facbc2..44fbebab507 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -46,6 +46,7 @@ static inline char __dcc_getchar(void)
asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
: "=r" (__c));
+ isb();
return __c;
}
@@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c)
asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
: /* no output register */
: "r" (c));
+ isb();
}
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5f479dada6f..925a1e547a8 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60
Support for the IFX6x60 modem devices on Intel MID platforms.
config SERIAL_PCH_UART
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART"
depends on PCI
select SERIAL_CORE
help
@@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART
which is an IOH(Input/Output Hub) for x86 embedded processor.
Enabling PCH_DMA, this PCH UART works as DMA mode.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver also can be used for LAPIS Semiconductor IOH(Input/
+ Output Hub), ML7213, ML7223 and ML7831.
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+ for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+ ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
config SERIAL_MSM_SMD
bool "Enable tty device interface for some SMD ports"
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4a0f86fa1e9..4c823f341d9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
- if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
+ if ((rs485conf->delay_rts_after_send) > 0)
UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
@@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ if ((atmel_port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
@@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
- if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ if ((atmel_port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
@@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port,
rs485conf->delay_rts_after_send = rs485_delay[1];
rs485conf->flags = 0;
- if (rs485conf->delay_rts_before_send == 0 &&
- rs485conf->delay_rts_after_send == 0) {
- rs485conf->flags |= SER_RS485_RTS_ON_SEND;
- } else {
- if (rs485conf->delay_rts_before_send)
- rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND;
- if (rs485conf->delay_rts_after_send)
- rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
- }
-
if (of_get_property(np, "rs485-rx-during-tx", NULL))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index b7435043f2f..1dfba7b779c 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty,
e100_disable_rx(info);
e100_enable_rx_irq(info);
#endif
- if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
- (info->rs485.delay_rts_before_send > 0))
- msleep(info->rs485.delay_rts_before_send);
+ if (info->rs485.delay_rts_before_send > 0)
+ msleep(info->rs485.delay_rts_before_send);
}
#endif /* CONFIG_ETRAX_RS485 */
@@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty,
rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
rs485data.flags = 0;
- if (rs485data.delay_rts_before_send != 0)
- rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
- else
- rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
if (rs485ctrl.enabled)
rs485data.flags |= SER_RS485_ENABLED;
@@ -4531,7 +4526,6 @@ static int __init rs_init(void)
/* Set sane defaults */
info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
- info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
info->rs485.delay_rts_before_send = 0;
info->rs485.flags &= ~(SER_RS485_ENABLED);
#endif
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 286c386d9c4..e272d3919c6 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct uart_hsu_port *up =
container_of(port, struct uart_hsu_port, port);
- struct tty_struct *tty = port->state->port.tty;
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
@@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
}
/* CMSPAR isn't supported by this driver */
- if (tty)
- tty->termios->c_cflag &= ~CMSPAR;
+ termios->c_cflag &= ~CMSPAR;
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 21febef926a..d6aba8c087e 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1,5 +1,5 @@
/*
- *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
*This program is free software; you can redistribute it and/or modify
*it under the terms of the GNU General Public License as published by
@@ -46,8 +46,8 @@ enum {
/* Set the max number of UART port
* Intel EG20T PCH: 4 port
- * OKI SEMICONDUCTOR ML7213 IOH: 3 port
- * OKI SEMICONDUCTOR ML7223 IOH: 2 port
+ * LAPIS Semiconductor ML7213 IOH: 3 port
+ * LAPIS Semiconductor ML7223 IOH: 2 port
*/
#define PCH_UART_NR 4
@@ -258,6 +258,8 @@ enum pch_uart_num_t {
pch_ml7213_uart2,
pch_ml7223_uart0,
pch_ml7223_uart1,
+ pch_ml7831_uart0,
+ pch_ml7831_uart1,
};
static struct pch_uart_driver_data drv_dat[] = {
@@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = {
[pch_ml7213_uart2] = {PCH_UART_2LINE, 2},
[pch_ml7223_uart0] = {PCH_UART_8LINE, 0},
[pch_ml7223_uart1] = {PCH_UART_2LINE, 1},
+ [pch_ml7831_uart0] = {PCH_UART_8LINE, 0},
+ [pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
};
static unsigned int default_baud = 9600;
@@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port)
dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n",
__func__);
dma_release_channel(priv->chan_tx);
+ priv->chan_tx = NULL;
return;
}
@@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port)
dev_err(priv->port.dev,
"pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
- if (priv->use_dma_flag)
- pch_free_dma(port);
+ pch_free_dma(port);
free_irq(priv->port.irq, priv);
}
@@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port,
if (rtn)
goto out;
+ pch_uart_set_mctrl(&priv->port, priv->port.mctrl);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
.driver_data = pch_ml7223_uart0},
{PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D),
.driver_data = pch_ml7223_uart1},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811),
+ .driver_data = pch_ml7831_uart0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812),
+ .driver_data = pch_ml7831_uart1},
{0,},
};
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 512c49f98e8..8e0924f5544 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -36,6 +36,7 @@
#include <linux/kmod.h>
#include <linux/nsproxy.h>
+#include <linux/ratelimit.h>
/*
* This guards the refcounted line discipline lists. The lock
@@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
/**
* tty_ldisc_wait_idle - wait for the ldisc to become idle
* @tty: tty to wait for
+ * @timeout: for how long to wait at most
*
* Wait for the line discipline to become idle. The discipline must
* have been halted for this to guarantee it remains idle.
*/
-static int tty_ldisc_wait_idle(struct tty_struct *tty)
+static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
{
- int ret;
+ long ret;
ret = wait_event_timeout(tty_ldisc_idle,
- atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+ atomic_read(&tty->ldisc->users) == 1, timeout);
if (ret < 0)
return ret;
return ret > 0 ? 0 : -EBUSY;
@@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
tty_ldisc_flush_works(tty);
- retval = tty_ldisc_wait_idle(tty);
+ retval = tty_ldisc_wait_idle(tty, 5 * HZ);
tty_lock();
mutex_lock(&tty->ldisc_mutex);
@@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
if (IS_ERR(ld))
return -1;
- WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
-
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
@@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty)
tty_unlock();
cancel_work_sync(&tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
-
+retry:
tty_lock();
mutex_lock(&tty->ldisc_mutex);
@@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
it means auditing a lot of other paths so this is
a FIXME */
if (tty->ldisc) { /* Not yet closed */
+ if (atomic_read(&tty->ldisc->users) != 1) {
+ char cur_n[TASK_COMM_LEN], tty_n[64];
+ long timeout = 3 * HZ;
+ tty_unlock();
+
+ while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ printk_ratelimited(KERN_WARNING
+ "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
+ __func__, get_task_comm(cur_n, current),
+ tty_name(tty, tty_n));
+ }
+ mutex_unlock(&tty->ldisc_mutex);
+ goto retry;
+ }
+
if (reset == 0) {
if (!tty_ldisc_reinit(tty, tty->termios->c_line))
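The tty_ldisc hunks above turn the hangup path into a wait-and-retry loop: wait for the line-discipline user count to drop with a finite timeout, warn (rate-limited) if it takes too long, then retry with an effectively unbounded timeout. Below is a minimal userspace sketch of that wait/retry shape only, assuming a polled atomic counter in place of wait_event_timeout() and a plain fprintf() in place of printk_ratelimited(); it is not the kernel implementation.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_int ldisc_users = 2;        /* stand-in for tty->ldisc->users */

    /* Poll until the count drops to 1 or timeout_ms expires.
     * Returns 0 when idle, -1 on timeout (mirrors 0 / -EBUSY above). */
    static int wait_idle(int timeout_ms)
    {
        struct timespec tick = { 0, 10 * 1000 * 1000 };   /* 10 ms */
        for (int waited = 0; waited < timeout_ms; waited += 10) {
            if (atomic_load(&ldisc_users) == 1)
                return 0;
            nanosleep(&tick, NULL);
        }
        return -1;
    }

    int main(void)
    {
        int timeout_ms = 3000;               /* first pass: 3 s, like 3 * HZ */

        while (wait_idle(timeout_ms) != 0) {
            fprintf(stderr, "waiting for ldisc users took too long, retrying...\n");
            timeout_ms = 60000;              /* then wait much longer each pass */
            atomic_fetch_sub(&ldisc_users, 1);  /* simulate the extra user going away */
        }
        return 0;
    }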
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6960715c506..a8078d0638f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm)
{
int i;
- mutex_lock(&open_mutex);
if (acm->dev) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
@@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm)
acm->control->needs_remote_wakeup = 0;
usb_autopm_put_interface(acm->control);
}
- mutex_unlock(&open_mutex);
}
static void acm_tty_hangup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
tty_port_hangup(&acm->port);
+ mutex_lock(&open_mutex);
acm_port_down(acm);
+ mutex_unlock(&open_mutex);
}
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
@@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
shutdown */
if (!acm)
return;
+
+ mutex_lock(&open_mutex);
if (tty_port_close_start(&acm->port, tty, filp) == 0) {
- mutex_lock(&open_mutex);
if (!acm->dev) {
tty_port_tty_set(&acm->port, NULL);
acm_tty_unregister(acm);
@@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
acm_port_down(acm);
tty_port_close_end(&acm->port, tty);
tty_port_tty_set(&acm->port, NULL);
+ mutex_unlock(&open_mutex);
}
static int acm_tty_write(struct tty_struct *tty,
@@ -1456,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
},
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 96f05b29c9a..79781461eec 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_C_PORT_LINK_STATE);
}
+ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+ hub_is_superspeed(hub->hdev)) {
+ need_debounce_delay = true;
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ }
/* We can forget about a "removed" device when there's a
* physical disconnect or the connect status changes.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d6a8d8269bf..ecf12e15a7e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech Webcam B/C500 */
{ USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Webcam C600 */
+ { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Webcam Pro 9000 */
{ USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Webcam C905 */
+ { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C210 */
+ { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C260 */
+ { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Webcam C310 */
{ USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Webcam C910 */
+ { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Webcam C160 */
+ { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech Quickcam Pro 9000 */
+ { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Quickcam E3500 */
+ { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Quickcam Vision Pro */
+ { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech Harmony 700-series */
{ USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 717ebc9ff94..600d8234851 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
ret = -ENODEV;
goto err0;
}
- dwc->revision = reg & DWC3_GSNPSREV_MASK;
+ dwc->revision = reg;
dwc3_core_soft_reset(dwc);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index fa824cfdd2e..25dbd8614e7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
int ret;
dep->endpoint.maxpacket = 1024;
+ dep->endpoint.max_streams = 15;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
&dwc->gadget.ep_list);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b21cd376c11..23a447373c5 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -469,7 +469,7 @@ config USB_LANGWELL
gadget drivers to also be dynamically linked.
config USB_EG20T
- tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
+ tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -485,10 +485,11 @@ config USB_EG20T
This driver does not support interrupt transfer or isochronous
transfer modes.
- This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is
+ This driver can also be used for LAPIS Semiconductor's ML7213, which is
for IVI(In-Vehicle Infotainment) use.
- ML7213 is companion chip for Intel Atom E6xx series.
- ML7213 is completely compatible for Intel EG20T PCH.
+ ML7831 is for general purpose use.
+ ML7213/ML7831 are companion chips for the Intel Atom E6xx series
+ and are fully compatible with the Intel EG20T PCH.
config USB_CI13XXX_MSM
tristate "MIPS USB CI13xxx for MSM"
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 4730016d7cd..45f422ac103 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
u32 tmp;
if (!driver || !bind || !driver->setup
- || driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 4eedfe55715..1fc612914c5 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void)
return platform_driver_register(&ci13xxx_msm_driver);
}
module_init(ci13xxx_msm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 83428f56253..9a0c3979ff4 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -71,6 +71,9 @@
/******************************************************************************
* DEFINE
*****************************************************************************/
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
/* ctrl register bank access */
static DEFINE_SPINLOCK(udc_lock);
@@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
return -EALREADY;
mReq->req.status = -EALREADY;
- if (length && !mReq->req.dma) {
+ if (length && mReq->req.dma == DMA_ADDR_INVALID) {
mReq->req.dma = \
dma_map_single(mEp->device, mReq->req.buf,
length, mEp->dir ? DMA_TO_DEVICE :
@@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
dma_unmap_single(mEp->device, mReq->req.dma,
length, mEp->dir ? DMA_TO_DEVICE :
DMA_FROM_DEVICE);
- mReq->req.dma = 0;
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->map = 0;
}
return -ENOMEM;
@@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
if (mReq->map) {
dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- mReq->req.dma = 0;
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->map = 0;
}
@@ -1610,7 +1613,6 @@ __acquires(mEp->lock)
* @gadget: gadget
*
* This function returns an error code
- * Caller must hold lock
*/
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
@@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
if (mReq != NULL) {
INIT_LIST_HEAD(&mReq->queue);
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
&mReq->dma);
@@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
if (mReq->map) {
dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- mReq->req.dma = 0;
+ mReq->req.dma = DMA_ADDR_INVALID;
mReq->map = 0;
}
req->status = -ECONNRESET;
@@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget)
spin_lock_irqsave(udc->lock, flags);
if (!udc->remote_wakeup) {
ret = -EOPNOTSUPP;
- dbg_trace("remote wakeup feature is not enabled\n");
+ trace("remote wakeup feature is not enabled\n");
goto out;
}
if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
ret = -EINVAL;
- dbg_trace("port is not suspended\n");
+ trace("port is not suspended\n");
goto out;
}
hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
@@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
if (udc->udc_driver->notify_event)
udc->udc_driver->notify_event(udc,
CI13XXX_CONTROLLER_STOPPED_EVENT);
+ spin_unlock_irqrestore(udc->lock, flags);
_gadget_stop_activity(&udc->gadget);
+ spin_lock_irqsave(udc->lock, flags);
pm_runtime_put(&udc->gadget.dev);
}
@@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
struct ci13xxx *udc;
int retval = 0;
- trace("%p, %p, %p", dev, regs, name);
+ trace("%p, %p, %p", dev, regs, driver->name);
if (dev == NULL || regs == NULL || driver == NULL ||
driver->name == NULL)
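The ci13xxx_udc hunks above stop using 0 to mean "not yet DMA-mapped", since 0 can be a legitimate bus address, and instead initialize req.dma to an all-ones DMA_ADDR_INVALID sentinel and test against it around every map/unmap. The following is a small self-contained sketch of the same sentinel idea; the fake_map()/fake_unmap() helpers are made up here and merely stand in for dma_map_single()/dma_unmap_single().

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_ADDR_INVALID ((uintptr_t)~0)   /* sentinel: "not mapped" */

    struct req {
        void      *buf;
        uintptr_t  dma;    /* bus address once mapped, sentinel otherwise */
    };

    /* Stand-ins for dma_map_single()/dma_unmap_single(). */
    static uintptr_t fake_map(void *buf)        { return (uintptr_t)buf; }
    static void      fake_unmap(uintptr_t dma)  { (void)dma; }

    static void submit(struct req *r)
    {
        /* 0 can be a valid bus address, so test the sentinel instead of
         * "!r->dma" to decide whether the buffer still needs mapping. */
        if (r->dma == DMA_ADDR_INVALID)
            r->dma = fake_map(r->buf);
        printf("submitted with dma=%#lx\n", (unsigned long)r->dma);
    }

    static void complete(struct req *r)
    {
        if (r->dma != DMA_ADDR_INVALID) {
            fake_unmap(r->dma);
            r->dma = DMA_ADDR_INVALID;     /* mark unmapped again */
        }
    }

    int main(void)
    {
        char buf[64];
        struct req r = { buf, DMA_ADDR_INVALID };
        submit(&r);
        complete(&r);
        return 0;
    }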
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 596a0b464e6..4dff83d2f26 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -130,9 +130,6 @@ ep_matches (
num_req_streams = ep_comp->bmAttributes & 0x1f;
if (num_req_streams > ep->max_streams)
return 0;
- /* Update the ep_comp descriptor if needed */
- if (num_req_streams != ep->max_streams)
- ep_comp->bmAttributes = ep->max_streams;
}
}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 52583a23533..1a6f415c0d0 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f,
if (ctrl->bRequestType !=
(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != fsg->interface_number || w_value != 0)
+ if (w_index != fsg->interface_number || w_value != 0 ||
+ w_length != 0)
return -EDOM;
/*
@@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f,
if (ctrl->bRequestType !=
(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != fsg->interface_number || w_value != 0)
+ if (w_index != fsg->interface_number || w_value != 0 ||
+ w_length != 1)
return -EDOM;
VDBG(fsg, "get max LUN\n");
*(u8 *)req->buf = fsg->common->nluns - 1;
@@ -2973,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
fsg_common_put(common);
usb_free_descriptors(fsg->function.descriptors);
usb_free_descriptors(fsg->function.hs_descriptors);
+ usb_free_descriptors(fsg->function.ss_descriptors);
kfree(fsg);
}
diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
index 67b222908cf..3797b3d6c62 100644
--- a/drivers/usb/gadget/f_midi.c
+++ b/drivers/usb/gadget/f_midi.c
@@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
-DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
/* B.3.1 Standard AC Interface Descriptor */
@@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = {
/* .wTotalLength = DYNAMIC */
};
-/* B.4.3 Embedded MIDI IN Jack Descriptor */
-static struct usb_midi_in_jack_descriptor jack_in_emb_desc = {
- .bLength = USB_DT_MIDI_IN_SIZE,
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_IN_JACK,
- .bJackType = USB_MS_EMBEDDED,
- /* .bJackID = DYNAMIC */
-};
-
-/* B.4.4 Embedded MIDI OUT Jack Descriptor */
-static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = {
- /* .bLength = DYNAMIC */
- .bDescriptorType = USB_DT_CS_INTERFACE,
- .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK,
- .bJackType = USB_MS_EMBEDDED,
- /* .bJackID = DYNAMIC */
- /* .bNrInputPins = DYNAMIC */
- /* .pins = DYNAMIC */
-};
-
/* B.5.1 Standard Bulk OUT Endpoint Descriptor */
static struct usb_endpoint_descriptor bulk_out_desc = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
@@ -758,9 +737,11 @@ fail:
static int __init
f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
- struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12];
+ struct usb_descriptor_header **midi_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
+ struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
+ struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS];
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
int status, n, jack = 1, i = 0;
@@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
midi->out_ep->driver_data = cdev; /* claim */
+ /* allocate temporary function list */
+ midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function),
+ GFP_KERNEL);
+ if (!midi_function) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
/*
* construct the function's descriptor set. As the number of
* input and output MIDI ports is configurable, we have to do
@@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
/* calculate the header's wTotalLength */
n = USB_DT_MS_HEADER_SIZE
- + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE
- + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1);
+ + (midi->in_ports + midi->out_ports) *
+ (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
ms_header_desc.wTotalLength = cpu_to_le16(n);
midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
- /* we have one embedded IN jack */
- jack_in_emb_desc.bJackID = jack++;
- midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc;
-
- /* and a dynamic amount of external IN jacks */
- for (n = 0; n < midi->in_ports; n++) {
- struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n];
-
- ext->bLength = USB_DT_MIDI_IN_SIZE;
- ext->bDescriptorType = USB_DT_CS_INTERFACE;
- ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
- ext->bJackType = USB_MS_EXTERNAL;
- ext->bJackID = jack++;
- ext->iJack = 0;
-
- midi_function[i++] = (struct usb_descriptor_header *) ext;
- }
-
- /* one embedded OUT jack ... */
- jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports);
- jack_out_emb_desc.bJackID = jack++;
- jack_out_emb_desc.bNrInputPins = midi->in_ports;
- /* ... which referencess all external IN jacks */
+ /* configure the external IN jacks, each linked to an embedded OUT jack */
for (n = 0; n < midi->in_ports; n++) {
- jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID;
- jack_out_emb_desc.pins[n].baSourcePin = 1;
+ struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n];
+ struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n];
+
+ in_ext->bLength = USB_DT_MIDI_IN_SIZE;
+ in_ext->bDescriptorType = USB_DT_CS_INTERFACE;
+ in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
+ in_ext->bJackType = USB_MS_EXTERNAL;
+ in_ext->bJackID = jack++;
+ in_ext->iJack = 0;
+ midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+
+ out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
+ out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
+ out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
+ out_emb->bJackType = USB_MS_EMBEDDED;
+ out_emb->bJackID = jack++;
+ out_emb->bNrInputPins = 1;
+ out_emb->pins[0].baSourcePin = 1;
+ out_emb->pins[0].baSourceID = in_ext->bJackID;
+ out_emb->iJack = 0;
+ midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+
+ /* link it to the endpoint */
+ ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
}
- midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc;
-
- /* and multiple external OUT jacks ... */
+ /* configure the external OUT jacks, each linked to an embedded IN jack */
for (n = 0; n < midi->out_ports; n++) {
- struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n];
- int m;
-
- ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
- ext->bDescriptorType = USB_DT_CS_INTERFACE;
- ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
- ext->bJackType = USB_MS_EXTERNAL;
- ext->bJackID = jack++;
- ext->bNrInputPins = 1;
- ext->iJack = 0;
- /* ... which all reference the same embedded IN jack */
- for (m = 0; m < midi->out_ports; m++) {
- ext->pins[m].baSourceID = jack_in_emb_desc.bJackID;
- ext->pins[m].baSourcePin = 1;
- }
-
- midi_function[i++] = (struct usb_descriptor_header *) ext;
+ struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n];
+ struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n];
+
+ in_emb->bLength = USB_DT_MIDI_IN_SIZE;
+ in_emb->bDescriptorType = USB_DT_CS_INTERFACE;
+ in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
+ in_emb->bJackType = USB_MS_EMBEDDED;
+ in_emb->bJackID = jack++;
+ in_emb->iJack = 0;
+ midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+
+ out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
+ out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
+ out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
+ out_ext->bJackType = USB_MS_EXTERNAL;
+ out_ext->bJackID = jack++;
+ out_ext->bNrInputPins = 1;
+ out_ext->iJack = 0;
+ out_ext->pins[0].baSourceID = in_emb->bJackID;
+ out_ext->pins[0].baSourcePin = 1;
+ midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+
+ /* link it to the endpoint */
+ ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
}
/* configure the endpoint descriptors ... */
ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
- for (n = 0; n < midi->in_ports; n++)
- ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID;
ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
- for (n = 0; n < midi->out_ports; n++)
- ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID;
/* ... and add them to the list */
midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
@@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f)
f->descriptors = usb_copy_descriptors(midi_function);
}
+ kfree(midi_function);
+
return 0;
fail:
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 34907703333..7cdcb63b21f 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
static int
pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
{
- struct net_device *dev = fp->dev;
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
err = usb_ep_queue(fp->out_ep, req, gfp_flags);
if (unlikely(err))
- netdev_free_page(dev, page);
+ put_page(page);
return err;
}
@@ -346,7 +345,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- skb->len == 0, req->actual);
+ skb->len <= 1, req->actual);
page = NULL;
if (req->actual < req->length) { /* Last fragment */
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- pn_rx_submit(fp, req, GFP_ATOMIC);
+ pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
}
/*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
netif_carrier_on(dev);
for (i = 0; i < phonet_rxq_size; i++)
- pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+ pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
}
spin_unlock(&port->lock);
return 0;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 91fdf790ed2..cf33a8d0fd5 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
if (!gser->port.in->desc || !gser->port.out->desc) {
DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
- !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+ config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
gser->port.in->desc = NULL;
gser->port.out->desc = NULL;
return -EINVAL;
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index f7e39b0365c..11b5196284a 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg,
if (ctrl->bRequestType != (USB_DIR_OUT |
USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != 0 || w_value != 0) {
+ if (w_index != 0 || w_value != 0 || w_length != 0) {
value = -EDOM;
break;
}
@@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg,
if (ctrl->bRequestType != (USB_DIR_IN |
USB_TYPE_CLASS | USB_RECIP_INTERFACE))
break;
- if (w_index != 0 || w_value != 0) {
+ if (w_index != 0 || w_value != 0 || w_length != 1) {
value = -EDOM;
break;
}
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 43a49ecc1f3..dcbc0a2e48d 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/hardware.h>
@@ -88,7 +89,6 @@ eenahb:
void fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
if (cpu_is_mx35()) {
unsigned int v;
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
USBPHYCTRL_OTGBASE_OFFSET));
}
}
-#endif
/* ULPI transceivers don't need usbpll */
if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2a03e4de11c..e00cf92409c 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b2c44e1d581..dd28ef3def7 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+ struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+ /* Clear active and halt bit */
+ qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+ | EP_QUEUE_HEAD_STATUS_HALT));
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime endpoint by writing correct bit to ENDPTPRIME */
+ fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
- int i = ep_index(ep) * 2 + ep_is_in(ep);
u32 temp, bitmask, tmp_stat;
- struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
- goto out;
+ return;
do {
/* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
if (tmp_stat)
- goto out;
+ return;
}
- /* Write dQH next pointer and terminate bit to 0 */
- temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
- /* Clear active and halt bit */
- temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
- | EP_QUEUE_HEAD_STATUS_HALT));
- dQH->size_ioc_int_sts &= temp;
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- /* Prime endpoint by writing 1 to ENDPTPRIME */
- temp = ep_is_in(ep)
- ? (1 << (ep_index(ep) + 16))
- : (1 << (ep_index(ep)));
- fsl_writel(temp, &dr_regs->endpointprime);
-out:
- return;
+ fsl_prime_ep(ep, req->head);
}
/* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
VDBG("%s, bad ep", __func__);
return -EINVAL;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
}
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct ep_queue_head *qh;
struct fsl_req *next_req;
- qh = ep->qh;
next_req = list_entry(req->queue.next, struct fsl_req,
queue);
- /* Point the QH to the first TD of next request */
- fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ /* prime with dTD of next request */
+ fsl_prime_ep(ep, next_req->head);
}
-
- /* The request hasn't been processed, patch up the TD chain */
+ /* The request hasn't been processed, patch up the TD chain */
} else {
struct fsl_req *prev_req;
prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
- fsl_writel(fsl_readl(&req->tail->next_td_ptr),
- &prev_req->tail->next_td_ptr);
-
+ prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
}
done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
goto out;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
status = -EOPNOTSUPP;
goto out;
}
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
struct fsl_udc *udc;
int size = 0;
u32 bitmask;
- struct ep_queue_head *d_qh;
+ struct ep_queue_head *qh;
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+ qh = get_qh_by_ep(ep);
bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
(1 << (ep_index(ep)));
if (fsl_readl(&dr_regs->endptstatus) & bitmask)
- size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+ size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
>> DTD_LENGTH_BIT_POS;
pr_debug("%s %u\n", __func__, size);
@@ -1717,7 +1713,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
static inline enum usb_device_speed portscx_device_speed(u32 reg)
{
- switch (speed & PORTSCX_PORT_SPEED_MASK) {
+ switch (reg & PORTSCX_PORT_SPEED_MASK) {
case PORTSCX_PORT_SPEED_HIGH:
return USB_SPEED_HIGH;
case PORTSCX_PORT_SPEED_FULL:
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
@@ -2480,8 +2475,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
#ifndef CONFIG_ARCH_MXC
if (pdata->have_sysif_regs)
- usb_sys_regs = (struct usb_sys_interface *)
- ((u32)dr_regs + USB_DR_SYS_OFFSET);
+ usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
#endif
/* Initialize USB clocks */
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda..f781f5dec41 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+ /* we only have one ep0 structure but two queue heads */
+ if (ep_index(ep) != 0)
+ return ep->qh;
+ else
+ return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+ USB_DIR_IN) ? 1 : 0];
+}
+
struct platform_device;
#ifdef CONFIG_ARCH_MXC
int fsl_udc_clk_init(struct platform_device *pdev);
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a392ec0d2d5..6ccae2707e5 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1730,8 +1730,9 @@ static void
gadgetfs_disconnect (struct usb_gadget *gadget)
{
struct dev_data *dev = get_gadget_data (gadget);
+ unsigned long flags;
- spin_lock (&dev->lock);
+ spin_lock_irqsave (&dev->lock, flags);
if (dev->state == STATE_DEV_UNCONNECTED)
goto exit;
dev->state = STATE_DEV_UNCONNECTED;
@@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
next_event (dev, GADGETFS_DISCONNECT);
ep0_readable (dev);
exit:
- spin_unlock (&dev->lock);
+ spin_unlock_irqrestore (&dev->lock, flags);
}
static void
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 91d0af2a24a..9aa1cbbee45 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !bind
|| !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 7f1bc9a73cd..da2b9d0be3c 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver || driver->speed != USB_SPEED_HIGH
+ if (!driver || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 550d6dcdf10..5048a0c0764 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -354,6 +354,7 @@ struct pch_udc_dev {
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
+#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
@@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
.class_mask = 0xffffffff,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
+ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class_mask = 0xffffffff,
+ },
{ 0 },
};
@@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void)
module_exit(pch_udc_pci_exit);
MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
-MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 68a826a1b86..fc719a3f855 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep)
if (list_empty(&ep->queue) && !ep->busy) {
pipe_stop(ep->r8a66597, ep->pipenum);
r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
+ r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
+ r8a66597_write(ep->r8a66597, 0, ep->pipectr);
}
spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}
@@ -1742,26 +1744,16 @@ static int r8a66597_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
- int retval;
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
return -ENODEV;
/* hook up the driver */
- driver->driver.bus = NULL;
r8a66597->driver = driver;
- r8a66597->gadget.dev.driver = &driver->driver;
-
- retval = device_add(&r8a66597->gadget.dev);
- if (retval) {
- dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
- retval);
- goto error;
- }
init_controller(r8a66597);
r8a66597_bset(r8a66597, VBSE, INTENB0);
@@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget,
}
return 0;
-
-error:
- r8a66597->driver = NULL;
- r8a66597->gadget.dev.driver = NULL;
-
- return retval;
}
static int r8a66597_stop(struct usb_gadget *gadget,
@@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget,
disable_controller(r8a66597);
spin_unlock_irqrestore(&r8a66597->lock, flags);
- device_del(&r8a66597->gadget.dev);
r8a66597->driver = NULL;
return 0;
}
@@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
clk_put(r8a66597->clk);
}
#endif
+ device_unregister(&r8a66597->gadget.dev);
kfree(r8a66597);
return 0;
}
@@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
r8a66597->gadget.ops = &r8a66597_gadget_ops;
- device_initialize(&r8a66597->gadget.dev);
dev_set_name(&r8a66597->gadget.dev, "gadget");
r8a66597->gadget.is_dualspeed = 1;
r8a66597->gadget.dev.parent = &pdev->dev;
r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
r8a66597->gadget.dev.release = pdev->dev.release;
r8a66597->gadget.name = udc_name;
+ ret = device_register(&r8a66597->gadget.dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "device_register failed\n");
+ goto clean_up;
+ }
init_timer(&r8a66597->timer);
r8a66597->timer.function = r8a66597_timer;
@@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
clk_name);
ret = PTR_ERR(r8a66597->clk);
- goto clean_up;
+ goto clean_up_dev;
}
clk_enable(r8a66597->clk);
}
@@ -2014,7 +2004,9 @@ clean_up2:
clk_disable(r8a66597->clk);
clk_put(r8a66597->clk);
}
+clean_up_dev:
#endif
+ device_unregister(&r8a66597->gadget.dev);
clean_up:
if (r8a66597) {
if (r8a66597->sudmac_reg)
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a552453dc94..b31448229f0 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
return -EINVAL;
}
- if (driver->speed != USB_SPEED_HIGH &&
- driver->speed != USB_SPEED_FULL) {
+ if (driver->speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
- }
if (!bind || !driver->setup) {
dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 8d54f893cef..20a553b46ae 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
int ret;
if (!driver
- || (driver->speed != USB_SPEED_FULL &&
- driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_FULL
|| !bind
|| !driver->unbind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 022baeca7c9..6939e17f458 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
if (udc_is_newstyle(udc)) {
- usb_gadget_disconnect(udc->gadget);
+ udc->driver->disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc->gadget, udc->driver);
-
+ usb_gadget_disconnect(udc->gadget);
} else {
usb_gadget_stop(udc->gadget, udc->driver);
}
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
static ssize_t usb_udc_srp_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
- struct usb_udc *udc = dev_get_drvdata(dev);
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
if (sysfs_streq(buf, "1"))
usb_gadget_wakeup(udc->gadget);
@@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n",
usb_speed_string(udc->gadget->speed));
}
-static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL);
+static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
#define USB_UDC_ATTR(name) \
ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
\
return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
} \
-static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL)
+static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
static USB_UDC_ATTR(is_dualspeed);
static USB_UDC_ATTR(is_otg);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 2e829fae648..a60679cbbf8 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1475,30 +1475,36 @@ iso_stream_schedule (
* jump until after the queue is primed.
*/
else {
+ int done = 0;
start = SCHEDULE_SLOP + (now & ~0x07);
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
- /* find a uframe slot with enough bandwidth */
- next = start + period;
- for (; start < next; start++) {
-
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
/* check schedule: enough space? */
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
stream->usecs, period))
- break;
+ done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, mod, stream,
start, sched, period))
- break;
+ done = 1;
}
- }
+ } while (start > next && !done);
/* no room in the schedule */
- if (start == next) {
+ if (!done) {
ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
urb, now, now + mod);
status = -ENOSPC;
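The iso_stream_schedule() hunk above changes the slot search to walk the candidate window from the latest uframe back toward the earliest, tracking success in a done flag, so that early uframes (the only ones usable by full-speed iso IN transfers) are consumed last. Here is a stripped-down sketch of that backward scan; slot_ok() is an invented predicate standing in for itd_slot_ok()/sitd_slot_ok(), not the real bandwidth check.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical bandwidth check: pretend slots 0-2 are already full. */
    static bool slot_ok(int slot) { return slot > 2; }

    /* Scan the window [base, base + period) from the latest slot down to
     * the earliest, so earlier slots are only used as a last resort. */
    static int pick_slot(int base, int period)
    {
        int done = 0;
        int next = base;
        int start = base + period;

        do {
            start--;
            if (slot_ok(start))
                done = 1;
        } while (start > next && !done);

        return done ? start : -1;          /* -1 mirrors -ENOSPC above */
    }

    int main(void)
    {
        printf("picked slot %d\n", pick_slot(0, 8));
        return 0;
    }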
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index fe74bd67601..b4fb511d24b 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd)
ehci->caps = hcd->regs;
ehci->regs = hcd->regs +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
dbg_hcs_params(ehci, "reset");
dbg_hcc_params(ehci, "reset");
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index a7dc1e1d45f..2ac4ac2e4ef 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -18,7 +18,7 @@
#include "isp1760-hcd.h"
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -31,7 +31,7 @@
#include <linux/pci.h>
#endif
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct isp1760 {
struct usb_hcd *hcd;
int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
ret = platform_driver_register(&isp1760_of_driver);
if (!ret)
any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
platform_driver_unregister(&isp1760_of_driver);
#endif
#ifdef CONFIG_PCI
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index ba3a46b78b7..95a9fec38e8 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (port < 0 || port >= 2)
return;
+ if (pdata->vbus_pin[port] <= 0)
+ return;
+
gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
}
@@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (port < 0 || port >= 2)
return -EINVAL;
+ if (pdata->vbus_pin[port] <= 0)
+ return -EINVAL;
+
return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 34efd479e06..b2639191549 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd)
struct ohci_hcd *ohci;
ohci = hcd_to_ohci (hcd);
- ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
+ ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
- /* If the SHUTDOWN quirk is set, don't put the controller in RESET */
- ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ?
- OHCI_CTRL_RWC | OHCI_CTRL_HCFS :
- OHCI_CTRL_RWC);
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
+ /* Software reset, after which the controller goes into SUSPEND */
+ ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
+ ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
+ udelay(10);
- /* flush the writes */
- (void) ohci_readl (ohci, &ohci->regs->control);
+ ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
}
static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index ad8166c681e..bc01b064585 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
return 0;
}
-/* nVidia controllers continue to drive Reset signalling on the bus
- * even after system shutdown, wasting power. This flag tells the
- * shutdown routine to leave the controller OPERATIONAL instead of RESET.
- */
-static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
-{
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
-
- /* Evidently nVidia fixed their later hardware; this is a guess at
- * the changeover point.
- */
-#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d
-
- if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
- ohci->flags |= OHCI_QUIRK_SHUTDOWN;
- ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
- }
-
- return 0;
-}
-
static void sb800_prefetch(struct ohci_hcd *ohci, int on)
{
struct pci_dev *pdev;
@@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = {
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
- {
- PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
- .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown,
- },
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 35e5fd640ce..0795b934d00 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -403,7 +403,6 @@ struct ohci_hcd {
#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
-#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 27a3dec32fa..caf87428ca4 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -37,6 +37,7 @@
#define OHCI_INTRENABLE 0x10
#define OHCI_INTRDISABLE 0x14
#define OHCI_FMINTERVAL 0x34
+#define OHCI_HCFS (3 << 6) /* hc functional state */
#define OHCI_HCR (1 << 0) /* host controller reset */
#define OHCI_OCR (1 << 3) /* ownership change request */
#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
@@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
u32 control;
+ u32 fminterval;
+ int cnt;
if (!mmio_resource_enabled(pdev, 0))
return;
@@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
}
#endif
- /* reset controller, preserving RWC (and possibly IR) */
- writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
- readl(base + OHCI_CONTROL);
+ /* disable interrupts */
+ writel((u32) ~0, base + OHCI_INTRDISABLE);
- /* Some NVIDIA controllers stop working if kept in RESET for too long */
- if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
- u32 fminterval;
- int cnt;
+ /* Reset the USB bus, if the controller isn't already in RESET */
+ if (control & OHCI_HCFS) {
+ /* Go into RESET, preserving RWC (and possibly IR) */
+ writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ readl(base + OHCI_CONTROL);
- /* drive reset for at least 50 ms (7.1.7.5) */
+ /* drive bus reset for at least 50 ms (7.1.7.5) */
msleep(50);
+ }
- /* software reset of the controller, preserving HcFmInterval */
- fminterval = readl(base + OHCI_FMINTERVAL);
- writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+ /* software reset of the controller, preserving HcFmInterval */
+ fminterval = readl(base + OHCI_FMINTERVAL);
+ writel(OHCI_HCR, base + OHCI_CMDSTATUS);
- /* reset requires max 10 us delay */
- for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
- if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
- break;
- udelay(1);
- }
- writel(fminterval, base + OHCI_FMINTERVAL);
-
- /* Now we're in the SUSPEND state with all devices reset
- * and wakeups and interrupts disabled
- */
+ /* reset requires max 10 us delay */
+ for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
+ if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+ break;
+ udelay(1);
}
+ writel(fminterval, base + OHCI_FMINTERVAL);
- /*
- * disable interrupts
- */
- writel(~(u32)0, base + OHCI_INTRDISABLE);
- writel(~(u32)0, base + OHCI_INTRSTATUS);
-
+ /* Now the controller is safely in SUSPEND and nothing can wake it up */
iounmap(base);
}
@@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
void __iomem *base, *op_reg_base;
u32 hcc_params, cap, val;
u8 offset, cap_length;
- int wait_time, delta, count = 256/4;
+ int wait_time, count = 256/4;
if (!mmio_resource_enabled(pdev, 0))
return;
@@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
writel(val, op_reg_base + EHCI_USBCMD);
wait_time = 2000;
- delta = 100;
do {
writel(0x3f, op_reg_base + EHCI_USBSTS);
- udelay(delta);
- wait_time -= delta;
+ udelay(100);
+ wait_time -= 100;
val = readl(op_reg_base + EHCI_USBSTS);
if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
break;
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index d6e17542861..a403b53e86b 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 42a22b8e692..0e4b25fa3bc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
- struct xhci_input_control_ctx *ctrl_ctx;
u32 port_num;
struct usb_device *top_dev;
@@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return -EINVAL;
}
ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
- ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
- /* 2) New slot context and endpoint 0 context are valid*/
- ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
-
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 940321b3ec6..9f1d4b15d81 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
struct xhci_ring *ring;
struct xhci_td *cur_td;
int ret, i, j;
+ unsigned long flags;
ep = (struct xhci_virt_ep *) arg;
xhci = ep->xhci;
- spin_lock(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
"xHCI as DYING, exiting.\n");
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
"exiting.\n");
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
@@ -844,11 +845,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
xhci->xhc_state |= XHCI_STATE_DYING;
/* Disable interrupts from the host controller and start halting it */
xhci_quiesce(xhci);
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
ret = xhci_halt(xhci);
- spin_lock(&xhci->lock);
+ spin_lock_irqsave(&xhci->lock, flags);
if (ret < 0) {
/* This is bad; the host is not responding to commands and it's
* not allowing itself to be halted. At least interrupts are
@@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
}
}
}
- spin_unlock(&xhci->lock);
+ spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Calling usb_hc_died()\n");
usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
xhci_dbg(xhci, "xHCI host controller is dead.\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1ff95a0df57..a1afb7c39f7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
ring = xhci->cmd_ring;
seg = ring->deq_seg;
do {
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
@@ -799,7 +802,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
- int retval;
+ int retval = 0;
/* Wait a bit if either of the roothubs need to settle from the
* transition into bus suspend.
@@ -809,6 +812,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci->bus_state[1].next_statechange))
msleep(100);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
spin_lock_irq(&xhci->lock);
if (xhci->quirks & XHCI_RESET_ON_RESUME)
hibernated = true;
@@ -878,20 +884,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
return retval;
xhci_dbg(xhci, "Start the primary HCD\n");
retval = xhci_run(hcd->primary_hcd);
- if (retval)
- goto failed_restart;
-
- xhci_dbg(xhci, "Start the secondary HCD\n");
- retval = xhci_run(secondary_hcd);
if (!retval) {
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- set_bit(HCD_FLAG_HW_ACCESSIBLE,
- &xhci->shared_hcd->flags);
+ xhci_dbg(xhci, "Start the secondary HCD\n");
+ retval = xhci_run(secondary_hcd);
}
-failed_restart:
hcd->state = HC_STATE_SUSPENDED;
xhci->shared_hcd->state = HC_STATE_SUSPENDED;
- return retval;
+ goto done;
}
/* step 4: set Run/Stop bit */
@@ -910,11 +909,14 @@ failed_restart:
* Running endpoints by ringing their doorbells
*/
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
-
spin_unlock_irq(&xhci->lock);
- return 0;
+
+ done:
+ if (retval == 0) {
+ usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
+ return retval;
}
#endif /* CONFIG_PM */
@@ -3504,6 +3506,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Otherwise, update the control endpoint ring enqueue pointer. */
else
xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+ ctrl_ctx->drop_flags = 0;
+
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
@@ -3585,7 +3591,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
+ 1;
/* Zero the input context control for later use */
- ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
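
Among the xhci.c hunks above, xhci_clear_command_ring() now wipes every TRB except the final link TRB, stripping only its cycle bit so the segments stay chained together. A small self-contained illustration of clearing a ring segment while preserving the link entry; the segment size and field layout are invented for the demo.

    #include <stdio.h>
    #include <string.h>

    #define SEG_TRBS 8

    struct trb { unsigned int field[4]; };

    /* Zero all TRBs except the last one, which links to the next segment;
     * only its cycle bit (bit 0 of the control word) is cleared. */
    static void clear_segment(struct trb *trbs)
    {
        memset(trbs, 0, sizeof(*trbs) * (SEG_TRBS - 1));
        trbs[SEG_TRBS - 1].field[3] &= ~1u;
    }

    int main(void)
    {
        struct trb seg[SEG_TRBS];

        memset(seg, 0xff, sizeof(seg));     /* pretend the ring was in use */
        clear_segment(seg);
        printf("first TRB word: 0x%08x\n", seg[0].field[0]);
        printf("link TRB ctrl : 0x%08x\n", seg[SEG_TRBS - 1].field[3]);
        return 0;
    }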
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index fc34b8b1191..07a03460a59 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -11,6 +11,7 @@ config USB_MUSB_HDRC
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
+ select USB_GADGET_DUALSPEED
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
@@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "U8500 and U5500"
- depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500)
+ depends on (ARCH_U8500 && AB8500_USB)
endchoice
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 08f1d0b662a..e233d2b7d33 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -27,6 +27,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 4da7492ddbd..2613bfdb09b 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -27,6 +27,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 20a28731c33..b63ab157010 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
- defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
- defined(CONFIG_ARCH_U5500)
+ defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500)
static irqreturn_t generic_interrupt(int irq, void *__hci)
{
@@ -2302,18 +2301,12 @@ static int musb_suspend(struct device *dev)
*/
}
- musb_save_context(musb);
-
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume_noirq(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
-
- musb_restore_context(musb);
-
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ae4a20acef6..922148ff8d2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
unsigned long flags;
int retval = -EINVAL;
- if (driver->speed != USB_SPEED_HIGH)
+ if (driver->speed < USB_SPEED_HIGH)
goto err0;
pm_runtime_get_sync(musb->controller);
@@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
nuke(&hw_ep->ep_out, -ESHUTDOWN);
}
}
-
- spin_unlock(&musb->lock);
- driver->disconnect(&musb->g);
- spin_lock(&musb->lock);
}
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 60ddba8066e..79cb0af779f 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (musb->double_buffer_not_ok)
musb_writew(epio, MUSB_TXMAXP,
hw_ep->max_packet_sz_tx);
+ else if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP, packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
else
musb_writew(epio, MUSB_TXMAXP,
qh->maxpacket |
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d2e2efaba65..08c679c0dde 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
/*
* platform functions
*/
-static int __devinit usbhs_probe(struct platform_device *pdev)
+static int usbhs_probe(struct platform_device *pdev)
{
struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
struct renesas_usbhs_driver_callback *dfunc;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 8da685e796d..ffdf5d15085 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
if (len % 4) /* 32bit alignment */
goto usbhsf_pio_prepare_push;
- if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_push;
/* get enable DMA fifo */
@@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
if (!fifo)
goto usbhsf_pio_prepare_pop;
- if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
goto usbhsf_pio_prepare_pop;
ret = usbhsf_fifo_select(pipe, fifo, 0);
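
The two fifo.c hunks above fix an alignment test that dereferenced the buffer and inspected its first word; what matters for DMA is the address itself, so the fix casts the pointer arithmetic through uintptr_t. A tiny stand-alone version of the corrected check:

    #include <stdint.h>
    #include <stdio.h>

    /* Test the address, not the data it points to: casting through
     * uintptr_t is the portable way to look at a pointer's low bits. */
    static int is_8byte_aligned(const void *p)
    {
        return ((uintptr_t)p & 0x7) == 0;
    }

    int main(void)
    {
        char buf[32] __attribute__((aligned(16)));

        printf("buf + 0: %d\n", is_8byte_aligned(buf));         /* 1 */
        printf("buf + 3: %d\n", is_8byte_aligned(buf + 3));     /* 0 */
        printf("buf + 8: %d\n", is_8byte_aligned(buf + 8));     /* 1 */
        return 0;
    }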
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d7000..ad96a389672 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_attch)
intenb1 |= ATTCHE;
- if (mod->irq_attch)
+ if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 8ae3733031c..6c6875533f0 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod);
*/
#if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \
defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE)
-extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv);
-extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv);
+extern int usbhs_mod_host_probe(struct usbhs_priv *priv);
+extern int usbhs_mod_host_remove(struct usbhs_priv *priv);
#else
static inline int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
@@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv)
#if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \
defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE)
-extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv);
-extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv);
+extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv);
+extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv);
#else
static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
{
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 4cc7ee0babc..7f4e8033857 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
- int ret;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->speed < USB_SPEED_FULL)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
/* first hook up the driver ... */
gpriv->driver = driver;
gpriv->gadget.dev.driver = &driver->driver;
- ret = device_add(&gpriv->gadget.dev);
- if (ret) {
- dev_err(dev, "device_add error %d\n", ret);
- goto add_fail;
- }
-
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
- gpriv->driver = NULL;
- gpriv->gadget.dev.driver = NULL;
-
- return ret;
}
static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->unbind)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
- device_del(&gpriv->gadget.dev);
+ gpriv->gadget.dev.driver = NULL;
gpriv->driver = NULL;
return 0;
@@ -827,10 +806,17 @@ static int usbhsg_start(struct usbhs_priv *priv)
static int usbhsg_stop(struct usbhs_priv *priv)
{
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ /* cable disconnect */
+ if (gpriv->driver &&
+ gpriv->driver->disconnect)
+ gpriv->driver->disconnect(&gpriv->gadget);
+
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
-int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv;
struct usbhsg_uep *uep;
@@ -876,12 +862,14 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
/*
* init gadget
*/
- device_initialize(&gpriv->gadget.dev);
dev_set_name(&gpriv->gadget.dev, "gadget");
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
gpriv->gadget.is_dualspeed = 1;
+ ret = device_register(&gpriv->gadget.dev);
+ if (ret < 0)
+ goto err_add_udc;
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
@@ -912,12 +900,15 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
- goto err_add_udc;
+ goto err_register;
dev_info(dev, "gadget probed\n");
return 0;
+
+err_register:
+ device_unregister(&gpriv->gadget.dev);
err_add_udc:
kfree(gpriv->uep);
@@ -927,12 +918,14 @@ usbhs_mod_gadget_probe_err_gpriv:
return ret;
}
-void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
+void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
usb_del_gadget_udc(&gpriv->gadget);
+ device_unregister(&gpriv->gadget.dev);
+
usbhsg_controller_unregister(gpriv);
kfree(gpriv->uep);
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1a7208a50af..7955de58995 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -103,7 +103,7 @@ struct usbhsh_hpriv {
u32 port_stat; /* USB_PORT_STAT_xxx */
- struct completion *done;
+ struct completion setup_ack_done;
/* see usbhsh_req_alloc/free */
struct list_head ureq_link_active;
@@ -355,6 +355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
struct usbhsh_device *udev,
struct usb_host_endpoint *ep,
+ int dir_in_req,
gfp_t mem_flags)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
@@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
struct usbhs_pipe *pipe, *best_pipe;
struct device *dev = usbhsh_hcd_to_dev(hcd);
struct usb_endpoint_descriptor *desc = &ep->desc;
- int type, i;
+ int type, i, dir_in;
unsigned int min_usr;
+ dir_in_req = !!dir_in_req;
+
uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
if (!uep) {
dev_err(dev, "usbhsh_ep alloc fail\n");
return NULL;
}
- type = usb_endpoint_type(desc);
+
+ if (usb_endpoint_xfer_control(desc)) {
+ best_pipe = usbhsh_hpriv_to_dcp(hpriv);
+ goto usbhsh_endpoint_alloc_find_pipe;
+ }
/*
* find best pipe for endpoint
* see
* HARDWARE LIMITATION
*/
+ type = usb_endpoint_type(desc);
min_usr = ~0;
best_pipe = NULL;
- usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
+ usbhs_for_each_pipe(pipe, priv, i) {
if (!usbhs_pipe_type_is(pipe, type))
continue;
+ dir_in = !!usbhs_pipe_is_dir_in(pipe);
+ if (0 != (dir_in - dir_in_req))
+ continue;
+
info = usbhsh_pipe_info(pipe);
if (min_usr > info->usr_cnt) {
@@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
kfree(uep);
return NULL;
}
-
+usbhsh_endpoint_alloc_find_pipe:
/*
* init uep
*/
@@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
* see
* DCPMAXP/PIPEMAXP
*/
+ usbhs_pipe_sequence_data0(uep->pipe);
usbhs_pipe_config_update(uep->pipe,
usbhsh_device_number(hpriv, udev),
usb_endpoint_num(desc),
@@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
usbhsh_device_number(hpriv, udev),
- usbhs_pipe_name(pipe), uep);
+ usbhs_pipe_name(uep->pipe), uep);
return uep;
}
@@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
* usbhsh_irq_setup_ack()
* usbhsh_irq_setup_err()
*/
- DECLARE_COMPLETION(done);
- hpriv->done = &done;
+ init_completion(&hpriv->setup_ack_done);
/* copy original request */
memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest));
@@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
/*
* wait setup packet ACK
*/
- wait_for_completion(&done);
- hpriv->done = NULL;
+ wait_for_completion(&hpriv->setup_ack_done);
dev_dbg(dev, "%s done\n", __func__);
}
@@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
struct usbhsh_device *udev, *new_udev = NULL;
struct usbhs_pipe *pipe;
struct usbhsh_ep *uep;
+ int is_dir_in = usb_pipein(urb->pipe);
int ret;
- dev_dbg(dev, "%s (%s)\n",
- __func__, usb_pipein(urb->pipe) ? "in" : "out");
+ dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
@@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
*/
uep = usbhsh_ep_to_uep(ep);
if (!uep) {
- uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags);
+ uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
+ is_dir_in, mem_flags);
if (!uep)
goto usbhsh_urb_enqueue_error_free_device;
}
@@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv,
dev_dbg(dev, "setup packet OK\n");
- if (unlikely(!hpriv->done))
- dev_err(dev, "setup ack happen without necessary data\n");
- else
- complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+ complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
return 0;
}
@@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
dev_dbg(dev, "setup packet Err\n");
- if (unlikely(!hpriv->done))
- dev_err(dev, "setup err happen without necessary data\n");
- else
- complete(hpriv->done); /* see usbhsh_urb_enqueue() */
+ complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */
return 0;
}
@@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct device *dev = usbhs_priv_to_dev(priv);
+ /*
+ * disable irq callback
+ */
+ mod->irq_attch = NULL;
+ mod->irq_dtch = NULL;
+ mod->irq_sack = NULL;
+ mod->irq_sign = NULL;
+ usbhs_irq_callback_update(priv, mod);
+
usb_remove_hcd(hcd);
/* disable sys */
@@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
return 0;
}
-int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
+int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv;
struct usb_hcd *hcd;
@@ -1251,6 +1267,7 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
+ hcd->has_tt = 1; /* for low/full speed */
pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
if (!pipe_info) {
@@ -1279,7 +1296,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv)
hpriv->mod.stop = usbhsh_stop;
hpriv->pipe_info = pipe_info;
hpriv->pipe_size = pipe_size;
- hpriv->done = NULL;
usbhsh_req_list_init(hpriv);
usbhsh_port_stat_init(hpriv);
@@ -1299,7 +1315,7 @@ usbhs_mod_host_probe_err:
return -ENOMEM;
}
-int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv)
+int usbhs_mod_host_remove(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
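
A recurring change in mod_host.c above replaces a completion declared on the stack (and reached through an hpriv->done pointer that could be NULL or stale) with a completion embedded in the long-lived hpriv structure. A minimal sketch of the embedded-completion pattern, with hypothetical names:

    #include <linux/completion.h>

    struct my_host {
        struct completion setup_ack_done;   /* lives as long as the host */
    };

    /* Submit path: re-arm the embedded completion, start the transfer,
     * then sleep until the interrupt path signals it. */
    static void my_submit_setup(struct my_host *host)
    {
        init_completion(&host->setup_ack_done);
        /* ... hand the setup packet to the hardware here ... */
        wait_for_completion(&host->setup_ack_done);
    }

    /* IRQ path: the completion is always valid because it is embedded in
     * the long-lived structure, so no NULL/stale-pointer check is needed. */
    static void my_setup_ack_irq(struct my_host *host)
    {
        complete(&host->setup_ack_done);
    }

Because the completion's lifetime matches the device's, the IRQ handler can never race against a waiter whose stack frame has already gone away.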
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5cdb9d91227..18e875b92e0 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -42,7 +42,7 @@ static int debug;
* Version information
*/
-#define DRIVER_VERSION "v0.6"
+#define DRIVER_VERSION "v0.7"
#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
@@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
goto err_out;
}
- /* setup termios */
- if (tty)
- ark3116_set_termios(tty, port, NULL);
-
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
@@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
/* enable DMA */
ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
+ /* setup termios */
+ if (tty)
+ ark3116_set_termios(tty, port, NULL);
+
err_out:
kfree(buf);
return result;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8fe034d2d3e..ff3db5d056a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
@@ -2104,13 +2105,19 @@ static void ftdi_set_termios(struct tty_struct *tty,
cflag = termios->c_cflag;
- /* FIXME -For this cut I don't care if the line is really changing or
- not - so just do the change regardless - should be able to
- compare old_termios and tty->termios */
+ if (old_termios->c_cflag == termios->c_cflag
+ && old_termios->c_ispeed == termios->c_ispeed
+ && old_termios->c_ospeed == termios->c_ospeed)
+ goto no_c_cflag_changes;
+
/* NOTE These routines can get interrupted by
ftdi_sio_read_bulk_callback - need to examine what this means -
don't see any problems yet */
+ if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) ==
+ (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
+ goto no_data_parity_stop_changes;
+
/* Set number of data bits, parity, stop bits */
urb_value = 0;
@@ -2151,6 +2158,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
}
/* Now do the baudrate */
+no_data_parity_stop_changes:
if ((cflag & CBAUD) == B0) {
/* Disable flow control */
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
@@ -2178,6 +2186,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
/* Set flow control */
/* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */
+no_c_cflag_changes:
if (cflag & CRTSCTS) {
dbg("%s Setting to CRTSCTS flow control", __func__);
if (usb_control_msg(dev,
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 571fa96b49c..055b64ef0bb 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -112,6 +112,7 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
/* Lenz LI-USB Computer Interface. */
#define FTDI_LENZ_LIUSB_PID 0xD780
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 89ae1f65e1b..6dd64534fad 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_K4511 0x14CC
#define HUAWEI_PRODUCT_ETS1220 0x1803
#define HUAWEI_PRODUCT_E353 0x1506
+#define HUAWEI_PRODUCT_E173S 0x1C05
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
@@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_AC8710 0xfff1
#define ZTE_PRODUCT_AC2726 0xfff5
#define ZTE_PRODUCT_AC8710T 0xffff
+#define ZTE_PRODUCT_MC2718 0xffe8
+#define ZTE_PRODUCT_AD3812 0xffeb
+#define ZTE_PRODUCT_MC2716 0xffed
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CLU528 0x260D
#define YUGA_PRODUCT_CLU526 0x260F
+/* Viettel products */
+#define VIETTEL_VENDOR_ID 0x2262
+#define VIETTEL_PRODUCT_VT1000 0x0002
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = {
.reserved = BIT(4),
};
+static const struct option_blacklist_info zte_ad3812_z_blacklist = {
+ .sendsetup = BIT(0) | BIT(1) | BIT(2),
+};
+
+static const struct option_blacklist_info zte_mc2718_z_blacklist = {
+ .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+ .sendsetup = BIT(1) | BIT(2) | BIT(3),
+};
+
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
@@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
@@ -640,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -726,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
@@ -1043,6 +1073,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -1141,6 +1177,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9083d1e616b..fc2d66f7f4e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
- { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3d10d7f0207..c38b8c00c06 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -145,10 +145,6 @@
#define ADLINK_VENDOR_ID 0x0b63
#define ADLINK_ND6530_PRODUCT_ID 0x6530
-/* WinChipHead USB->RS 232 adapter */
-#define WINCHIPHEAD_VENDOR_ID 0x4348
-#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
-
/* SMART USB Serial Adapter */
#define SMART_VENDOR_ID 0x0b8c
#define SMART_PRODUCT_ID 0x2303
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4dca3ef0668..9fbe742343c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
} else {
void *buf;
- int offset;
+ int offset = 0;
u16 PhyBlockAddr;
u8 PageNum;
- u32 result;
u16 len, oldphy, newphy;
buf = kmalloc(blenByte, GFP_KERNEL);
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 93c1a4d86f5..82dd834709c 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -59,7 +59,9 @@
void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
{
- /* Pad the SCSI command with zeros out to 12 bytes
+ /*
+ * Pad the SCSI command with zeros out to 12 bytes. If the
+ * command already is 12 bytes or longer, leave it alone.
*
* NOTE: This only works because a scsi_cmnd struct field contains
* a unsigned char cmnd[16], so we know we have storage available
@@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
for (; srb->cmd_len<12; srb->cmd_len++)
srb->cmnd[srb->cmd_len] = 0;
- /* set command length to 12 bytes */
- srb->cmd_len = 12;
-
/* send the command to the transport layer */
usb_stor_invoke_transport(srb, us);
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 3041a974faf..24caba79d72 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
+ "Kingston",
+ "DT 101 G2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG ),
+
/* Reported by Francesco Foresti <frafore@tiscali.it> */
UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
"Super Top",
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 55f91d9ab00..29577bf1f55 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -116,6 +116,7 @@
/* Clock registers available only on Version 2 */
#define LCD_CLK_ENABLE_REG 0x6c
#define LCD_CLK_RESET_REG 0x70
+#define LCD_CLK_MAIN_RESET BIT(3)
#define LCD_NUM_BUFFERS 2
@@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void)
{
u32 reg;
+ /* Bring LCDC out of reset */
+ if (lcd_revision == LCD_VERSION_2)
+ lcdc_write(0, LCD_CLK_RESET_REG);
+
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (!(reg & LCD_RASTER_ENABLE))
lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void)
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (reg & LCD_RASTER_ENABLE)
lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+
+ if (lcd_revision == LCD_VERSION_2)
+ /* Write 1 to reset LCDC */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
}
static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par)
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
- if (lcd_revision == LCD_VERSION_2)
+ if (lcd_revision == LCD_VERSION_2) {
lcdc_write(0, LCD_INT_ENABLE_SET_REG);
+ /* Write 1 to reset */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
+ lcdc_write(0, LCD_CLK_RESET_REG);
+ }
}
static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 0ccd7adf47b..6f61e781f15 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 3532782551c..5c81533eaca 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
unsigned long fclk = 0;
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- if (width != out_width || height != out_height)
- return -EINVAL;
- else
- return 0;
- }
+ if (width == out_width && height == out_height)
+ return 0;
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
+ return -EINVAL;
if (out_width < width / maxdownscale ||
out_width > width * 8)
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 3262f0f1fa3..c56378c555b 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
unsigned long hdmi_get_pixel_clock(void)
{
/* HDMI Pixel Clock in Mhz */
- return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000;
+ return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000;
}
static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 69d882cbe70..c01c1c16272 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -559,8 +559,8 @@
#define M1200X720_R60_VSP POSITIVE
/* 1200x900@60 Sync Polarity (DCON) */
-#define M1200X900_R60_HSP NEGATIVE
-#define M1200X900_R60_VSP NEGATIVE
+#define M1200X900_R60_HSP POSITIVE
+#define M1200X900_R60_VSP POSITIVE
/* 1280x600@60 Sync Polarity (GTF Mode) */
#define M1280x600_R60_HSP NEGATIVE
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 816ed08e7cf..1a61939b85f 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -37,7 +37,7 @@ config VIRTIO_BALLOON
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ depends on HAS_IOMEM && EXPERIMENTAL
select VIRTIO
select VIRTIO_RING
---help---
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index acc5e43c373..0269717436a 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev)
vring_transport_features(vdev);
for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
- writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET);
+ writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
writel(vdev->features[i],
vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
}
@@ -361,7 +361,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return 0;
}
+static const char *vm_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ return vm_dev->pdev->name;
+}
static struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
@@ -373,6 +378,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
.del_vqs = vm_del_vqs,
.get_features = vm_get_features,
.finalize_features = vm_finalize_features,
+ .bus_name = vm_bus_name,
};
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 3d1bf41e889..baabb7937ec 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
+/* wait for pending irq handlers */
+static void vp_synchronize_vectors(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i;
+
+ if (vp_dev->intx_enabled)
+ synchronize_irq(vp_dev->pci_dev->irq);
+
+ for (i = 0; i < vp_dev->msix_vectors; ++i)
+ synchronize_irq(vp_dev->msix_entries[i].vector);
+}
+
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush out the status write, and flush in device writes,
+ * including MSi-X interrupts, if any. */
+ ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ /* Flush pending VQ/configuration callbacks. */
+ vp_synchronize_vectors(vdev);
}
/* the notify function used when creating a virt queue */
@@ -580,6 +598,13 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
false, false);
}
+static const char *vp_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ return pci_name(vp_dev->pci_dev);
+}
+
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
@@ -590,6 +615,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
.del_vqs = vp_del_vqs,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
+ .bus_name = vp_bus_name,
};
static void virtio_pci_release_dev(struct device *_d)
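
The virtio_pci hunks above make vp_reset() flush its status write with a read-back and then drain in-flight interrupt handlers via synchronize_irq() before callbacks are torn down. A condensed sketch of that ordering; the device structure and register offset are placeholders.

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/pci.h>

    #define MY_STATUS 0x12

    struct my_pci_dev {
        struct pci_dev *pci_dev;
        void __iomem *ioaddr;
    };

    static void my_reset(struct my_pci_dev *dev)
    {
        iowrite8(0, dev->ioaddr + MY_STATUS);   /* 0 = reset the device */
        ioread8(dev->ioaddr + MY_STATUS);       /* post the write to the device */
        synchronize_irq(dev->pci_dev->irq);     /* wait for pending handlers */
    }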
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 6285867a935..79fd606b7cd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -314,13 +314,6 @@ config NUC900_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called nuc900_wdt.
-config ADX_WATCHDOG
- tristate "Avionic Design Xanthos watchdog"
- depends on ARCH_PXA_ADX
- help
- Say Y here if you want support for the watchdog timer on Avionic
- Design Xanthos boards.
-
config TS72XX_WATCHDOG
tristate "TS-72XX SBC Watchdog"
depends on MACH_TS72XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 55bd5740e91..fe893e91935 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
-obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
deleted file mode 100644
index af6e6b16475..00000000000
--- a/drivers/watchdog/adx_wdt.c
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/fs.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-
-#define WATCHDOG_NAME "adx-wdt"
-
-/* register offsets */
-#define ADX_WDT_CONTROL 0x00
-#define ADX_WDT_CONTROL_ENABLE (1 << 0)
-#define ADX_WDT_CONTROL_nRESET (1 << 1)
-#define ADX_WDT_TIMEOUT 0x08
-
-static struct platform_device *adx_wdt_dev;
-static unsigned long driver_open;
-
-#define WDT_STATE_STOP 0
-#define WDT_STATE_START 1
-
-struct adx_wdt {
- void __iomem *base;
- unsigned long timeout;
- unsigned int state;
- unsigned int wake;
- spinlock_t lock;
-};
-
-static const struct watchdog_info adx_wdt_info = {
- .identity = "Avionic Design Xanthos Watchdog",
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-};
-
-static void adx_wdt_start_locked(struct adx_wdt *wdt)
-{
- u32 ctrl;
-
- ctrl = readl(wdt->base + ADX_WDT_CONTROL);
- ctrl |= ADX_WDT_CONTROL_ENABLE;
- writel(ctrl, wdt->base + ADX_WDT_CONTROL);
- wdt->state = WDT_STATE_START;
-}
-
-static void adx_wdt_start(struct adx_wdt *wdt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wdt->lock, flags);
- adx_wdt_start_locked(wdt);
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_stop_locked(struct adx_wdt *wdt)
-{
- u32 ctrl;
-
- ctrl = readl(wdt->base + ADX_WDT_CONTROL);
- ctrl &= ~ADX_WDT_CONTROL_ENABLE;
- writel(ctrl, wdt->base + ADX_WDT_CONTROL);
- wdt->state = WDT_STATE_STOP;
-}
-
-static void adx_wdt_stop(struct adx_wdt *wdt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wdt->lock, flags);
- adx_wdt_stop_locked(wdt);
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
-{
- unsigned long timeout = seconds * 1000;
- unsigned long flags;
- unsigned int state;
-
- spin_lock_irqsave(&wdt->lock, flags);
- state = wdt->state;
- adx_wdt_stop_locked(wdt);
- writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
-
- if (state == WDT_STATE_START)
- adx_wdt_start_locked(wdt);
-
- wdt->timeout = timeout;
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
-{
- *seconds = wdt->timeout / 1000;
-}
-
-static void adx_wdt_keepalive(struct adx_wdt *wdt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wdt->lock, flags);
- writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
- spin_unlock_irqrestore(&wdt->lock, flags);
-}
-
-static int adx_wdt_open(struct inode *inode, struct file *file)
-{
- struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
-
- if (test_and_set_bit(0, &driver_open))
- return -EBUSY;
-
- file->private_data = wdt;
- adx_wdt_set_timeout(wdt, 30);
- adx_wdt_start(wdt);
-
- return nonseekable_open(inode, file);
-}
-
-static int adx_wdt_release(struct inode *inode, struct file *file)
-{
- struct adx_wdt *wdt = file->private_data;
-
- adx_wdt_stop(wdt);
- clear_bit(0, &driver_open);
-
- return 0;
-}
-
-static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct adx_wdt *wdt = file->private_data;
- void __user *argp = (void __user *)arg;
- unsigned long __user *p = argp;
- unsigned long seconds = 0;
- unsigned int options;
- long ret = -EINVAL;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
- return -EFAULT;
- else
- return 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_KEEPALIVE:
- adx_wdt_keepalive(wdt);
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(seconds, p))
- return -EFAULT;
-
- adx_wdt_set_timeout(wdt, seconds);
-
- /* fallthrough */
- case WDIOC_GETTIMEOUT:
- adx_wdt_get_timeout(wdt, &seconds);
- return put_user(seconds, p);
-
- case WDIOC_SETOPTIONS:
- if (copy_from_user(&options, argp, sizeof(options)))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD) {
- adx_wdt_stop(wdt);
- ret = 0;
- }
-
- if (options & WDIOS_ENABLECARD) {
- adx_wdt_start(wdt);
- ret = 0;
- }
-
- return ret;
-
- default:
- break;
- }
-
- return -ENOTTY;
-}
-
-static ssize_t adx_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- struct adx_wdt *wdt = file->private_data;
-
- if (len)
- adx_wdt_keepalive(wdt);
-
- return len;
-}
-
-static const struct file_operations adx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = adx_wdt_open,
- .release = adx_wdt_release,
- .unlocked_ioctl = adx_wdt_ioctl,
- .write = adx_wdt_write,
-};
-
-static struct miscdevice adx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &adx_wdt_fops,
-};
-
-static int __devinit adx_wdt_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct adx_wdt *wdt;
- int ret = 0;
- u32 ctrl;
-
- wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
- if (!wdt) {
- dev_err(&pdev->dev, "cannot allocate WDT structure\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&wdt->lock);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
- return -ENXIO;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), res->name);
- if (!res) {
- dev_err(&pdev->dev, "cannot request I/O memory region\n");
- return -ENXIO;
- }
-
- wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!wdt->base) {
- dev_err(&pdev->dev, "cannot remap I/O memory region\n");
- return -ENXIO;
- }
-
- /* disable watchdog and reboot on timeout */
- ctrl = readl(wdt->base + ADX_WDT_CONTROL);
- ctrl &= ~ADX_WDT_CONTROL_ENABLE;
- ctrl &= ~ADX_WDT_CONTROL_nRESET;
- writel(ctrl, wdt->base + ADX_WDT_CONTROL);
-
- platform_set_drvdata(pdev, wdt);
- adx_wdt_dev = pdev;
-
- ret = misc_register(&adx_wdt_miscdev);
- if (ret) {
- dev_err(&pdev->dev, "cannot register miscdev on minor %d "
- "(err=%d)\n", WATCHDOG_MINOR, ret);
- return ret;
- }
-
- return 0;
-}
-
-static int __devexit adx_wdt_remove(struct platform_device *pdev)
-{
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
- misc_deregister(&adx_wdt_miscdev);
- adx_wdt_stop(wdt);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void adx_wdt_shutdown(struct platform_device *pdev)
-{
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
- adx_wdt_stop(wdt);
-}
-
-#ifdef CONFIG_PM
-static int adx_wdt_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
- wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
- adx_wdt_stop(wdt);
-
- return 0;
-}
-
-static int adx_wdt_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct adx_wdt *wdt = platform_get_drvdata(pdev);
-
- if (wdt->wake)
- adx_wdt_start(wdt);
-
- return 0;
-}
-
-static const struct dev_pm_ops adx_wdt_pm_ops = {
- .suspend = adx_wdt_suspend,
- .resume = adx_wdt_resume,
-};
-
-# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops)
-#else
-# define ADX_WDT_PM_OPS NULL
-#endif
-
-static struct platform_driver adx_wdt_driver = {
- .probe = adx_wdt_probe,
- .remove = __devexit_p(adx_wdt_remove),
- .shutdown = adx_wdt_shutdown,
- .driver = {
- .name = WATCHDOG_NAME,
- .owner = THIS_MODULE,
- .pm = ADX_WDT_PM_OPS,
- },
-};
-
-static int __init adx_wdt_init(void)
-{
- return platform_driver_register(&adx_wdt_driver);
-}
-
-static void __exit adx_wdt_exit(void)
-{
- platform_driver_unregister(&adx_wdt_driver);
-}
-
-module_init(adx_wdt_init);
-module_exit(adx_wdt_exit);
-
-MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 03f449a430d..5b89f7d6cd0 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -76,8 +76,6 @@ static int irq;
static void __iomem *virtbase;
static unsigned long coh901327_users;
static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
static struct device *parent;
/*
@@ -461,6 +459,10 @@ out:
}
#ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
{
irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 3774c9b8dac..8464ea1c36a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -231,6 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+ set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
asminline_call(&cmn_regs, bios32_entrypoint);
if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@ static int __devinit cru_detect(unsigned long map_entry,
if ((physical_bios_base + physical_bios_offset)) {
cru_rom_addr =
ioremap(cru_physical_address, cru_length);
- if (cru_rom_addr)
+ if (cru_rom_addr) {
+ set_memory_x((unsigned long)cru_rom_addr, cru_length);
retval = 0;
+ }
}
printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index ba6ad662635..99796c5d913 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
module_param(turn_SMI_watchdog_clear_off, int, 0);
MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
- "Turn off SMI clearing watchdog (default=0)");
+ "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
/*
* Some TCO specific functions
@@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
ret = -EIO;
goto out_unmap;
}
- if (turn_SMI_watchdog_clear_off) {
+ if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 5de7e4fa5b8..a79e3840782 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
(wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
- (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
- (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");
+ (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis",
+ (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis");
return 0;
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index cc2cfbe33b3..bfaf9bb1ee0 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
return 0;
}
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
{
.id = 0x00141805,
.mask = 0x00ffffff,
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 7be38556aed..e789a47db41 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev,
if (wm831x_wdt_cfgs[i].time == timeout)
break;
if (i == ARRAY_SIZE(wm831x_wdt_cfgs))
- ret = -EINVAL;
+ return -EINVAL;
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a767884a6c7..31ab82fda38 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target);
* alloc_xenballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
- * @highmem: highmem or lowmem pages
+ * @highmem: allow highmem pages
* @return 0 on success, error otherwise
*/
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
@@ -511,7 +511,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
mutex_lock(&balloon_mutex);
while (pgno < nr_pages) {
page = balloon_retrieve(highmem);
- if (page && PageHighMem(page) == highmem) {
+ if (page && (highmem || !PageHighMem(page))) {
pages[pgno++] = page;
} else {
enum bp_state st;
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index f6832f46aea..e1c4c6e5b46 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
/* Grant foreign access to the page. */
gref->gref_id = gnttab_grant_foreign_access(op->domid,
pfn_to_mfn(page_to_pfn(gref->page)), readonly);
- if (gref->gref_id < 0) {
+ if ((int)gref->gref_id < 0) {
rc = gref->gref_id;
goto undo;
}
@@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
goto out;
}
- gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY);
+ gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
if (!gref_ids) {
rc = -ENOMEM;
goto out;
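
The gntalloc.c hunk above casts gref_id to int before comparing with zero: the field is unsigned, so without the cast an error code stored in it could never test as negative. A tiny demonstration of the pitfall:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t id = (uint32_t)-22;    /* an -EINVAL style error stuffed into a u32 */

        printf("id < 0          -> %d\n", id < 0);              /* 0: never fires */
        printf("(int32_t)id < 0 -> %d\n", (int32_t)id < 0);     /* 1: error caught */
        return 0;
    }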
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 39871326afa..afca14d9042 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -114,11 +114,11 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
if (NULL == add)
return NULL;
- add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL);
- add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL);
- add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
- add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL);
- add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL);
+ add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
+ add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
+ add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
+ add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+ add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
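
The gntdev.c hunk above (like the gntalloc one) switches kzalloc(sizeof(x) * count, ...) to kcalloc(count, sizeof(x), ...), which rejects products that overflow instead of silently allocating a short buffer. A userspace analogue of that overflow check:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Overflow-safe, zeroed array allocation in the spirit of kcalloc(). */
    static void *alloc_array(size_t count, size_t size)
    {
        if (size != 0 && count > SIZE_MAX / size)
            return NULL;                /* product would wrap */
        return calloc(count, size);
    }

    int main(void)
    {
        int *ok = alloc_array(128, sizeof(*ok));
        void *bad = alloc_array(SIZE_MAX / 2, 16);

        printf("128 ints           : %s\n", ok ? "allocated" : "rejected");
        printf("overflowing request: %s\n", bad ? "allocated" : "rejected");
        free(ok);
        return 0;
    }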
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e964b91c44..284798aaf8b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -166,7 +166,7 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- xen_io_tlb_start = alloc_bootmem(bytes);
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
if (!xen_io_tlb_start) {
m = "Cannot allocate Xen-SWIOTLB buffer!\n";
goto error;
@@ -179,7 +179,7 @@ retry:
bytes,
xen_io_tlb_nslabs);
if (rc) {
- free_bootmem(__pa(xen_io_tlb_start), bytes);
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
m = "Failed to get contiguous memory for DMA from Xen!\n"\
"You either: don't have the permissions, do not have"\
" enough free memory under 4GB, or the hypervisor memory"\
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 81c3ce6b8bb..1906125eab4 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -35,6 +35,7 @@
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
+#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/events.h>
@@ -436,19 +437,20 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
struct gnttab_map_grant_ref op = {
- .flags = GNTMAP_host_map,
+ .flags = GNTMAP_host_map | GNTMAP_contains_pte,
.ref = gnt_ref,
.dom = dev->otherend_id,
};
struct vm_struct *area;
+ pte_t *pte;
*vaddr = NULL;
- area = alloc_vm_area(PAGE_SIZE);
+ area = alloc_vm_area(PAGE_SIZE, &pte);
if (!area)
return -ENOMEM;
- op.host_addr = (unsigned long)area->addr;
+ op.host_addr = arbitrary_virt_to_machine(pte).maddr;
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
@@ -527,6 +529,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
struct gnttab_unmap_grant_ref op = {
.host_addr = (unsigned long)vaddr,
};
+ unsigned int level;
/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
* method so that we don't have to muck with vmalloc internals here.
@@ -548,6 +551,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
}
op.handle = (grant_handle_t)area->phys_addr;
+ op.host_addr = arbitrary_virt_to_machine(
+ lookup_address((unsigned long)vaddr, &level)).maddr;
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index b3b8f2f3ad1..ede860f921d 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -621,15 +621,6 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
-static void xs_reset_watches(void)
-{
- int err;
-
- err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
- if (err && err != -EEXIST)
- printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -906,9 +897,5 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
- /* shutdown watches for kexec boot */
- if (xen_hvm_domain())
- xs_reset_watches();
-
return 0;
}