Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 12
-rw-r--r--  drivers/acpi/blacklist.c | 5
-rw-r--r--  drivers/acpi/processor_core.c | 6
-rw-r--r--  drivers/acpi/processor_idle.c | 6
-rw-r--r--  drivers/acpi/processor_thermal.c | 6
-rw-r--r--  drivers/acpi/processor_throttling.c | 30
-rw-r--r--  drivers/acpi/video.c | 7
-rw-r--r--  drivers/ata/Kconfig | 21
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 143
-rw-r--r--  drivers/ata/ata_piix.c | 14
-rw-r--r--  drivers/ata/libata-acpi.c | 7
-rw-r--r--  drivers/ata/libata-core.c | 44
-rw-r--r--  drivers/ata/libata-eh.c | 146
-rw-r--r--  drivers/ata/libata-pmp.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 159
-rw-r--r--  drivers/ata/libata.h | 1
-rw-r--r--  drivers/ata/pata_atiixp.c | 1
-rw-r--r--  drivers/ata/pata_cs5535.c | 3
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 4
-rw-r--r--  drivers/ata/pata_platform.c | 8
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 2
-rw-r--r--  drivers/ata/pata_rdc.c | 400
-rw-r--r--  drivers/ata/pata_rz1000.c | 4
-rw-r--r--  drivers/ata/sata_fsl.c | 1
-rw-r--r--  drivers/ata/sata_inic162x.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/ata/sata_sil.c | 13
-rw-r--r--  drivers/ata/sata_sil24.c | 11
-rw-r--r--  drivers/ata/sata_sis.c | 75
-rw-r--r--  drivers/block/aoe/aoe.h | 2
-rw-r--r--  drivers/block/aoe/aoeblk.c | 13
-rw-r--r--  drivers/block/aoe/aoedev.c | 1
-rw-r--r--  drivers/char/agp/intel-agp.c | 8
-rw-r--r--  drivers/char/hvc_iucv.c | 2
-rw-r--r--  drivers/char/hw_random/amd-rng.c | 4
-rw-r--r--  drivers/char/hw_random/geode-rng.c | 3
-rw-r--r--  drivers/char/mem.c | 1
-rw-r--r--  drivers/char/n_tty.c | 3
-rw-r--r--  drivers/char/pty.c | 10
-rw-r--r--  drivers/char/random.c | 14
-rw-r--r--  drivers/char/sysrq.c | 19
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 12
-rw-r--r--  drivers/char/tty_ldisc.c | 10
-rw-r--r--  drivers/clocksource/sh_cmt.c | 28
-rw-r--r--  drivers/cpufreq/cpufreq.c | 95
-rw-r--r--  drivers/crypto/Kconfig | 15
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 3
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 73
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 25
-rw-r--r--  drivers/crypto/mv_cesa.c | 606
-rw-r--r--  drivers/crypto/mv_cesa.h | 119
-rw-r--r--  drivers/crypto/padlock-sha.c | 329
-rw-r--r--  drivers/crypto/talitos.c | 216
-rw-r--r--  drivers/crypto/talitos.h | 1
-rw-r--r--  drivers/firewire/core-iso.c | 4
-rw-r--r--  drivers/firewire/ohci.c | 14
-rw-r--r--  drivers/firewire/sbp2.c | 8
-rw-r--r--  drivers/firmware/dmi_scan.c | 77
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 40
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 72
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 87
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 96
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 147
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 21
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 14
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c | 157
-rw-r--r--  drivers/ide/atiixp.c | 1
-rw-r--r--  drivers/ide/ide-cs.c | 1
-rw-r--r--  drivers/infiniband/core/iwcm.c | 1
-rw-r--r--  drivers/infiniband/core/mad.c | 35
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 3
-rw-r--r--  drivers/infiniband/core/multicast.c | 10
-rw-r--r--  drivers/infiniband/core/sa_query.c | 7
-rw-r--r--  drivers/infiniband/core/smi.c | 8
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 10
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c | 6
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c | 24
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h | 6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c | 37
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 68
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.h | 9
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c | 21
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 52
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 8
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c | 6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c | 47
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 12
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_catas.c | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_config_reg.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 17
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_reset.c | 1
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 128
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 767
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h | 103
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c | 5
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 204
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h | 16
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 7
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 21
-rw-r--r--  drivers/input/joydev.c | 68
-rw-r--r--  drivers/input/joystick/iforce/iforce-main.c | 1
-rw-r--r--  drivers/input/joystick/iforce/iforce-usb.c | 1
-rw-r--r--  drivers/input/keyboard/atkbd.c | 35
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 8
-rw-r--r--  drivers/input/tablet/wacom_sys.c | 43
-rw-r--r--  drivers/input/touchscreen/ucb1400_ts.c | 17
-rw-r--r--  drivers/leds/ledtrig-gpio.c | 24
-rw-r--r--  drivers/macintosh/via-maciisi.c | 2
-rw-r--r--  drivers/md/dm-exception-store.c | 13
-rw-r--r--  drivers/md/dm-exception-store.h | 4
-rw-r--r--  drivers/md/dm-log-userspace-base.c | 39
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c | 8
-rw-r--r--  drivers/md/dm-log-userspace-transfer.h | 2
-rw-r--r--  drivers/md/dm-raid1.c | 8
-rw-r--r--  drivers/md/dm-snap-persistent.c | 88
-rw-r--r--  drivers/md/dm-snap.c | 23
-rw-r--r--  drivers/md/dm-stripe.c | 13
-rw-r--r--  drivers/md/dm-table.c | 51
-rw-r--r--  drivers/md/dm.c | 15
-rw-r--r--  drivers/md/md.c | 1
-rw-r--r--  drivers/media/common/tuners/qt1010.c | 12
-rw-r--r--  drivers/media/common/tuners/tuner-xc2028.c | 4
-rw-r--r--  drivers/media/common/tuners/tuner-xc2028.h | 1
-rw-r--r--  drivers/media/dvb/dvb-usb/af9015.c | 2
-rw-r--r--  drivers/media/dvb/frontends/cx22700.c | 2
-rw-r--r--  drivers/media/dvb/frontends/cx22702.c | 2
-rw-r--r--  drivers/media/dvb/frontends/cx24110.c | 2
-rw-r--r--  drivers/media/dvb/frontends/dvb_dummy_fe.c | 6
-rw-r--r--  drivers/media/dvb/frontends/l64781.c | 2
-rw-r--r--  drivers/media/dvb/frontends/lgs8gl5.c | 2
-rw-r--r--  drivers/media/dvb/frontends/mt312.c | 2
-rw-r--r--  drivers/media/dvb/frontends/nxt6000.c | 2
-rw-r--r--  drivers/media/dvb/frontends/or51132.c | 2
-rw-r--r--  drivers/media/dvb/frontends/or51211.c | 2
-rw-r--r--  drivers/media/dvb/frontends/s5h1409.c | 2
-rw-r--r--  drivers/media/dvb/frontends/s5h1411.c | 2
-rw-r--r--  drivers/media/dvb/frontends/si21xx.c | 2
-rw-r--r--  drivers/media/dvb/frontends/sp8870.c | 2
-rw-r--r--  drivers/media/dvb/frontends/sp887x.c | 2
-rw-r--r--  drivers/media/dvb/frontends/stv0288.c | 2
-rw-r--r--  drivers/media/dvb/frontends/stv0297.c | 2
-rw-r--r--  drivers/media/dvb/frontends/stv0299.c | 2
-rw-r--r--  drivers/media/dvb/frontends/tda10021.c | 2
-rw-r--r--  drivers/media/dvb/frontends/tda10048.c | 2
-rw-r--r--  drivers/media/dvb/frontends/tda1004x.c | 4
-rw-r--r--  drivers/media/dvb/frontends/tda10086.c | 2
-rw-r--r--  drivers/media/dvb/frontends/tda8083.c | 2
-rw-r--r--  drivers/media/dvb/frontends/ves1820.c | 2
-rw-r--r--  drivers/media/dvb/frontends/ves1x93.c | 2
-rw-r--r--  drivers/media/dvb/frontends/zl10353.c | 12
-rw-r--r--  drivers/media/dvb/siano/Kconfig | 40
-rw-r--r--  drivers/media/dvb/siano/Makefile | 9
-rw-r--r--  drivers/media/dvb/siano/sms-cards.c | 102
-rw-r--r--  drivers/media/dvb/siano/smscoreapi.c | 2
-rw-r--r--  drivers/media/dvb/siano/smsdvb.c | 44
-rw-r--r--  drivers/media/dvb/siano/smssdio.c | 54
-rw-r--r--  drivers/media/video/Kconfig | 2
-rw-r--r--  drivers/media/video/bw-qcam.c | 2
-rw-r--r--  drivers/media/video/cx18/cx18-controls.c | 3
-rw-r--r--  drivers/media/video/cx23885/cx23885-417.c | 2
-rw-r--r--  drivers/media/video/cx88/cx88-cards.c | 8
-rw-r--r--  drivers/media/video/cx88/cx88-dvb.c | 1
-rw-r--r--  drivers/media/video/cx88/cx88-mpeg.c | 4
-rw-r--r--  drivers/media/video/em28xx/em28xx-cards.c | 219
-rw-r--r--  drivers/media/video/em28xx/em28xx-core.c | 8
-rw-r--r--  drivers/media/video/em28xx/em28xx-dvb.c | 2
-rw-r--r--  drivers/media/video/em28xx/em28xx-reg.h | 3
-rw-r--r--  drivers/media/video/em28xx/em28xx-video.c | 77
-rw-r--r--  drivers/media/video/em28xx/em28xx.h | 4
-rw-r--r--  drivers/media/video/gspca/Kconfig | 2
-rw-r--r--  drivers/media/video/hdpvr/hdpvr-video.c | 2
-rw-r--r--  drivers/media/video/ivtv/ivtv-controls.c | 3
-rw-r--r--  drivers/media/video/mt9v011.c | 156
-rw-r--r--  drivers/media/video/mt9v011.h | 3
-rw-r--r--  drivers/media/video/mx1_camera.c | 6
-rw-r--r--  drivers/media/video/mx3_camera.c | 19
-rw-r--r--  drivers/media/video/pxa_camera.c | 8
-rw-r--r--  drivers/media/video/saa7134/saa7134-cards.c | 30
-rw-r--r--  drivers/media/video/saa7134/saa7134-dvb.c | 4
-rw-r--r--  drivers/media/video/saa7134/saa7134.h | 4
-rw-r--r--  drivers/media/video/sh_mobile_ceu_camera.c | 5
-rw-r--r--  drivers/media/video/stk-webcam.c | 4
-rw-r--r--  drivers/media/video/uvc/uvc_driver.c | 24
-rw-r--r--  drivers/media/video/uvc/uvc_status.c | 4
-rw-r--r--  drivers/media/video/v4l2-ioctl.c | 15
-rw-r--r--  drivers/media/video/zr364xx.c | 2
-rw-r--r--  drivers/mtd/devices/m25p80.c | 2
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 2
-rw-r--r--  drivers/mtd/nftlcore.c | 15
-rw-r--r--  drivers/net/3c59x.c | 4
-rw-r--r--  drivers/net/8139cp.c | 5
-rw-r--r--  drivers/net/Kconfig | 4
-rw-r--r--  drivers/net/arm/w90p910_ether.c | 4
-rw-r--r--  drivers/net/atl1c/atl1c_ethtool.c | 8
-rw-r--r--  drivers/net/atlx/atl1.c | 8
-rw-r--r--  drivers/net/b44.c | 5
-rw-r--r--  drivers/net/bnx2.c | 17
-rw-r--r--  drivers/net/bnx2.h | 1
-rw-r--r--  drivers/net/can/dev.c | 7
-rw-r--r--  drivers/net/cnic.c | 143
-rw-r--r--  drivers/net/cnic.h | 1
-rw-r--r--  drivers/net/cnic_if.h | 1
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 6
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 6
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h | 8
-rw-r--r--  drivers/net/e100.c | 2
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 94
-rw-r--r--  drivers/net/e1000e/netdev.c | 22
-rw-r--r--  drivers/net/fec.c | 9
-rw-r--r--  drivers/net/fec_mpc52xx.c | 5
-rw-r--r--  drivers/net/gianfar.c | 13
-rw-r--r--  drivers/net/ibm_newemac/core.c | 2
-rw-r--r--  drivers/net/irda/au1k_ir.c | 4
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 4
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 4
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 27
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 90
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 5
-rw-r--r--  drivers/net/macb.c | 7
-rw-r--r--  drivers/net/mlx4/cq.c | 1
-rw-r--r--  drivers/net/mlx4/en_rx.c | 5
-rw-r--r--  drivers/net/mlx4/en_tx.c | 5
-rw-r--r--  drivers/net/mlx4/eq.c | 77
-rw-r--r--  drivers/net/mlx4/icm.c | 1
-rw-r--r--  drivers/net/mlx4/main.c | 37
-rw-r--r--  drivers/net/mlx4/mcg.c | 1
-rw-r--r--  drivers/net/mlx4/mlx4.h | 7
-rw-r--r--  drivers/net/mlx4/mr.c | 1
-rw-r--r--  drivers/net/mlx4/pd.c | 1
-rw-r--r--  drivers/net/mlx4/profile.c | 2
-rw-r--r--  drivers/net/mlx4/qp.c | 2
-rw-r--r--  drivers/net/mlx4/reset.c | 1
-rw-r--r--  drivers/net/mlx4/srq.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 7
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 105
-rw-r--r--  drivers/net/pcnet32.c | 2
-rw-r--r--  drivers/net/smc91x.c | 40
-rw-r--r--  drivers/net/tulip/tulip_core.c | 5
-rw-r--r--  drivers/net/tun.c | 72
-rw-r--r--  drivers/net/ucc_geth.c | 5
-rw-r--r--  drivers/net/usb/pegasus.h | 2
-rw-r--r--  drivers/net/via-rhine.c | 5
-rw-r--r--  drivers/net/via-velocity.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 61
-rw-r--r--  drivers/net/wireless/ath/ar9170/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ar9170/usb.c | 6
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 122
-rw-r--r--  drivers/net/wireless/libertas/assoc.c | 18
-rw-r--r--  drivers/net/wireless/libertas/hostcmd.h | 4
-rw-r--r--  drivers/net/wireless/mwl8k.c | 31
-rw-r--r--  drivers/net/wireless/orinoco/hw.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 6
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 14
-rw-r--r--  drivers/net/yellowfin.c | 28
-rw-r--r--  drivers/net/zorro8390.c | 3
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 16
-rw-r--r--  drivers/oprofile/oprof.c | 71
-rw-r--r--  drivers/oprofile/oprof.h | 3
-rw-r--r--  drivers/oprofile/oprofile_files.c | 46
-rw-r--r--  drivers/oprofile/oprofile_stats.c | 5
-rw-r--r--  drivers/oprofile/oprofile_stats.h | 1
-rw-r--r--  drivers/pci/intr_remapping.c | 14
-rw-r--r--  drivers/pci/iov.c | 23
-rw-r--r--  drivers/pci/pci-driver.c | 2
-rw-r--r--  drivers/pci/pci.c | 2
-rw-r--r--  drivers/pci/pci.h | 13
-rw-r--r--  drivers/pci/quirks.c | 4
-rw-r--r--  drivers/pci/setup-bus.c | 4
-rw-r--r--  drivers/pci/setup-res.c | 8
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 1
-rw-r--r--  drivers/platform/x86/wmi.c | 8
-rw-r--r--  drivers/pps/pps.c | 2
-rw-r--r--  drivers/s390/block/dasd.c | 28
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 2
-rw-r--r--  drivers/s390/block/dasd_alias.c | 5
-rw-r--r--  drivers/s390/block/dasd_diag.c | 5
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 47
-rw-r--r--  drivers/s390/block/dasd_eer.c | 4
-rw-r--r--  drivers/s390/block/dasd_erp.c | 4
-rw-r--r--  drivers/s390/block/dasd_fba.c | 9
-rw-r--r--  drivers/s390/block/dasd_int.h | 11
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 24
-rw-r--r--  drivers/s390/block/xpram.c | 65
-rw-r--r--  drivers/s390/char/Kconfig | 10
-rw-r--r--  drivers/s390/char/Makefile | 1
-rw-r--r--  drivers/s390/char/monreader.c | 2
-rw-r--r--  drivers/s390/char/sclp.h | 4
-rw-r--r--  drivers/s390/char/sclp_async.c | 224
-rw-r--r--  drivers/s390/char/tape_34xx.c | 2
-rw-r--r--  drivers/s390/char/tape_3590.c | 4
-rw-r--r--  drivers/s390/char/tape_block.c | 12
-rw-r--r--  drivers/s390/char/tape_core.c | 18
-rw-r--r--  drivers/s390/char/tape_std.c | 2
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 4
-rw-r--r--  drivers/s390/char/vmur.c | 19
-rw-r--r--  drivers/s390/char/zcore.c | 2
-rw-r--r--  drivers/s390/cio/Makefile | 2
-rw-r--r--  drivers/s390/cio/chp.c | 3
-rw-r--r--  drivers/s390/cio/chsc.h | 24
-rw-r--r--  drivers/s390/cio/cio.c | 56
-rw-r--r--  drivers/s390/cio/cio.h | 4
-rw-r--r--  drivers/s390/cio/css.c | 32
-rw-r--r--  drivers/s390/cio/device.c | 176
-rw-r--r--  drivers/s390/cio/device_fsm.c | 22
-rw-r--r--  drivers/s390/cio/qdio.h | 4
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 55
-rw-r--r--  drivers/s390/cio/qdio_main.c | 4
-rw-r--r--  drivers/s390/cio/scsw.c | 843
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 17
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 8
-rw-r--r--  drivers/s390/net/netiucv.c | 9
-rw-r--r--  drivers/s390/net/smsgiucv.c | 6
-rw-r--r--  drivers/sbus/char/bbc_envctrl.c | 11
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_init.c | 12
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 98
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 6
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 91
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 124
-rw-r--r--  drivers/serial/Kconfig | 2
-rw-r--r--  drivers/spi/spi_s3c24xx.c | 23
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 8
-rw-r--r--  drivers/staging/pohmelfs/inode.c | 9
-rw-r--r--  drivers/thermal/thermal_sys.c | 9
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c | 5
-rw-r--r--  drivers/video/xen-fbfront.c | 8
-rw-r--r--  drivers/watchdog/ar7_wdt.c | 2
379 files changed, 6808 insertions, 4135 deletions
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 67340cc7014..257706e7734 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -70,6 +70,12 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
+ /* If Source and Target are the same, just return */
+
+ if (source_desc == target_desc) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* We know that source_desc is a buffer by now */
buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
@@ -161,6 +167,12 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
+ /* If Source and Target are the same, just return */
+
+ if (source_desc == target_desc) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* We know that source_desc is a string by now */
buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f6baa77deef..0c4ca4d318b 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -78,9 +78,10 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
static int __init blacklist_by_year(void)
{
- int year = dmi_get_year(DMI_BIOS_DATE);
+ int year;
+
/* Doesn't exist? Likely an old system */
- if (year == -1) {
+ if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
printk(KERN_ERR PREFIX "no DMI BIOS year, "
"acpi=force is required to enable ACPI\n" );
return 1;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 84e0f3c0744..2cc4b303387 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -1151,6 +1151,9 @@ static int __init acpi_processor_init(void)
{
int result = 0;
+ if (acpi_disabled)
+ return 0;
+
memset(&errata, 0, sizeof(errata));
#ifdef CONFIG_SMP
@@ -1197,6 +1200,9 @@ out_proc:
static void __exit acpi_processor_exit(void)
{
+ if (acpi_disabled)
+ return;
+
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0efa59e7e3a..66393d5c4c7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -162,8 +162,9 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
pr->power.timer_broadcast_on_state = state;
}
-static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(void *arg)
{
+ struct acpi_processor *pr = (struct acpi_processor *) arg;
unsigned long reason;
reason = pr->power.timer_broadcast_on_state < INT_MAX ?
@@ -635,7 +636,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
working++;
}
- lapic_timer_propagate_broadcast(pr);
+ smp_call_function_single(pr->id, lapic_timer_propagate_broadcast,
+ pr, 1);
return (working);
}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 39838c66603..31adda1099e 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
if (pr->limit.thermal.tx > tx)
tx = pr->limit.thermal.tx;
- result = acpi_processor_set_throttling(pr, tx);
+ result = acpi_processor_set_throttling(pr, tx, false);
if (result)
goto end;
}
@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
if (state <= max_pstate) {
if (pr->flags.throttling && pr->throttling.state)
- result = acpi_processor_set_throttling(pr, 0);
+ result = acpi_processor_set_throttling(pr, 0, false);
cpufreq_set_cur_state(pr->id, state);
} else {
cpufreq_set_cur_state(pr->id, max_pstate);
result = acpi_processor_set_throttling(pr,
- state - max_pstate);
+ state - max_pstate, false);
}
return result;
}
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 227543789ba..ae39797aab5 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,7 +62,8 @@ struct throttling_tstate {
#define THROTTLING_POSTCHANGE (2)
static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force);
static int acpi_processor_update_tsd_coord(void)
{
@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
*/
target_state = throttling_limit;
}
- return acpi_processor_set_throttling(pr, target_state);
+ return acpi_processor_set_throttling(pr, target_state, false);
}
/*
@@ -839,10 +840,10 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
if (ret >= 0) {
state = acpi_get_throttling_state(pr, value);
if (state == -1) {
- ACPI_WARNING((AE_INFO,
- "Invalid throttling state, reset"));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Invalid throttling state, reset\n"));
state = 0;
- ret = acpi_processor_set_throttling(pr, state);
+ ret = acpi_processor_set_throttling(pr, state, true);
if (ret)
return ret;
}
@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
}
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
- int state)
+ int state, bool force)
{
u32 value = 0;
u32 duty_mask = 0;
@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
if (!pr->flags.throttling)
return -ENODEV;
- if (state == pr->throttling.state)
+ if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
}
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
- int state)
+ int state, bool force)
{
int ret;
acpi_integer value;
@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
if (!pr->flags.throttling)
return -ENODEV;
- if (state == pr->throttling.state)
+ if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force)
{
cpumask_var_t saved_mask;
int ret = 0;
@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
/* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state);
+ t_state.target_state, force);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
set_cpus_allowed_ptr(current, cpumask_of(i));
ret = match_pr->throttling.
acpi_processor_set_throttling(
- match_pr, t_state.target_state);
+ match_pr, t_state.target_state, force);
}
}
/*
@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Disabling throttling (was T%d)\n",
pr->throttling.state));
- result = acpi_processor_set_throttling(pr, 0);
+ result = acpi_processor_set_throttling(pr, 0, false);
if (result)
goto end;
}
@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
if (strcmp(tmpbuf, charp) != 0)
return -EINVAL;
- result = acpi_processor_set_throttling(pr, state_val);
+ result = acpi_processor_set_throttling(pr, state_val, false);
if (result)
return result;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8851315ce85..60ea984c84a 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -2004,8 +2004,11 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
- sysfs_remove_link(&device->backlight->dev.kobj, "device");
- backlight_device_unregister(device->backlight);
+ if (device->backlight) {
+ sysfs_remove_link(&device->backlight->dev.kobj, "device");
+ backlight_device_unregister(device->backlight);
+ device->backlight = NULL;
+ }
if (device->cdev) {
sysfs_remove_link(&device->dev->dev.kobj,
"thermal_cooling");
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b17c57f8503..ab2fa4eeb36 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -26,6 +26,17 @@ config ATA_NONSTANDARD
bool
default n
+config ATA_VERBOSE_ERROR
+ bool "Verbose ATA error reporting"
+ default y
+ help
+ This option adds parsing of ATA command descriptions and error bits
+ in libata kernel output, making it easier to interpret.
+ This option will enlarge the kernel by approx. 6KB. Disable it only
+ if kernel size is more important than ease of debugging.
+
+ If unsure, say Y.
+
config ATA_ACPI
bool "ATA ACPI Support"
depends on ACPI && PCI
@@ -586,6 +597,16 @@ config PATA_RB532
If unsure, say N.
+config PATA_RDC
+ tristate "RDC PATA support"
+ depends on PCI
+ help
+ This option enables basic support for the later RDC PATA controllers
+ controllers via the new ATA layer. For the RDC 1010, you need to
+ enable the IT821X driver instead.
+
+ If unsure, say N.
+
config PATA_RZ1000
tristate "PC Tech RZ1000 PATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 38906f9bbb4..463eb52236a 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
+obj-$(CONFIG_PATA_RDC) += pata_rdc.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe3eba5d6b3..d4cd9c20331 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -329,10 +329,24 @@ static ssize_t ahci_activity_store(struct ata_device *dev,
enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+
static struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
+ &dev_attr_ahci_host_caps,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
NULL
};
@@ -539,6 +553,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
+ /* AMD */
+ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
+ /* AMD is using RAID class only for ahci controllers */
+ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -702,6 +722,36 @@ static void ahci_enable_ahci(void __iomem *mmio)
WARN_ON(1);
}
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
+
+ return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+}
+
/**
* ahci_save_initial_config - Save and fixup initial config values
* @pdev: target PCI device
@@ -1584,7 +1634,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
-static int ahci_kick_engine(struct ata_port *ap, int force_restart)
+static int ahci_kick_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1592,18 +1642,16 @@ static int ahci_kick_engine(struct ata_port *ap, int force_restart)
u32 tmp;
int busy, rc;
- /* do we need to kick the port? */
- busy = status & (ATA_BUSY | ATA_DRQ);
- if (!busy && !force_restart)
- return 0;
-
/* stop engine */
rc = ahci_stop_engine(ap);
if (rc)
goto out_restart;
- /* need to do CLO? */
- if (!busy) {
+ /* need to do CLO?
+ * always do CLO if PMP is attached (AHCI-1.3 9.2)
+ */
+ busy = status & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !sata_pmp_attached(ap)) {
rc = 0;
goto out_restart;
}
@@ -1651,7 +1699,7 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1, timeout_msec);
if (tmp & 0x1) {
- ahci_kick_engine(ap, 1);
+ ahci_kick_engine(ap);
return -EBUSY;
}
} else
@@ -1674,7 +1722,7 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
/* prepare for SRST (AHCI-1.1 10.4.1) */
- rc = ahci_kick_engine(ap, 1);
+ rc = ahci_kick_engine(ap);
if (rc && rc != -EOPNOTSUPP)
ata_link_printk(link, KERN_WARNING,
"failed to reset engine (errno=%d)\n", rc);
@@ -1890,7 +1938,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
ahci_check_ready);
if (rc)
- ahci_kick_engine(ap, 0);
+ ahci_kick_engine(ap);
}
return rc;
}
@@ -2271,7 +2319,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
/* make DMA engine forget about the failed command */
if (qc->flags & ATA_QCFLAG_FAILED)
- ahci_kick_engine(ap, 1);
+ ahci_kick_engine(ap);
}
static void ahci_pmp_attach(struct ata_port *ap)
@@ -2603,14 +2651,18 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
}
/*
- * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older
- * BIOS. The oldest version known to be broken is 0901 and working is
- * 1501 which was released on 2007-10-26. Force 32bit DMA on anything
- * older than 1501. Please read bko#9412 for more info.
+ * SB600 ahci controller on certain boards can't do 64bit DMA with
+ * older BIOS.
*/
-static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
+static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
{
static const struct dmi_system_id sysids[] = {
+ /*
+ * The oldest version known to be broken is 0901 and
+ * working is 1501 which was released on 2007-10-26.
+ * Force 32bit DMA on anything older than 1501.
+ * Please read bko#9412 for more info.
+ */
{
.ident = "ASUS M2A-VM",
.matches = {
@@ -2618,31 +2670,48 @@ static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
"ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
},
+ .driver_data = "20071026", /* yyyymmdd */
+ },
+ /*
+ * It's yet unknown whether more recent BIOS fixes the
+ * problem. Blacklist the whole board for the time
+ * being. Please read the following thread for more
+ * info.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/42326
+ */
+ {
+ .ident = "Gigabyte GA-MA69VM-S2",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "GA-MA69VM-S2"),
+ },
},
{ }
};
- const char *cutoff_mmdd = "10/26";
- const char *date;
- int year;
+ const struct dmi_system_id *match;
+ match = dmi_first_match(sysids);
if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
- !dmi_check_system(sysids))
+ !match)
return false;
- /*
- * Argh.... both version and date are free form strings.
- * Let's hope they're using the same date format across
- * different versions.
- */
- date = dmi_get_system_info(DMI_BIOS_DATE);
- year = dmi_get_year(DMI_BIOS_DATE);
- if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
- (year > 2007 ||
- (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
- return false;
+ if (match->driver_data) {
+ int year, month, date;
+ char buf[9];
+
+ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
- dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, "
- "forcing 32bit DMA, update BIOS\n");
+ if (strcmp(buf, match->driver_data) >= 0)
+ return false;
+
+ dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
+ "forcing 32bit DMA, update BIOS\n", match->ident);
+ } else
+ dev_printk(KERN_WARNING, &pdev->dev, "%s: this board can't "
+ "do 64bit DMA, forcing 32bit\n", match->ident);
return true;
}
@@ -2857,8 +2926,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
- /* apply ASUS M2A_VM quirk */
- if (ahci_asus_m2a_vm_32bit_only(pdev))
+ /* apply sb600 32bit only quirk */
+ if (ahci_sb600_32bit_only(pdev))
hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
@@ -2869,7 +2938,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ)
- pi.flags |= ATA_FLAG_NCQ;
+ pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 56b8a3ff128..9ac4e378992 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -664,6 +664,8 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
return ata_sff_prereset(link, deadline);
}
+static DEFINE_SPINLOCK(piix_lock);
+
/**
* piix_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
@@ -677,8 +679,9 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned long flags;
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
@@ -708,6 +711,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
+ spin_lock_irqsave(&piix_lock, flags);
+
/* PIO configuration clears DTE unconditionally. It will be
* programmed in set_dmamode which is guaranteed to be called
* after set_piomode if any DMA mode is available.
@@ -747,6 +752,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
}
+
+ spin_unlock_irqrestore(&piix_lock, flags);
}
/**
@@ -764,6 +771,7 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned long flags;
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
@@ -777,6 +785,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
{ 2, 1 },
{ 2, 3 }, };
+ spin_lock_irqsave(&piix_lock, flags);
+
pci_read_config_word(dev, master_port, &master_data);
if (ap->udma_mask)
pci_read_config_byte(dev, 0x48, &udma_enable);
@@ -867,6 +877,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
/* Don't scribble on 0x48 if the controller does not support UDMA */
if (ap->udma_mask)
pci_write_config_byte(dev, 0x48, udma_enable);
+
+ spin_unlock_irqrestore(&piix_lock, flags);
}
/**
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ac176da1f94..01964b6e6f6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -689,6 +689,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
struct ata_taskfile tf, ptf, rtf;
unsigned int err_mask;
const char *level;
+ const char *descr;
char msg[60];
int rc;
@@ -736,11 +737,13 @@ static int ata_acpi_run_tf(struct ata_device *dev,
snprintf(msg, sizeof(msg), "filtered out");
rc = 0;
}
+ descr = ata_get_cmd_descript(tf.command);
ata_dev_printk(dev, level,
- "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x %s\n",
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
tf.command, tf.feature, tf.nsect, tf.lbal,
- tf.lbam, tf.lbah, tf.device, msg);
+ tf.lbam, tf.lbah, tf.device,
+ (descr ? descr : "unknown"), msg);
return rc;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 072ba5ea138..df31deac5c8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -709,7 +709,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
head = tf->device & 0xf;
sect = tf->lbal;
- block = (cyl * dev->heads + head) * dev->sectors + sect;
+ if (!sect) {
+ ata_dev_printk(dev, KERN_WARNING, "device reported "
+ "invalid CHS sector 0\n");
+ sect = 1; /* oh well */
+ }
+
+ block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
}
return block;
@@ -2299,29 +2305,49 @@ static inline u8 ata_dev_knobble(struct ata_device *dev)
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
-static void ata_dev_config_ncq(struct ata_device *dev,
+static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
struct ata_port *ap = dev->link->ap;
int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
+ unsigned int err_mask;
+ char *aa_desc = "";
if (!ata_id_has_ncq(dev->id)) {
desc[0] = '\0';
- return;
+ return 0;
}
if (dev->horkage & ATA_HORKAGE_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
- return;
+ return 0;
}
if (ap->flags & ATA_FLAG_NCQ) {
hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
dev->flags |= ATA_DFLAG_NCQ;
}
+ if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
+ (ap->flags & ATA_FLAG_FPDMA_AA) &&
+ ata_id_has_fpdma_aa(dev->id)) {
+ err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
+ SATA_FPDMA_AA);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
+ "(error_mask=0x%x)\n", err_mask);
+ if (err_mask != AC_ERR_DEV) {
+ dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
+ return -EIO;
+ }
+ } else
+ aa_desc = ", AA";
+ }
+
if (hdepth >= ddepth)
- snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
+ snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
else
- snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
+ snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
+ ddepth, aa_desc);
+ return 0;
}
/**
@@ -2461,7 +2487,7 @@ int ata_dev_configure(struct ata_device *dev)
if (ata_id_has_lba(id)) {
const char *lba_desc;
- char ncq_desc[20];
+ char ncq_desc[24];
lba_desc = "LBA";
dev->flags |= ATA_DFLAG_LBA;
@@ -2475,7 +2501,9 @@ int ata_dev_configure(struct ata_device *dev)
}
/* config NCQ */
- ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+ rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
+ if (rc)
+ return rc;
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info) {
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 79711b64054..a04488f0de8 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -40,6 +40,7 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"
#include <linux/libata.h>
@@ -999,7 +1000,9 @@ static void __ata_port_freeze(struct ata_port *ap)
* ata_port_freeze - abort & freeze port
* @ap: ATA port to freeze
*
- * Abort and freeze @ap.
+ * Abort and freeze @ap. The freeze operation must be called
+ * first, because some hardware requires special operations
+ * before the taskfile registers are accessible.
*
* LOCKING:
* spin_lock_irqsave(host lock)
@@ -1013,8 +1016,8 @@ int ata_port_freeze(struct ata_port *ap)
WARN_ON(!ap->ops->error_handler);
- nr_aborted = ata_port_abort(ap);
__ata_port_freeze(ap);
+ nr_aborted = ata_port_abort(ap);
return nr_aborted;
}
@@ -2110,6 +2113,116 @@ void ata_eh_autopsy(struct ata_port *ap)
}
/**
+ * ata_get_cmd_descript - get description for ATA command
+ * @command: ATA command code to get description for
+ *
+ * Return a textual description of the given command, or NULL if the
+ * command is not known.
+ *
+ * LOCKING:
+ * None
+ */
+const char *ata_get_cmd_descript(u8 command)
+{
+#ifdef CONFIG_ATA_VERBOSE_ERROR
+ static const struct
+ {
+ u8 command;
+ const char *text;
+ } cmd_descr[] = {
+ { ATA_CMD_DEV_RESET, "DEVICE RESET" },
+ { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
+ { ATA_CMD_STANDBY, "STANDBY" },
+ { ATA_CMD_IDLE, "IDLE" },
+ { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
+ { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
+ { ATA_CMD_NOP, "NOP" },
+ { ATA_CMD_FLUSH, "FLUSH CACHE" },
+ { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
+ { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
+ { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
+ { ATA_CMD_SERVICE, "SERVICE" },
+ { ATA_CMD_READ, "READ DMA" },
+ { ATA_CMD_READ_EXT, "READ DMA EXT" },
+ { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
+ { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
+ { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
+ { ATA_CMD_WRITE, "WRITE DMA" },
+ { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
+ { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
+ { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
+ { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
+ { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
+ { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
+ { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
+ { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+ { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
+ { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
+ { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
+ { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
+ { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
+ { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
+ { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
+ { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
+ { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
+ { ATA_CMD_SET_FEATURES, "SET FEATURES" },
+ { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
+ { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
+ { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
+ { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
+ { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
+ { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
+ { ATA_CMD_SLEEP, "SLEEP" },
+ { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
+ { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
+ { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
+ { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
+ { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
+ { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
+ { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
+ { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
+ { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
+ { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
+ { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
+ { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
+ { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
+ { ATA_CMD_PMP_READ, "READ BUFFER" },
+ { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
+ { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
+ { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
+ { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
+ { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
+ { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
+ { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
+ { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
+ { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
+ { ATA_CMD_SMART, "SMART" },
+ { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
+ { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
+ { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
+ { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
+ { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
+ { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
+ { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
+ { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
+ { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
+ { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
+ { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
+ { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
+ { ATA_CMD_RESTORE, "RECALIBRATE" },
+ { 0, NULL } /* terminate list */
+ };
+
+ unsigned int i;
+ for (i = 0; cmd_descr[i].text; i++)
+ if (cmd_descr[i].command == command)
+ return cmd_descr[i].text;
+#endif
+
+ return NULL;
+}
+
+/**
* ata_eh_link_report - report error handling to user
* @link: ATA link EH is going on
*
@@ -2175,6 +2288,7 @@ static void ata_eh_link_report(struct ata_link *link)
ata_link_printk(link, KERN_ERR, "%s\n", desc);
}
+#ifdef CONFIG_ATA_VERBOSE_ERROR
if (ehc->i.serror)
ata_link_printk(link, KERN_ERR,
"SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
@@ -2195,6 +2309,7 @@ static void ata_eh_link_report(struct ata_link *link)
ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
+#endif
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
@@ -2226,14 +2341,23 @@ static void ata_eh_link_report(struct ata_link *link)
dma_str[qc->dma_dir]);
}
- if (ata_is_atapi(qc->tf.protocol))
- snprintf(cdb_buf, sizeof(cdb_buf),
+ if (ata_is_atapi(qc->tf.protocol)) {
+ if (qc->scsicmd)
+ scsi_print_command(qc->scsicmd);
+ else
+ snprintf(cdb_buf, sizeof(cdb_buf),
"cdb %02x %02x %02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x %02x %02x\n ",
cdb[0], cdb[1], cdb[2], cdb[3],
cdb[4], cdb[5], cdb[6], cdb[7],
cdb[8], cdb[9], cdb[10], cdb[11],
cdb[12], cdb[13], cdb[14], cdb[15]);
+ } else {
+ const char *descr = ata_get_cmd_descript(cmd->command);
+ if (descr)
+ ata_dev_printk(qc->dev, KERN_ERR,
+ "failed command: %s\n", descr);
+ }
ata_dev_printk(qc->dev, KERN_ERR,
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2252,6 +2376,7 @@ static void ata_eh_link_report(struct ata_link *link)
res->device, qc->err_mask, ata_err_string(qc->err_mask),
qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
+#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
ATA_ERR)) {
if (res->command & ATA_BUSY)
@@ -2275,6 +2400,7 @@ static void ata_eh_link_report(struct ata_link *link)
res->feature & ATA_UNC ? "UNC " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
res->feature & ATA_ABORTED ? "ABRT " : "");
+#endif
}
}
@@ -2574,11 +2700,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
postreset(slave, classes);
}
- /* clear cached SError */
+ /*
+ * Some controllers can't be frozen very well and may set
+ * spurious error conditions during reset. Clear accumulated
+ * error information. As reset is the final recovery action,
+ * nothing is lost by doing this.
+ */
spin_lock_irqsave(link->ap->lock, flags);
- link->eh_info.serror = 0;
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
if (slave)
- slave->eh_info.serror = 0;
+ memset(&slave->eh_info, 0, sizeof(link->eh_info));
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
spin_unlock_irqrestore(link->ap->lock, flags);
/* Make sure onlineness and classification result correspond.
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 619f2c33950..51f0ffb78cb 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -221,6 +221,8 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
{
u32 rev = gscr[SATA_PMP_GSCR_REV];
+ if (rev & (1 << 3))
+ return "1.2";
if (rev & (1 << 2))
return "1.1";
if (rev & (1 << 1))
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d0dfeef55db..b4ee28dec52 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1119,10 +1119,6 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
- if (ata_id_is_ssd(dev->id))
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
- sdev->request_queue);
-
/* ATA devices must be sector aligned */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_SECT_SIZE - 1);
@@ -1257,23 +1253,6 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
return queue_depth;
}
-/* XXX: for spindown warning */
-static void ata_delayed_done_timerfn(unsigned long arg)
-{
- struct scsi_cmnd *scmd = (void *)arg;
-
- scmd->scsi_done(scmd);
-}
-
-/* XXX: for spindown warning */
-static void ata_delayed_done(struct scsi_cmnd *scmd)
-{
- static struct timer_list timer;
-
- setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
- mod_timer(&timer, jiffies + 5 * HZ);
-}
-
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
@@ -1338,32 +1317,6 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
system_entering_hibernation())
goto skip;
- /* XXX: This is for backward compatibility, will be
- * removed. Read Documentation/feature-removal-schedule.txt
- * for more info.
- */
- if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
- (system_state == SYSTEM_HALT ||
- system_state == SYSTEM_POWER_OFF)) {
- static unsigned long warned;
-
- if (!test_and_set_bit(0, &warned)) {
- ata_dev_printk(qc->dev, KERN_WARNING,
- "DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
- "UPDATE SHUTDOWN UTILITY\n");
- ata_dev_printk(qc->dev, KERN_WARNING,
- "For more info, visit "
- "http://linux-ata.org/shutdown.html\n");
-
- /* ->scsi_done is not used, use it for
- * delayed completion.
- */
- scmd->scsi_done = qc->scsidone;
- qc->scsidone = ata_delayed_done;
- }
- goto skip;
- }
-
/* Issue ATA STANDBY IMMEDIATE command */
tf->command = ATA_CMD_STANDBYNOW1;
}
@@ -1764,14 +1717,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
}
}
- /* XXX: track spindown state for spindown skipping and warning */
- if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
- qc->tf.command == ATA_CMD_STANDBYNOW1))
- qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
- else if (likely(system_state != SYSTEM_HALT &&
- system_state != SYSTEM_POWER_OFF))
- qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;
-
if (need_sense && !ap->ops->error_handler)
ata_dump_status(ap->print_id, &qc->result_tf);
@@ -2815,28 +2760,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
/*
- * Filter TPM commands by default. These provide an
- * essentially uncontrolled encrypted "back door" between
- * applications and the disk. Set libata.allow_tpm=1 if you
- * have a real reason for wanting to use them. This ensures
- * that installed software cannot easily mess stuff up without
- * user intent. DVR type users will probably ship with this enabled
- * for movie content management.
- *
- * Note that for ATA8 we can issue a DCS change and DCS freeze lock
- * for this and should do in future but that it is not sufficient as
- * DCS is an optional feature set. Thus we also do the software filter
- * so that we comply with the TC consortium stated goal that the user
- * can turn off TC features of their system.
- */
- if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
- goto invalid_fld;
-
- /* We may not issue DMA commands if no DMA mode is set */
- if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
- goto invalid_fld;
-
- /*
* 12 and 16 byte CDBs use different offsets to
* provide the various register values.
*/
@@ -2885,6 +2808,41 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->device = dev->devno ?
tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
+ /* READ/WRITE LONG use a non-standard sect_size */
+ qc->sect_size = ATA_SECT_SIZE;
+ switch (tf->command) {
+ case ATA_CMD_READ_LONG:
+ case ATA_CMD_READ_LONG_ONCE:
+ case ATA_CMD_WRITE_LONG:
+ case ATA_CMD_WRITE_LONG_ONCE:
+ if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
+ goto invalid_fld;
+ qc->sect_size = scsi_bufflen(scmd);
+ }
+
+ /*
+ * Set flags so that all registers will be written, pass on
+ * write indication (used for PIO/DMA setup), result TF is
+ * copied back and we don't whine too much about its failure.
+ */
+ tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ tf->flags |= ATA_TFLAG_WRITE;
+
+ qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
+
+ /*
+ * Set transfer length.
+ *
+ * TODO: find out if we need to do more here to
+ * cover scatter/gather case.
+ */
+ ata_qc_set_pc_nbytes(qc);
+
+ /* We may not issue DMA commands if no DMA mode is set */
+ if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
+ goto invalid_fld;
+
/* sanity check for pio multi commands */
if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
goto invalid_fld;
@@ -2901,18 +2859,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
multi_count);
}
- /* READ/WRITE LONG use a non-standard sect_size */
- qc->sect_size = ATA_SECT_SIZE;
- switch (tf->command) {
- case ATA_CMD_READ_LONG:
- case ATA_CMD_READ_LONG_ONCE:
- case ATA_CMD_WRITE_LONG:
- case ATA_CMD_WRITE_LONG_ONCE:
- if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
- goto invalid_fld;
- qc->sect_size = scsi_bufflen(scmd);
- }
-
/*
* Filter SET_FEATURES - XFER MODE command -- otherwise,
* SET_FEATURES - XFER MODE must be preceded/succeeded
@@ -2920,30 +2866,27 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* controller (i.e. the reason for ->set_piomode(),
* ->set_dmamode(), and ->post_set_mode() hooks).
*/
- if ((tf->command == ATA_CMD_SET_FEATURES)
- && (tf->feature == SETFEATURES_XFER))
+ if (tf->command == ATA_CMD_SET_FEATURES &&
+ tf->feature == SETFEATURES_XFER)
goto invalid_fld;
/*
- * Set flags so that all registers will be written,
- * and pass on write indication (used for PIO/DMA
- * setup.)
- */
- tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);
-
- if (scmd->sc_data_direction == DMA_TO_DEVICE)
- tf->flags |= ATA_TFLAG_WRITE;
-
- /*
- * Set transfer length.
+ * Filter TPM commands by default. These provide an
+ * essentially uncontrolled encrypted "back door" between
+ * applications and the disk. Set libata.allow_tpm=1 if you
+ * have a real reason for wanting to use them. This ensures
+ * that installed software cannot easily mess stuff up without
+ * user intent. DVR type users will probably ship with this enabled
+ * for movie content management.
*
- * TODO: find out if we need to do more here to
- * cover scatter/gather case.
+ * Note that for ATA8 we can issue a DCS change and DCS freeze lock
+ * for this and should do in future but that it is not sufficient as
+ * DCS is an optional feature set. Thus we also do the software filter
+ * so that we comply with the TC consortium stated goal that the user
+ * can turn off TC features of their system.
*/
- ata_qc_set_pc_nbytes(qc);
-
- /* request result TF and be quiet about device error */
- qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
+ if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
+ goto invalid_fld;
return 0;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 89a1e0018e7..be8e2628f82 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -164,6 +164,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
unsigned int action);
extern void ata_eh_autopsy(struct ata_port *ap);
+const char *ata_get_cmd_descript(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 45915566e4e..aa4b3f6ae77 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -246,6 +246,7 @@ static const struct pci_device_id atiixp[] = {
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), },
{ },
};
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index d33aa28239a..403f56165ce 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -202,7 +202,8 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
}
static const struct pci_device_id cs5535[] = {
- { PCI_VDEVICE(NS, 0x002D), },
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
{ },
};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index abdd19fe990..d6f69561dc8 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -213,7 +213,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
* This is tI, C.F. spec. says 0, but Sony CF card requires
* more, we use 20 nS.
*/
- dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);;
+ dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
dma_tim.s.dmarq = dma_arq;
@@ -841,7 +841,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ocd = pdev->dev.platform_data;
cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
- res_cs0->end - res_cs0->start + 1);
+ resource_size(res_cs0));
if (!cs0)
return -ENOMEM;
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index d8d743af322..3f6ebc6c665 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -151,14 +151,14 @@ int __devinit __pata_platform_probe(struct device *dev,
*/
if (mmio) {
ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
- io_res->end - io_res->start + 1);
+ resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
- ctl_res->end - ctl_res->start + 1);
+ resource_size(ctl_res));
} else {
ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
- io_res->end - io_res->start + 1);
+ resource_size(io_res));
ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
- ctl_res->end - ctl_res->start + 1);
+ resource_size(ctl_res));
}
if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
dev_err(dev, "failed to map IO/CTL base\n");
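
The resource_size() conversions here and in the neighbouring hunks are purely
cosmetic; the helper is an inline from <linux/ioport.h>, essentially the sketch
below (paraphrased, check the header for the exact definition):

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}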
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 8e3cdef8a25..45f1e10f917 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -151,7 +151,7 @@ static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
info->irq = irq;
info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!info->iobase)
return -ENOMEM;
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
new file mode 100644
index 00000000000..c843a1e07c4
--- /dev/null
+++ b/drivers/ata/pata_rdc.c
@@ -0,0 +1,400 @@
+/*
+ * pata_rdc - Driver for later RDC PATA controllers
+ *
+ * This is actually a driver for hardware meeting
+ * INCITS 370-2004 (1510D): ATA Host Adapter Standards
+ *
+ * Based on ata_piix.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <linux/dmi.h>
+
+#define DRV_NAME "pata_rdc"
+#define DRV_VERSION "0.01"
+
+struct rdc_host_priv {
+ u32 saved_iocfg;
+};
+
+/**
+ * rdc_pata_cable_detect - Probe host controller cable detect info
+ * @ap: Port for which cable detect info is desired
+ *
+ * Read 80c cable indicator from ATA PCI device's PCI config
+ * register. This register is normally set by firmware (BIOS).
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static int rdc_pata_cable_detect(struct ata_port *ap)
+{
+ struct rdc_host_priv *hpriv = ap->host->private_data;
+ u8 mask;
+
+ /* check BIOS cable detect results */
+ mask = 0x30 << (2 * ap->port_no);
+ if ((hpriv->saved_iocfg & mask) == 0)
+ return ATA_CBL_PATA40;
+ return ATA_CBL_PATA80;
+}
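
A stand-alone illustration of the IOCFG test above (helper name invented; the
mask places port 0 in bits 4-5 and port 1 in bits 6-7 of the saved register):

	static int rdc_cable_is_80wire(u32 saved_iocfg, int port_no)
	{
		u32 mask = 0x30 << (2 * port_no);	/* 0x30 or 0xc0 */

		return (saved_iocfg & mask) != 0;	/* any bit set: 80c */
	}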
+
+/**
+ * rdc_pata_prereset - prereset for PATA host controller
+ * @link: Target link
+ * @deadline: deadline jiffies for the operation
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ static const struct pci_bits rdc_enable_bits[] = {
+ { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
+ { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
+ };
+
+ if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
+ return -ENOENT;
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
+ * rdc_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned int is_slave = (adev->devno != 0);
+ unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+ unsigned int slave_port = 0x44;
+ u16 master_data;
+ u8 slave_data;
+ u8 udma_enable;
+ int control = 0;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ if (pio >= 2)
+ control |= 1; /* TIME1 enable */
+ if (ata_pio_need_iordy(adev))
+ control |= 2; /* IE enable */
+
+ if (adev->class == ATA_DEV_ATA)
+ control |= 4; /* PPE enable */
+
+ /* PIO configuration clears DTE unconditionally. It will be
+ * programmed in set_dmamode which is guaranteed to be called
+ * after set_piomode if any DMA mode is available.
+ */
+ pci_read_config_word(dev, master_port, &master_data);
+ if (is_slave) {
+ /* clear TIME1|IE1|PPE1|DTE1 */
+ master_data &= 0xff0f;
+ /* Enable SITRE (separate slave timing register) */
+ master_data |= 0x4000;
+ /* enable PPE1, IE1 and TIME1 as needed */
+ master_data |= (control << 4);
+ pci_read_config_byte(dev, slave_port, &slave_data);
+ slave_data &= (ap->port_no ? 0x0f : 0xf0);
+ /* Load the timing nibble for this slave */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
+ << (ap->port_no ? 4 : 0);
+ } else {
+ /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
+ master_data &= 0xccf0;
+ /* Enable PPE, IE and TIME as appropriate */
+ master_data |= control;
+ /* load ISP and RCT */
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+ pci_write_config_word(dev, master_port, master_data);
+ if (is_slave)
+ pci_write_config_byte(dev, slave_port, slave_data);
+
+ /* Ensure the UDMA bit is off - it will be turned back on if
+ UDMA is selected */
+
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+ udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
+ pci_write_config_byte(dev, 0x48, udma_enable);
+}
+
+/**
+ * rdc_set_dmamode - Initialize host controller PATA DMA timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set UDMA mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ u8 master_port = ap->port_no ? 0x42 : 0x40;
+ u16 master_data;
+ u8 speed = adev->dma_mode;
+ int devid = adev->devno + 2 * ap->port_no;
+ u8 udma_enable = 0;
+
+ static const /* ISP RTC */
+ u8 timings[][2] = { { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ pci_read_config_word(dev, master_port, &master_data);
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
+ if (speed >= XFER_UDMA_0) {
+ unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+ u16 udma_timing;
+ u16 ideconf;
+ int u_clock, u_speed;
+
+ /*
+ * UDMA is handled by a combination of clock switching and
+ * selection of dividers
+ *
+ * Handy rule: Odd modes are UDMATIMx 01, even are 02
+ * except UDMA0 which is 00
+ */
+ u_speed = min(2 - (udma & 1), udma);
+ if (udma == 5)
+ u_clock = 0x1000; /* 100Mhz */
+ else if (udma > 2)
+ u_clock = 1; /* 66Mhz */
+ else
+ u_clock = 0; /* 33Mhz */
+
+ udma_enable |= (1 << devid);
+
+ /* Load the CT/RP selection */
+ pci_read_config_word(dev, 0x4A, &udma_timing);
+ udma_timing &= ~(3 << (4 * devid));
+ udma_timing |= u_speed << (4 * devid);
+ pci_write_config_word(dev, 0x4A, udma_timing);
+
+ /* Select a 33/66/100Mhz clock */
+ pci_read_config_word(dev, 0x54, &ideconf);
+ ideconf &= ~(0x1001 << devid);
+ ideconf |= u_clock << devid;
+ pci_write_config_word(dev, 0x54, ideconf);
+ } else {
+ /*
+ * MWDMA is driven by the PIO timings. We must also enable
+ * IORDY unconditionally along with TIME1. PPE has already
+ * been set when the PIO timing was set.
+ */
+ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
+ unsigned int control;
+ u8 slave_data;
+ const unsigned int needed_pio[3] = {
+ XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
+ };
+ int pio = needed_pio[mwdma] - XFER_PIO_0;
+
+ control = 3; /* IORDY|TIME1 */
+
+ /* If the drive MWDMA is faster than it can do PIO then
+ we must force PIO into PIO0 */
+
+ if (adev->pio_mode < needed_pio[mwdma])
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
+
+ if (adev->devno) { /* Slave */
+ master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
+ master_data |= control << 4;
+ pci_read_config_byte(dev, 0x44, &slave_data);
+ slave_data &= (ap->port_no ? 0x0f : 0xf0);
+ /* Load the matching timing */
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
+ pci_write_config_byte(dev, 0x44, slave_data);
+ } else { /* Master */
+ master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
+ and master timing bits */
+ master_data |= control;
+ master_data |=
+ (timings[pio][0] << 12) |
+ (timings[pio][1] << 8);
+ }
+
+ udma_enable &= ~(1 << devid);
+ pci_write_config_word(dev, master_port, master_data);
+ }
+ pci_write_config_byte(dev, 0x48, udma_enable);
+}
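
The "handy rule" in the UDMA comment above can be written out explicitly; the
sketch below (illustrative only, not part of the driver) reproduces the
u_speed/u_clock selection for modes 0-5:

	static void rdc_udma_dividers(unsigned int udma, int *u_speed, int *u_clock)
	{
		int cap = 2 - (udma & 1);		/* odd -> 1, even -> 2 */

		*u_speed = udma < cap ? udma : cap;	/* UDMA0 stays 0 */

		if (udma == 5)
			*u_clock = 0x1000;		/* 100 MHz */
		else if (udma > 2)
			*u_clock = 1;			/* 66 MHz */
		else
			*u_clock = 0;			/* 33 MHz */
	}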
+
+static struct ata_port_operations rdc_pata_ops = {
+ .inherits = &ata_bmdma32_port_ops,
+ .cable_detect = rdc_pata_cable_detect,
+ .set_piomode = rdc_set_piomode,
+ .set_dmamode = rdc_set_dmamode,
+ .prereset = rdc_pata_prereset,
+};
+
+static struct ata_port_info rdc_port_info = {
+
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA5,
+ .port_ops = &rdc_pata_ops,
+};
+
+static struct scsi_host_template rdc_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/**
+ * rdc_init_one - Register PIIX ATA PCI device with kernel services
+ * @pdev: PCI device to register
+ * @ent: Entry in rdc_pci_tbl matching with @pdev
+ *
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * and then hand over control to libata, for it to do the rest.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, or -ERRNO value.
+ */
+
+static int __devinit rdc_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static int printed_version;
+ struct device *dev = &pdev->dev;
+ struct ata_port_info port_info[2];
+ const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
+ unsigned long port_flags;
+ struct ata_host *host;
+ struct rdc_host_priv *hpriv;
+ int rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "version " DRV_VERSION "\n");
+
+ port_info[0] = rdc_port_info;
+ port_info[1] = rdc_port_info;
+
+ port_flags = port_info[0].flags;
+
+ /* enable device and prepare host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
+ /* Save IOCFG, this will be used for cable detection, quirk
+ * detection and restoration on detach.
+ */
+ pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
+
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+ if (rc)
+ return rc;
+ host->private_data = hpriv;
+
+ pci_intx(pdev, 1);
+
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+
+ pci_set_master(pdev);
+ return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht);
+}
+
+static void rdc_remove_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct rdc_host_priv *hpriv = host->private_data;
+
+ pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
+
+ ata_pci_remove_one(pdev);
+}
+
+static const struct pci_device_id rdc_pci_tbl[] = {
+ { PCI_DEVICE(0x17F3, 0x1011), },
+ { PCI_DEVICE(0x17F3, 0x1012), },
+ { } /* terminate list */
+};
+
+static struct pci_driver rdc_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = rdc_pci_tbl,
+ .probe = rdc_init_one,
+ .remove = rdc_remove_one,
+};
+
+
+static int __init rdc_init(void)
+{
+ return pci_register_driver(&rdc_pci_driver);
+}
+
+static void __exit rdc_exit(void)
+{
+ pci_unregister_driver(&rdc_pci_driver);
+}
+
+module_init(rdc_init);
+module_exit(rdc_exit);
+
+MODULE_AUTHOR("Alan Cox (based on ata_piix)");
+MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 0c574c065c6..a5e4dfe60b4 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -85,7 +85,6 @@ static int rz1000_fifo_disable(struct pci_dev *pdev)
static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -93,8 +92,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
};
const struct ata_port_info *ppi[] = { &info, NULL };
- if (!printed_version++)
- printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+ printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
if (rz1000_fifo_disable(pdev) == 0)
return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 94eaa432c40..d344db42a00 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1257,6 +1257,7 @@ static struct scsi_host_template sata_fsl_sht = {
static struct ata_port_operations sata_fsl_ops = {
.inherits = &sata_pmp_port_ops,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = sata_fsl_qc_prep,
.qc_issue = sata_fsl_qc_issue,
.qc_fill_rtf = sata_fsl_qc_fill_rtf,
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 8d890cc5a7e..4406902b429 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -405,7 +405,7 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
struct ata_host *host = dev_instance;
struct inic_host_priv *hpriv = host->private_data;
u16 host_irq_stat;
- int i, handled = 0;;
+ int i, handled = 0;
host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c19417e0220..17f9ff9067a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4013,7 +4013,7 @@ static int mv_platform_probe(struct platform_device *pdev)
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
hpriv->base -= SATAHC0_REG_BASE;
/*
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 35bd5cc7f28..3cb69d5fb81 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -565,6 +565,19 @@ static void sil_freeze(struct ata_port *ap)
tmp |= SIL_MASK_IDE0_INT << ap->port_no;
writel(tmp, mmio_base + SIL_SYSCFG);
readl(mmio_base + SIL_SYSCFG); /* flush */
+
+ /* Ensure DMA_ENABLE is off.
+ *
+ * This is because the controller will not give us access to the
+ * taskfile registers while a DMA is in progress
+ */
+ iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
+ ap->ioaddr.bmdma_addr);
+
+ /* According to ata_bmdma_stop, an HDMA transition requires
+ * one PIO cycle. But we can't read a taskfile register.
+ */
+ ioread8(ap->ioaddr.bmdma_addr);
}
static void sil_thaw(struct ata_port *ap)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 77aa8d7ecec..e6946fc527d 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -846,6 +846,17 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
if (!ata_is_atapi(qc->tf.protocol)) {
prb = &cb->ata.prb;
sge = cb->ata.sge;
+ if (ata_is_data(qc->tf.protocol)) {
+ u16 prot = 0;
+ ctrl = PRB_CTRL_PROTOCOL;
+ if (ata_is_ncq(qc->tf.protocol))
+ prot |= PRB_PROT_NCQ;
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ prot |= PRB_PROT_WRITE;
+ else
+ prot |= PRB_PROT_READ;
+ prb->prot = cpu_to_le16(prot);
+ }
} else {
prb = &cb->atapi.prb;
sge = cb->atapi.sge;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 8f983322861..f8a91bfd66a 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -109,8 +109,9 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
+static unsigned int get_scr_cfg_addr(struct ata_link *link, unsigned int sc_reg)
{
+ struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
u8 pmr;
@@ -131,6 +132,9 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
break;
}
}
+ if (link->pmp)
+ addr += 0x10;
+
return addr;
}
@@ -138,24 +142,12 @@ static u32 sis_scr_cfg_read(struct ata_link *link,
unsigned int sc_reg, u32 *val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
- u32 val2 = 0;
- u8 pmr;
+ unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
return -EINVAL;
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
-
pci_read_config_dword(pdev, cfg_addr, val);
-
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
-
- *val |= val2;
- *val &= 0xfffffffb; /* avoid problems with powerdowned ports */
-
return 0;
}
@@ -163,28 +155,16 @@ static int sis_scr_cfg_write(struct ata_link *link,
unsigned int sc_reg, u32 val)
{
struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
- u8 pmr;
-
- if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
- return -EINVAL;
-
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
+ unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
pci_write_config_dword(pdev, cfg_addr, val);
-
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- pci_write_config_dword(pdev, cfg_addr+0x10, val);
-
return 0;
}
static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
struct ata_port *ap = link->ap;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u8 pmr;
+ void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
if (sc_reg > SCR_CONTROL)
return -EINVAL;
@@ -192,39 +172,23 @@ static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
if (ap->flags & SIS_FLAG_CFGSCR)
return sis_scr_cfg_read(link, sc_reg, val);
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
-
- *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
-
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
-
- *val &= 0xfffffffb;
-
+ *val = ioread32(base + sc_reg * 4);
return 0;
}
static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
struct ata_port *ap = link->ap;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u8 pmr;
+ void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
-
if (ap->flags & SIS_FLAG_CFGSCR)
return sis_scr_cfg_write(link, sc_reg, val);
- else {
- iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
- if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
- (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
- return 0;
- }
+
+ iowrite32(val, base + (sc_reg * 4));
+ return 0;
}
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -236,7 +200,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 genctl, val;
u8 pmr;
u8 port2_start = 0x20;
- int rc;
+ int i, rc;
if (!printed_version++)
dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
@@ -319,6 +283,17 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap->flags & ATA_FLAG_SATA &&
+ ap->flags & ATA_FLAG_SLAVE_POSS) {
+ rc = ata_slave_link_init(ap);
+ if (rc)
+ return rc;
+ }
+ }
+
if (!(pi.flags & SIS_FLAG_CFGSCR)) {
void __iomem *mmio;
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 5e41e6dd657..db195abad69 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -155,7 +155,7 @@ struct aoedev {
u16 fw_ver; /* version of blade's firmware */
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
- struct request_queue blkq;
+ struct request_queue *blkq;
struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2307a271bdc..95d344971ed 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -264,9 +264,13 @@ aoeblk_gdalloc(void *vp)
goto err_disk;
}
- blk_queue_make_request(&d->blkq, aoeblk_make_request);
- if (bdi_init(&d->blkq.backing_dev_info))
+ d->blkq = blk_alloc_queue(GFP_KERNEL);
+ if (!d->blkq)
goto err_mempool;
+ blk_queue_make_request(d->blkq, aoeblk_make_request);
+ d->blkq->backing_dev_info.name = "aoe";
+ if (bdi_init(&d->blkq->backing_dev_info))
+ goto err_blkq;
spin_lock_irqsave(&d->lock, flags);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor * AOE_PARTITIONS;
@@ -276,7 +280,7 @@ aoeblk_gdalloc(void *vp)
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
- gd->queue = &d->blkq;
+ gd->queue = d->blkq;
d->gd = gd;
d->flags &= ~DEVFL_GDALLOC;
d->flags |= DEVFL_UP;
@@ -287,6 +291,9 @@ aoeblk_gdalloc(void *vp)
aoedisk_add_sysfs(d);
return;
+err_blkq:
+ blk_cleanup_queue(d->blkq);
+ d->blkq = NULL;
err_mempool:
mempool_destroy(d->bufpool);
err_disk:
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index eeea477d960..fa67027789a 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -113,6 +113,7 @@ aoedev_freedev(struct aoedev *d)
if (d->bufpool)
mempool_destroy(d->bufpool);
skbpoolfree(d);
+ blk_cleanup_queue(d->blkq);
kfree(d);
}
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8c9d50db5c3..c5855779058 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -49,6 +49,7 @@
#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
/* cover 915 and 945 variants */
@@ -81,7 +82,8 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
extern int agp_memory_reserved;
@@ -1216,6 +1218,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_G41_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
+ case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@@ -2195,6 +2198,8 @@ static const struct intel_driver_description {
"IGDNG/D", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
"IGDNG/M", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+ "IGDNG/MA", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -2398,6 +2403,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
{ }
};
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 86105efb4eb..0ecac7e532f 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1006,7 +1006,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->dev->release = (void (*)(struct device *)) kfree;
rc = device_register(priv->dev);
if (rc) {
- kfree(priv->dev);
+ put_device(priv->dev);
goto out_error_dev;
}
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index cd0ba51f7c8..0d8c5788b8e 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -44,8 +44,8 @@
* want to register another driver on the same PCI id.
*/
static const struct pci_device_id pci_tbl[] = {
- { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
- { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(AMD, 0x7443), 0, },
+ { PCI_VDEVICE(AMD, 0x746b), 0, },
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 64d513f6836..4c4d4e140f9 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -46,8 +46,7 @@
* want to register another driver on the same PCI id.
*/
static const struct pci_device_id pci_tbl[] = {
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
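
For reference, the PCI_VDEVICE() form that both RNG tables switch to is
shorthand from <linux/pci.h>; paraphrased (verify against the header), an entry
such as { PCI_VDEVICE(AMD, 0x7443), 0, } expands to roughly:

	{ PCI_VENDOR_ID_AMD, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* driver_data = */ 0, },

which is why only the trailing driver_data value ("0,") remains spelled out in
the new table entries.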
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index afa8813e737..645237bda68 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -822,6 +822,7 @@ static const struct file_operations zero_fops = {
* - permits private mappings, "copies" are taken of the source of zeros
*/
static struct backing_dev_info zero_bdi = {
+ .name = "char/mem",
.capabilities = BDI_CAP_MAP_COPY,
};
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 973be2f4419..4e28b35024e 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
if (space < 2)
return -1;
tty->canon_column = tty->column = 0;
- tty_put_char(tty, '\r');
- tty_put_char(tty, c);
+ tty->ops->write(tty, "\r\n", 2);
return 2;
}
tty->canon_column = tty->column;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index d083c73d784..b33d6688e91 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
* the other side of the pty/tty pair.
*/
-static int pty_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct tty_struct *to = tty->link;
- int c;
if (tty->stopped)
return 0;
- /* This isn't locked but our 8K is quite sloppy so no
- big deal */
-
- c = pty_space(to);
- if (c > count)
- c = count;
if (c > 0) {
/* Stuff the data into the input queue of the other end */
c = tty_insert_flip_string(to, buf, c);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8c7444857a4..d8a9255e1a3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -240,6 +240,7 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
+#include <linux/fips.h>
#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
@@ -413,6 +414,7 @@ struct entropy_store {
unsigned add_ptr;
int entropy_count;
int input_rotate;
+ __u8 *last_data;
};
static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ unsigned long flags;
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
while (nbytes) {
extract_buf(r, tmp);
+
+ if (r->last_data) {
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
i = min_t(int, nbytes, EXTRACT_SIZE);
memcpy(buf, tmp, i);
nbytes -= i;
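
The last_data check added above is the FIPS 140-2 style continuous self-test:
each freshly extracted block is compared against the previous one, and a
repeated block is treated as a stuck or failed source. A stand-alone sketch of
the idea (names and block size invented for illustration):

	#include <string.h>

	#define BLOCK_LEN 16	/* random.c uses its own EXTRACT_SIZE */

	/* Return 0 if the block passes, -1 if it repeats the previous one. */
	static int continuous_test(unsigned char *last, const unsigned char *cur)
	{
		if (memcmp(last, cur, BLOCK_LEN) == 0)
			return -1;			/* duplicated output */
		memcpy(last, cur, BLOCK_LEN);		/* remember for next call */
		return 0;
	}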
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ /* Enable continuous test in fips mode */
+ if (fips_enabled)
+ r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
}
static int rand_initialize(void)
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5d7a02f63e1..50eecfe1d72 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -24,6 +24,7 @@
#include <linux/sysrq.h>
#include <linux/kbd_kern.h>
#include <linux/proc_fs.h>
+#include <linux/nmi.h>
#include <linux/quotaops.h>
#include <linux/perf_counter.h>
#include <linux/kernel.h>
@@ -222,12 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
{
- struct pt_regs *regs = get_irq_regs();
- if (regs) {
- printk(KERN_INFO "CPU%d:\n", smp_processor_id());
- show_regs(regs);
+ /*
+ * Fall back to the workqueue based printing if the
+ * backtrace printing did not succeed or the
+ * architecture has no support for it:
+ */
+ if (!trigger_all_cpu_backtrace()) {
+ struct pt_regs *regs = get_irq_regs();
+
+ if (regs) {
+ printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+ show_regs(regs);
+ }
+ schedule_work(&sysrq_showallcpus);
}
- schedule_work(&sysrq_showallcpus);
}
static struct sysrq_key_op sysrq_showallcpus_op = {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index aec1931608a..0b73e4ec1ad 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
goto out_err;
}
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+
if (request_locality(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
- /* Default timeouts */
- chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
-
dev_info(dev,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 1733d3439ad..e48af9f7921 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -508,8 +508,9 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
* be obtained while the delayed work queue halt ensures that no more
* data is fed to the ldisc.
*
- * In order to wait for any existing references to complete see
- * tty_ldisc_wait_idle.
+ * You need to do a 'flush_scheduled_work()' (outside the ldisc_mutex)
+ * in order to make sure any currently executing ldisc work is also
+ * flushed.
*/
static int tty_ldisc_halt(struct tty_struct *tty)
@@ -753,11 +754,14 @@ void tty_ldisc_hangup(struct tty_struct *tty)
* N_TTY.
*/
if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+ /* Make sure the old ldisc is quiescent */
+ tty_ldisc_halt(tty);
+ flush_scheduled_work();
+
/* Avoid racing set_ldisc or tty_ldisc_release */
mutex_lock(&tty->ldisc_mutex);
if (tty->ldisc) { /* Not yet closed */
/* Switch back to N_TTY */
- tty_ldisc_halt(tty);
tty_ldisc_reinit(tty);
/* At this point we have a closed ldisc and we want to
reopen it. We could defer this to the next open but
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 2964f5f4a7e..6b3e0c2f33e 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -40,6 +40,7 @@ struct sh_cmt_priv {
struct platform_device *pdev;
unsigned long flags;
+ unsigned long flags_suspend;
unsigned long match_value;
unsigned long next_match_value;
unsigned long max_match_value;
@@ -667,11 +668,38 @@ static int __devexit sh_cmt_remove(struct platform_device *pdev)
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
+static int sh_cmt_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+ /* save flag state and stop CMT channel */
+ p->flags_suspend = p->flags;
+ sh_cmt_stop(p, p->flags);
+ return 0;
+}
+
+static int sh_cmt_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+ /* start CMT channel from saved state */
+ sh_cmt_start(p, p->flags_suspend);
+ return 0;
+}
+
+static struct dev_pm_ops sh_cmt_dev_pm_ops = {
+ .suspend = sh_cmt_suspend,
+ .resume = sh_cmt_resume,
+};
+
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
.remove = __devexit_p(sh_cmt_remove),
.driver = {
.name = "sh_cmt",
+ .pm = &sh_cmt_dev_pm_ops,
}
};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fd69086d08d..2968ed6a9c4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
int ret = 0;
-#ifdef __powerpc__
int cpu = sysdev->id;
- unsigned int cur_freq = 0;
struct cpufreq_policy *cpu_policy;
dprintk("suspending cpu %u\n", cpu);
- /*
- * This whole bogosity is here because Powerbooks are made of fail.
- * No sane platform should need any of the code below to be run.
- * (it's entirely the wrong thing to do, as driver->get may
- * reenable interrupts on some architectures).
- */
-
if (!cpu_online(cpu))
return 0;
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
if (cpufreq_driver->suspend) {
ret = cpufreq_driver->suspend(cpu_policy, pmsg);
- if (ret) {
+ if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
- goto out;
- }
- }
-
- if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
- goto out;
-
- if (cpufreq_driver->get)
- cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
- if (!cur_freq || !cpu_policy->cur) {
- printk(KERN_ERR "cpufreq: suspend failed to assert current "
- "frequency is what timing core thinks it is.\n");
- goto out;
- }
-
- if (unlikely(cur_freq != cpu_policy->cur)) {
- struct cpufreq_freqs freqs;
-
- if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- dprintk("Warning: CPU frequency is %u, "
- "cpufreq assumed %u kHz.\n",
- cur_freq, cpu_policy->cur);
-
- freqs.cpu = cpu;
- freqs.old = cpu_policy->cur;
- freqs.new = cur_freq;
-
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_SUSPENDCHANGE, &freqs);
- adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
-
- cpu_policy->cur = cur_freq;
}
out:
cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
return ret;
}
@@ -1330,24 +1287,21 @@ out:
* cpufreq_resume - restore proper CPU frequency handling after resume
*
* 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
- * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
- * restored.
+ * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
+ * restored. It will verify that the current freq is in sync with
+ * what we believe it to be. This is a bit later than when it
+ * should be, but nonetheless it's better than calling
+ * cpufreq_driver->get() here which might re-enable interrupts...
*/
static int cpufreq_resume(struct sys_device *sysdev)
{
int ret = 0;
-#ifdef __powerpc__
int cpu = sysdev->id;
struct cpufreq_policy *cpu_policy;
dprintk("resuming cpu %u\n", cpu);
- /* As with the ->suspend method, all the code below is
- * only necessary because Powerbooks suck.
- * See commit 42d4dc3f4e1e for jokes. */
-
if (!cpu_online(cpu))
return 0;
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
}
}
- if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
- unsigned int cur_freq = 0;
-
- if (cpufreq_driver->get)
- cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
- if (!cur_freq || !cpu_policy->cur) {
- printk(KERN_ERR "cpufreq: resume failed to assert "
- "current frequency is what timing core "
- "thinks it is.\n");
- goto out;
- }
-
- if (unlikely(cur_freq != cpu_policy->cur)) {
- struct cpufreq_freqs freqs;
-
- if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- dprintk("Warning: CPU frequency "
- "is %u, cpufreq assumed %u kHz.\n",
- cur_freq, cpu_policy->cur);
-
- freqs.cpu = cpu;
- freqs.old = cpu_policy->cur;
- freqs.new = cur_freq;
-
- srcu_notifier_call_chain(
- &cpufreq_transition_notifier_list,
- CPUFREQ_RESUMECHANGE, &freqs);
- adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
-
- cpu_policy->cur = cur_freq;
- }
- }
-
-out:
schedule_work(&cpu_policy->update);
+
fail:
cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
return ret;
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692372b..b08403d7d1c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -13,7 +13,6 @@ if CRYPTO_HW
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on X86 && !UML
- select CRYPTO_ALGAPI
help
Some VIA processors come with an integrated crypto engine
(so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
config CRYPTO_DEV_PADLOCK_SHA
tristate "PadLock driver for SHA1 and SHA256 algorithms"
depends on CRYPTO_DEV_PADLOCK
+ select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
help
@@ -157,6 +157,19 @@ config S390_PRNG
ANSI X9.17 standard. The PRNG is usable via the char device
/dev/prandom.
+config CRYPTO_DEV_MV_CESA
+ tristate "Marvell's Cryptographic Engine"
+ depends on PLAT_ORION
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
+ help
+ This driver allows you to utilize the Cryptographic Engines and
+ Security Accelerator (CESA) which can be found on the Marvell Orion
+ and Kirkwood SoCs, such as QNAP's TS-209.
+
+ Currently the driver supports AES in ECB and CBC mode without DMA.
+
config CRYPTO_DEV_HIFN_795X
tristate "Driver HIFN 795x crypto accelerator chips"
select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2bc884..6ffcb3f7f94 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 61b6e1bec8c..a33243c17b0 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
}
}
- tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct crypto4xx_ctx));
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 4c0dfb2b872..46e899ac924 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -31,8 +31,6 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
-#include <crypto/internal/hash.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
- if (alg->cra_type == &crypto_ablkcipher_type)
+ switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ default:
tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
- else if (alg->cra_type == &crypto_ahash_type)
- tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct crypto4xx_ctx));
+ break;
+ }
return 0;
}
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
}
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
- struct crypto_alg *crypto_alg, int array_size)
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
{
struct crypto4xx_alg *alg;
int i;
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
return -ENOMEM;
alg->alg = crypto_alg[i];
- INIT_LIST_HEAD(&alg->alg.cra_list);
- if (alg->alg.cra_init == NULL)
- alg->alg.cra_init = crypto4xx_alg_init;
- if (alg->alg.cra_exit == NULL)
- alg->alg.cra_exit = crypto4xx_alg_exit;
alg->dev = sec_dev;
- rc = crypto_register_alg(&alg->alg);
+
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ rc = crypto_register_ahash(&alg->alg.u.hash);
+ break;
+
+ default:
+ rc = crypto_register_alg(&alg->alg.u.cipher);
+ break;
+ }
+
if (rc) {
list_del(&alg->entry);
kfree(alg);
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
list_del(&alg->entry);
- crypto_unregister_alg(&alg->alg);
+ switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&alg->alg.u.hash);
+ break;
+
+ default:
+ crypto_unregister_alg(&alg->alg.u.cipher);
+ }
kfree(alg);
}
}
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
/**
* Supported Crypto Algorithms
*/
-struct crypto_alg crypto4xx_alg[] = {
+struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
- {
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = {
.decrypt = crypto4xx_decrypt,
}
}
- },
- /* Hash SHA1 */
- {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ahash_type,
- .cra_init = crypto4xx_sha1_alg_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ahash = {
- .digestsize = SHA1_DIGEST_SIZE,
- .init = crypto4xx_hash_init,
- .update = crypto4xx_hash_update,
- .final = crypto4xx_hash_final,
- .digest = crypto4xx_hash_digest,
- }
- }
- },
+ }},
};
/**
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 1ef10344936..da9cbe3b9fc 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,6 +22,8 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__
+#include <crypto/internal/hash.h>
+
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx {
u16 sa_len;
};
+struct crypto4xx_alg_common {
+ u32 type;
+ union {
+ struct crypto_alg cipher;
+ struct ahash_alg hash;
+ } u;
+};
+
struct crypto4xx_alg {
struct list_head entry;
- struct crypto_alg alg;
+ struct crypto4xx_alg_common alg;
struct crypto4xx_device *dev;
};
-#define crypto_alg_to_crypto4xx_alg(x) \
- container_of(x, struct crypto4xx_alg, alg)
+static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
+ struct crypto_alg *x)
+{
+ switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AHASH:
+ return container_of(__crypto_ahash_alg(x),
+ struct crypto4xx_alg, alg.u.hash);
+ }
+
+ return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+}
extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 00000000000..b21ef635f35
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPLv2
+ *
+ */
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "mv_cesa.h"
+/*
+ * STM:
+ * /---------------------------------------\
+ * | | request complete
+ * \./ |
+ * IDLE -> new request -> BUSY -> done -> DEQUEUE
+ * /°\ |
+ * | | more scatter entries
+ * \________________/
+ */
+enum engine_status {
+ ENGINE_IDLE,
+ ENGINE_BUSY,
+ ENGINE_W_DEQUEUE,
+};
+
+/**
+ * struct req_progress - used for every crypt request
+ * @src_sg_it: sg iterator for src
+ * @dst_sg_it: sg iterator for dst
+ * @sg_src_left: bytes left in src to process (scatter list)
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
+ * @sg_dst_left: bytes left in dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @total_req_bytes: total number of bytes processed (request).
+ *
+ * sg helpers are used to iterate over the scatterlist. Since the size of the
+ * SRAM may be less than the scatter size, this struct is used to keep
+ * track of progress within the current scatterlist.
+ */
+struct req_progress {
+ struct sg_mapping_iter src_sg_it;
+ struct sg_mapping_iter dst_sg_it;
+
+ /* src mostly */
+ int sg_src_left;
+ int src_start;
+ int crypt_len;
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+ int total_req_bytes;
+};
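
The two sg_mapping_iter fields above are driven with the standard sg_miter API
later in the file; the generic usage pattern (a sketch, kernel context assumed)
is:

	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr / miter.length describe the mapped chunk;
		 * copy to or from it here */
	}
	sg_miter_stop(&miter);

In the driver itself sg_miter_next() is called on demand (setup_data_in and
dequeue_complete_req) rather than in one tight loop, since each SRAM pass may
consume only part of a scatterlist entry.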
+
+struct crypto_priv {
+ void __iomem *reg;
+ void __iomem *sram;
+ int irq;
+ struct task_struct *queue_th;
+
+ /* the lock protects queue and eng_st */
+ spinlock_t lock;
+ struct crypto_queue queue;
+ enum engine_status eng_st;
+ struct ablkcipher_request *cur_req;
+ struct req_progress p;
+ int max_req_size;
+ int sram_size;
+};
+
+static struct crypto_priv *cpg;
+
+struct mv_ctx {
+ u8 aes_enc_key[AES_KEY_LEN];
+ u32 aes_dec_key[8];
+ int key_len;
+ u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+ COP_AES_ECB,
+ COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+ enum crypto_op op;
+ int decrypt;
+};
+
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+ struct crypto_aes_ctx gen_aes_key;
+ int key_pos;
+
+ if (!ctx->need_calc_aes_dkey)
+ return;
+
+ crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
+ key_pos = ctx->key_len + 24;
+ memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_256:
+ key_pos -= 2;
+ /* fall through */
+ case AES_KEYSIZE_192:
+ key_pos -= 2;
+ memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
+ 4 * 4);
+ break;
+ }
+ ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ switch (len) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ ctx->key_len = len;
+ ctx->need_calc_aes_dkey = 1;
+
+ memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
+ return 0;
+}
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+ int ret;
+ void *buf;
+
+ if (!cpg->p.sg_src_left) {
+ ret = sg_miter_next(&cpg->p.src_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+ cpg->p.src_start = 0;
+ }
+
+ cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+
+ buf = cpg->p.src_sg_it.addr;
+ buf += cpg->p.src_start;
+
+ memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+
+ cpg->p.sg_src_left -= cpg->p.crypt_len;
+ cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_process_current_q(int first_block)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct sec_accel_config op;
+
+ switch (req_ctx->op) {
+ case COP_AES_ECB:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ break;
+ case COP_AES_CBC:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+ if (first_block)
+ memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+ break;
+ }
+ if (req_ctx->decrypt) {
+ op.config |= CFG_DIR_DEC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
+ AES_KEY_LEN);
+ } else {
+ op.config |= CFG_DIR_ENC;
+ memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
+ AES_KEY_LEN);
+ }
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ op.config |= CFG_AES_LEN_128;
+ break;
+ case AES_KEYSIZE_192:
+ op.config |= CFG_AES_LEN_192;
+ break;
+ case AES_KEYSIZE_256:
+ op.config |= CFG_AES_LEN_256;
+ break;
+ }
+ op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+ ENC_P_DST(SRAM_DATA_OUT_START);
+ op.enc_key_p = SRAM_DATA_KEY_P;
+
+ setup_data_in(req);
+ op.enc_len = cpg->p.crypt_len;
+ memcpy(cpg->sram + SRAM_CONFIG, &op,
+ sizeof(struct sec_accel_config));
+
+ writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+ /* GO */
+ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+ /*
+ * XXX: add timer if the interrupt does not occur for some mystery
+ * reason
+ */
+}
+
+static void mv_crypto_algo_completion(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ if (req_ctx->op != COP_AES_CBC)
+ return;
+
+ memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+ struct ablkcipher_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+
+ cpg->p.total_req_bytes += cpg->p.crypt_len;
+ do {
+ int dst_copy;
+
+ if (!cpg->p.sg_dst_left) {
+ ret = sg_miter_next(&cpg->p.dst_sg_it);
+ BUG_ON(!ret);
+ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+ cpg->p.dst_start = 0;
+ }
+
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
+
+ dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+
+ memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+
+ cpg->p.sg_dst_left -= dst_copy;
+ cpg->p.crypt_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+ } while (cpg->p.crypt_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.total_req_bytes < req->nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+ } else {
+ sg_miter_stop(&cpg->p.src_sg_it);
+ sg_miter_stop(&cpg->p.dst_sg_it);
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
+ req->base.complete(&req->base, 0);
+ }
+}
+
+static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+{
+ int i = 0;
+
+ do {
+ total_bytes -= sl[i].length;
+ i++;
+
+ } while (total_bytes > 0);
+
+ return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+ int num_sgs;
+
+ cpg->cur_req = req;
+ memset(&cpg->p, 0, sizeof(struct req_progress));
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ num_sgs = count_sgs(req->dst, req->nbytes);
+ sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+ mv_process_current_q(1);
+}
+
+static int queue_manag(void *data)
+{
+ cpg->eng_st = ENGINE_IDLE;
+ do {
+ struct ablkcipher_request *req;
+ struct crypto_async_request *async_req = NULL;
+ struct crypto_async_request *backlog;
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ if (cpg->eng_st == ENGINE_W_DEQUEUE)
+ dequeue_complete_req();
+
+ spin_lock_irq(&cpg->lock);
+ if (cpg->eng_st == ENGINE_IDLE) {
+ backlog = crypto_get_backlog(&cpg->queue);
+ async_req = crypto_dequeue_request(&cpg->queue);
+ if (async_req) {
+ BUG_ON(cpg->eng_st != ENGINE_IDLE);
+ cpg->eng_st = ENGINE_BUSY;
+ }
+ }
+ spin_unlock_irq(&cpg->lock);
+
+ if (backlog) {
+ backlog->complete(backlog, -EINPROGRESS);
+ backlog = NULL;
+ }
+
+ if (async_req) {
+ req = container_of(async_req,
+ struct ablkcipher_request, base);
+ mv_enqueue_new_req(req);
+ async_req = NULL;
+ }
+
+ schedule();
+
+ } while (!kthread_should_stop());
+ return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cpg->lock, flags);
+ ret = ablkcipher_enqueue_request(&cpg->queue, req);
+ spin_unlock_irqrestore(&cpg->lock, flags);
+ wake_up_process(cpg->queue_th);
+ return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 0;
+
+ return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+ return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+ return 0;
+}
+
+irqreturn_t crypto_int(int irq, void *priv)
+{
+ u32 val;
+
+ val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
+ if (!(val & SEC_INT_ACCEL0_DONE))
+ return IRQ_NONE;
+
+ val &= ~SEC_INT_ACCEL0_DONE;
+ writel(val, cpg->reg + FPGA_INT_STATUS);
+ writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
+ BUG_ON(cpg->eng_st != ENGINE_BUSY);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+ return IRQ_HANDLED;
+}
+
+struct crypto_alg mv_aes_alg_ecb = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_ecb,
+ .decrypt = mv_dec_aes_ecb,
+ },
+ },
+};
+
+struct crypto_alg mv_aes_alg_cbc = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cra_init,
+ .cra_u = {
+ .ablkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = mv_setkey_aes,
+ .encrypt = mv_enc_aes_cbc,
+ .decrypt = mv_dec_aes_cbc,
+ },
+ },
+};
+
+static int mv_probe(struct platform_device *pdev)
+{
+ struct crypto_priv *cp;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ if (cpg) {
+ printk(KERN_ERR "Second crypto dev?\n");
+ return -EEXIST;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!res)
+ return -ENXIO;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ spin_lock_init(&cp->lock);
+ crypto_init_queue(&cp->queue, 50);
+ cp->reg = ioremap(res->start, res->end - res->start + 1);
+ if (!cp->reg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ if (!res) {
+ ret = -ENXIO;
+ goto err_unmap_reg;
+ }
+ cp->sram_size = res->end - res->start + 1;
+ cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
+ cp->sram = ioremap(res->start, cp->sram_size);
+ if (!cp->sram) {
+ ret = -ENOMEM;
+ goto err_unmap_reg;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 || irq == NO_IRQ) {
+ ret = irq;
+ goto err_unmap_sram;
+ }
+ cp->irq = irq;
+
+ platform_set_drvdata(pdev, cp);
+ cpg = cp;
+
+ cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+ if (IS_ERR(cp->queue_th)) {
+ ret = PTR_ERR(cp->queue_th);
+ goto err_unmap_sram;
+ }
+
+ ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+ cp);
+ if (ret)
+ goto err_thread;
+
+ writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
+ writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+
+ ret = crypto_register_alg(&mv_aes_alg_ecb);
+ if (ret)
+ goto err_irq;
+
+ ret = crypto_register_alg(&mv_aes_alg_cbc);
+ if (ret)
+ goto err_unreg_ecb;
+ return 0;
+err_unreg_ecb:
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+err_irq:
+ free_irq(irq, cp);
+err_thread:
+ kthread_stop(cp->queue_th);
+err_unmap_sram:
+ iounmap(cp->sram);
+err_unmap_reg:
+ iounmap(cp->reg);
+err:
+ kfree(cp);
+ cpg = NULL;
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int mv_remove(struct platform_device *pdev)
+{
+ struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+ crypto_unregister_alg(&mv_aes_alg_ecb);
+ crypto_unregister_alg(&mv_aes_alg_cbc);
+ kthread_stop(cp->queue_th);
+ free_irq(cp->irq, cp);
+ memset(cp->sram, 0, cp->sram_size);
+ iounmap(cp->sram);
+ iounmap(cp->reg);
+ kfree(cp);
+ cpg = NULL;
+ return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+ .probe = mv_probe,
+ .remove = mv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mv_crypto",
+ },
+};
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_crypto_init(void)
+{
+ return platform_driver_register(&marvell_crypto);
+}
+module_init(mv_crypto_init);
+
+static void __exit mv_crypto_exit(void)
+{
+ platform_driver_unregister(&marvell_crypto);
+}
+module_exit(mv_crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
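For context on how the two algorithms registered above get exercised: below is a minimal, hypothetical kernel-side caller of the "cbc(aes)" transform through the ablkcipher API of this kernel generation. The my_complete() callback, the all-zero key/IV and the buffer are placeholders and not part of this driver; error handling and final cleanup are trimmed.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>

/* hypothetical completion callback; a real caller would signal a waiter here */
static void my_complete(struct crypto_async_request *req, int err)
{
}

static int mv_cesa_demo(void)
{
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        struct scatterlist sg;
        static u8 key[AES_MIN_KEY_SIZE];        /* placeholder all-zero key */
        static u8 iv[AES_BLOCK_SIZE];           /* placeholder IV */
        static u8 buf[64];                      /* encrypted in place */
        int ret;

        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
        if (ret)
                goto out_free_tfm;

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        my_complete, NULL);
        sg_init_one(&sg, buf, sizeof(buf));
        ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

        /* On this driver the request is queued and finished from the
         * queue_manag() kthread, so -EINPROGRESS (or -EBUSY when backlogged)
         * is the expected return; tfm and req may only be freed after
         * my_complete() has run (cleanup omitted in this sketch).
         */
        return crypto_ablkcipher_encrypt(req);

out_free_tfm:
        crypto_free_ablkcipher(tfm);
        return ret;
}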
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 00000000000..c3e25d3bb17
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
+#ifndef __MV_CRYPTO_H__
+
+#define DIGEST_INITIAL_VAL_A 0xdd00
+#define DES_CMD_REG 0xdd58
+
+#define SEC_ACCEL_CMD 0xde00
+#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
+#define SEC_CMD_DISABLE_SEC (1 << 2)
+
+#define SEC_ACCEL_DESC_P0 0xde04
+#define SEC_DESC_P0_PTR(x) (x)
+
+#define SEC_ACCEL_DESC_P1 0xde14
+#define SEC_DESC_P1_PTR(x) (x)
+
+#define SEC_ACCEL_CFG 0xde08
+#define SEC_CFG_STOP_DIG_ERR (1 << 0)
+#define SEC_CFG_CH0_W_IDMA (1 << 7)
+#define SEC_CFG_CH1_W_IDMA (1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
+
+#define SEC_ACCEL_STATUS 0xde0c
+#define SEC_ST_ACT_0 (1 << 0)
+#define SEC_ST_ACT_1 (1 << 1)
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in Errata
+ * 4.12. It appears to have been part of an IRQ controller in the FPGA, and
+ * someone forgot to remove it while switching to the core and moving to
+ * SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS 0xdd68
+#define SEC_ACCEL_INT_STATUS 0xde20
+#define SEC_INT_AUTH_DONE (1 << 0)
+#define SEC_INT_DES_E_DONE (1 << 1)
+#define SEC_INT_AES_E_DONE (1 << 2)
+#define SEC_INT_AES_D_DONE (1 << 3)
+#define SEC_INT_ENC_DONE (1 << 4)
+#define SEC_INT_ACCEL0_DONE (1 << 5)
+#define SEC_INT_ACCEL1_DONE (1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
+
+#define SEC_ACCEL_INT_MASK 0xde24
+
+#define AES_KEY_LEN (8 * 4)
+
+struct sec_accel_config {
+
+ u32 config;
+#define CFG_OP_MAC_ONLY 0
+#define CFG_OP_CRYPT_ONLY 1
+#define CFG_OP_MAC_CRYPT 2
+#define CFG_OP_CRYPT_MAC 3
+#define CFG_MACM_MD5 (4 << 4)
+#define CFG_MACM_SHA1 (5 << 4)
+#define CFG_MACM_HMAC_MD5 (6 << 4)
+#define CFG_MACM_HMAC_SHA1 (7 << 4)
+#define CFG_ENCM_DES (1 << 8)
+#define CFG_ENCM_3DES (2 << 8)
+#define CFG_ENCM_AES (3 << 8)
+#define CFG_DIR_ENC (0 << 12)
+#define CFG_DIR_DEC (1 << 12)
+#define CFG_ENC_MODE_ECB (0 << 16)
+#define CFG_ENC_MODE_CBC (1 << 16)
+#define CFG_3DES_EEE (0 << 20)
+#define CFG_3DES_EDE (1 << 20)
+#define CFG_AES_LEN_128 (0 << 24)
+#define CFG_AES_LEN_192 (1 << 24)
+#define CFG_AES_LEN_256 (2 << 24)
+
+ u32 enc_p;
+#define ENC_P_SRC(x) (x)
+#define ENC_P_DST(x) ((x) << 16)
+
+ u32 enc_len;
+#define ENC_LEN(x) (x)
+
+ u32 enc_key_p;
+#define ENC_KEY_P(x) (x)
+
+ u32 enc_iv;
+#define ENC_IV_POINT(x) ((x) << 0)
+#define ENC_IV_BUF_POINT(x) ((x) << 16)
+
+ u32 mac_src_p;
+#define MAC_SRC_DATA_P(x) (x)
+#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
+
+ u32 mac_digest;
+ u32 mac_iv;
+} __attribute__ ((packed));
+ /*
+ * /-----------\ 0
+ * | ACCEL CFG | 4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY | 8 * 4
+ * |-----------| 0x40
+ * | IV IN | 4 * 4
+ * |-----------| 0x40 (inplace)
+ * | IV BUF | 4 * 4
+ * |-----------| 0x50
+ * | DATA IN | 16 * x (max ->max_req_size)
+ * |-----------| 0x50 (inplace operation)
+ * | DATA OUT | 16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+#define SRAM_CONFIG 0x00
+#define SRAM_DATA_KEY_P 0x20
+#define SRAM_DATA_IV 0x40
+#define SRAM_DATA_IV_BUF 0x40
+#define SRAM_DATA_IN_START 0x50
+#define SRAM_DATA_OUT_START 0x50
+
+#define SRAM_CFG_SPACE 0x50
+
+#endif
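To make the layout above concrete: a hypothetical in-place AES-128 CBC encryption would point the engine at the SRAM windows roughly as follows. This is a sketch only, built from the macros defined in this header; nbytes stands for the caller's request size and is not defined here.

        struct sec_accel_config op = {
                .config    = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_DIR_ENC |
                             CFG_ENC_MODE_CBC | CFG_AES_LEN_128,
                /* source and destination share the same SRAM window (in place) */
                .enc_p     = ENC_P_SRC(SRAM_DATA_IN_START) |
                             ENC_P_DST(SRAM_DATA_OUT_START),
                .enc_len   = ENC_LEN(nbytes),
                .enc_key_p = ENC_KEY_P(SRAM_DATA_KEY_P),
                .enc_iv    = ENC_IV_POINT(SRAM_DATA_IV) |
                             ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF),
        };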
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b6..76cb6b345e7 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,81 +12,43 @@
*
*/
-#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"
-#define SHA1_DEFAULT_FALLBACK "sha1-generic"
-#define SHA256_DEFAULT_FALLBACK "sha256-generic"
+struct padlock_sha_desc {
+ struct shash_desc fallback;
+};
struct padlock_sha_ctx {
- char *data;
- size_t used;
- int bypass;
- void (*f_sha_padlock)(const char *in, char *out, int count);
- struct hash_desc fallback;
+ struct crypto_shash *fallback;
};
-static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
-{
- return crypto_tfm_ctx(tfm);
-}
-
-/* We'll need aligned address on the stack */
-#define NEAREST_ALIGNED(ptr) \
- ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
-
-static struct crypto_alg sha1_alg, sha256_alg;
-
-static void padlock_sha_bypass(struct crypto_tfm *tfm)
+static int padlock_sha_init(struct shash_desc *desc)
{
- if (ctx(tfm)->bypass)
- return;
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
- crypto_hash_init(&ctx(tfm)->fallback);
- if (ctx(tfm)->data && ctx(tfm)->used) {
- struct scatterlist sg;
-
- sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
- crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
- }
-
- ctx(tfm)->used = 0;
- ctx(tfm)->bypass = 1;
-}
-
-static void padlock_sha_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->used = 0;
- ctx(tfm)->bypass = 0;
+ dctx->fallback.tfm = ctx->fallback;
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_shash_init(&dctx->fallback);
}
-static void padlock_sha_update(struct crypto_tfm *tfm,
- const uint8_t *data, unsigned int length)
+static int padlock_sha_update(struct shash_desc *desc,
+ const u8 *data, unsigned int length)
{
- /* Our buffer is always one page. */
- if (unlikely(!ctx(tfm)->bypass &&
- (ctx(tfm)->used + length > PAGE_SIZE)))
- padlock_sha_bypass(tfm);
-
- if (unlikely(ctx(tfm)->bypass)) {
- struct scatterlist sg;
- sg_init_one(&sg, (uint8_t *)data, length);
- crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
- return;
- }
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
- ctx(tfm)->used += length;
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ return crypto_shash_update(&dctx->fallback, data, length);
}
static inline void padlock_output_block(uint32_t *src,
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
*dst++ = swab32(*src++);
}
-static void padlock_do_sha1(const char *in, char *out, int count)
+static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128+16];
- char *result = NEAREST_ALIGNED(buf);
+ char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct sha1_state state;
+ unsigned int space;
+ unsigned int leftover;
int ts_state;
+ int err;
+
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+
+ if (state.count + count > ULONG_MAX)
+ return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+ leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
+ space = SHA1_BLOCK_SIZE - leftover;
+ if (space) {
+ if (count > space) {
+ err = crypto_shash_update(&dctx->fallback, in, space) ?:
+ crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+ count -= space;
+ in += space;
+ } else {
+ memcpy(state.buffer + leftover, in, count);
+ in = state.buffer;
+ count += leftover;
+ state.count &= ~(SHA1_BLOCK_SIZE - 1);
+ }
+ }
+
+ memcpy(result, &state.state, SHA1_DIGEST_SIZE);
- ((uint32_t *)result)[0] = SHA1_H0;
- ((uint32_t *)result)[1] = SHA1_H1;
- ((uint32_t *)result)[2] = SHA1_H2;
- ((uint32_t *)result)[3] = SHA1_H3;
- ((uint32_t *)result)[4] = SHA1_H4;
-
/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
- : "+S"(in), "+D"(result)
- : "c"(count), "a"(0));
+ : \
+ : "c"((unsigned long)state.count + count), \
+ "a"((unsigned long)state.count), \
+ "S"(in), "D"(result));
irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+
+out:
+ return err;
}
-static void padlock_do_sha256(const char *in, char *out, int count)
+static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
+{
+ u8 buf[4];
+
+ return padlock_sha1_finup(desc, buf, 0, out);
+}
+
+static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int count, u8 *out)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
- char buf[128+16];
- char *result = NEAREST_ALIGNED(buf);
+ char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
+ struct sha256_state state;
+ unsigned int space;
+ unsigned int leftover;
int ts_state;
+ int err;
+
+ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+
+ if (state.count + count > ULONG_MAX)
+ return crypto_shash_finup(&dctx->fallback, in, count, out);
+
+ leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
+ space = SHA256_BLOCK_SIZE - leftover;
+ if (space) {
+ if (count > space) {
+ err = crypto_shash_update(&dctx->fallback, in, space) ?:
+ crypto_shash_export(&dctx->fallback, &state);
+ if (err)
+ goto out;
+ count -= space;
+ in += space;
+ } else {
+ memcpy(state.buf + leftover, in, count);
+ in = state.buf;
+ count += leftover;
+ state.count &= ~(SHA256_BLOCK_SIZE - 1);
+ }
+ }
- ((uint32_t *)result)[0] = SHA256_H0;
- ((uint32_t *)result)[1] = SHA256_H1;
- ((uint32_t *)result)[2] = SHA256_H2;
- ((uint32_t *)result)[3] = SHA256_H3;
- ((uint32_t *)result)[4] = SHA256_H4;
- ((uint32_t *)result)[5] = SHA256_H5;
- ((uint32_t *)result)[6] = SHA256_H6;
- ((uint32_t *)result)[7] = SHA256_H7;
+ memcpy(result, &state.state, SHA256_DIGEST_SIZE);
/* prevent taking the spurious DNA fault with padlock. */
ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
- : "+S"(in), "+D"(result)
- : "c"(count), "a"(0));
+ : \
+ : "c"((unsigned long)state.count + count), \
+ "a"((unsigned long)state.count), \
+ "S"(in), "D"(result));
irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+
+out:
+ return err;
}
-static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
- if (unlikely(ctx(tfm)->bypass)) {
- crypto_hash_final(&ctx(tfm)->fallback, out);
- ctx(tfm)->bypass = 0;
- return;
- }
+ u8 buf[4];
- /* Pass the input buffer to PadLock microcode... */
- ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
-
- ctx(tfm)->used = 0;
+ return padlock_sha256_finup(desc, buf, 0, out);
}
static int padlock_cra_init(struct crypto_tfm *tfm)
{
+ struct crypto_shash *hash = __crypto_shash_cast(tfm);
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
- struct crypto_hash *fallback_tfm;
-
- /* For now we'll allocate one page. This
- * could eventually be configurable one day. */
- ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
- if (!ctx(tfm)->data)
- return -ENOMEM;
+ struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_shash *fallback_tfm;
+ int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
- free_page((unsigned long)(ctx(tfm)->data));
- return PTR_ERR(fallback_tfm);
+ err = PTR_ERR(fallback_tfm);
+ goto out;
}
- ctx(tfm)->fallback.tfm = fallback_tfm;
+ ctx->fallback = fallback_tfm;
+ hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
-}
-
-static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->f_sha_padlock = padlock_do_sha1;
- return padlock_cra_init(tfm);
-}
-
-static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
-{
- ctx(tfm)->f_sha_padlock = padlock_do_sha256;
-
- return padlock_cra_init(tfm);
+out:
+ return err;
}
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
- if (ctx(tfm)->data) {
- free_page((unsigned long)(ctx(tfm)->data));
- ctx(tfm)->data = NULL;
- }
+ struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_hash(ctx(tfm)->fallback.tfm);
- ctx(tfm)->fallback.tfm = NULL;
+ crypto_free_shash(ctx->fallback);
}
-static struct crypto_alg sha1_alg = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-padlock",
- .cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct padlock_sha_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
- .cra_init = padlock_sha1_cra_init,
- .cra_exit = padlock_cra_exit,
- .cra_u = {
- .digest = {
- .dia_digestsize = SHA1_DIGEST_SIZE,
- .dia_init = padlock_sha_init,
- .dia_update = padlock_sha_update,
- .dia_final = padlock_sha_final,
- }
+static struct shash_alg sha1_alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = padlock_sha_init,
+ .update = padlock_sha_update,
+ .finup = padlock_sha1_finup,
+ .final = padlock_sha1_final,
+ .descsize = sizeof(struct padlock_sha_desc),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = padlock_cra_init,
+ .cra_exit = padlock_cra_exit,
}
};
-static struct crypto_alg sha256_alg = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-padlock",
- .cra_priority = PADLOCK_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct padlock_sha_ctx),
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
- .cra_init = padlock_sha256_cra_init,
- .cra_exit = padlock_cra_exit,
- .cra_u = {
- .digest = {
- .dia_digestsize = SHA256_DIGEST_SIZE,
- .dia_init = padlock_sha_init,
- .dia_update = padlock_sha_update,
- .dia_final = padlock_sha_final,
- }
+static struct shash_alg sha256_alg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = padlock_sha_init,
+ .update = padlock_sha_update,
+ .finup = padlock_sha256_finup,
+ .final = padlock_sha256_final,
+ .descsize = sizeof(struct padlock_sha_desc),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = padlock_cra_init,
+ .cra_exit = padlock_cra_exit,
}
};
@@ -272,11 +275,11 @@ static int __init padlock_init(void)
return -ENODEV;
}
- rc = crypto_register_alg(&sha1_alg);
+ rc = crypto_register_shash(&sha1_alg);
if (rc)
goto out;
- rc = crypto_register_alg(&sha256_alg);
+ rc = crypto_register_shash(&sha256_alg);
if (rc)
goto out_unreg1;
@@ -285,7 +288,7 @@ static int __init padlock_init(void)
return 0;
out_unreg1:
- crypto_unregister_alg(&sha1_alg);
+ crypto_unregister_shash(&sha1_alg);
out:
printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
return rc;
@@ -293,8 +296,8 @@ out:
static void __exit padlock_fini(void)
{
- crypto_unregister_alg(&sha1_alg);
- crypto_unregister_alg(&sha256_alg);
+ crypto_unregister_shash(&sha1_alg);
+ crypto_unregister_shash(&sha256_alg);
}
module_init(padlock_init);
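A quick worked example of the leftover/space arithmetic used by padlock_sha1_finup() above (the numbers are illustrative): with state.count == 70 bytes already fed to the fallback and count == 10 new bytes, leftover = ((70 - 1) & 63) + 1 = 6 and space = 64 - 6 = 58. Since count <= space, the 10 bytes are appended to state.buffer, in is redirected to that buffer, count becomes 16 and state.count is rounded down to 64, so "rep xsha1" resumes exactly on a block boundary. The +1/-1 trick maps a byte count that is an exact multiple of 64 to leftover == 64 (a full buffered block) rather than 0. As a self-contained sketch of that one step:

#include <crypto/sha.h>         /* SHA1_BLOCK_SIZE == 64 */

/* illustrative helper mirroring the leftover math in padlock_sha1_finup() */
static unsigned int bytes_buffered_past_last_block(unsigned long total)
{
        /* total == 70 -> 6;  total == 64 -> 64 (never 0) */
        return ((total - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
}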
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c70775fd3ce..c47ffe8a73e 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -86,6 +86,25 @@ struct talitos_request {
void *context;
};
+/* per-channel fifo management */
+struct talitos_channel {
+ /* request fifo */
+ struct talitos_request *fifo;
+
+ /* number of requests pending in channel h/w fifo */
+ atomic_t submit_count ____cacheline_aligned;
+
+ /* request submission (head) lock */
+ spinlock_t head_lock ____cacheline_aligned;
+ /* index to next free descriptor request */
+ int head;
+
+ /* request release (tail) lock */
+ spinlock_t tail_lock ____cacheline_aligned;
+ /* index to next in-progress/done descriptor request */
+ int tail;
+};
+
struct talitos_private {
struct device *dev;
struct of_device *ofdev;
@@ -101,15 +120,6 @@ struct talitos_private {
/* SEC Compatibility info */
unsigned long features;
- /* next channel to be assigned next incoming descriptor */
- atomic_t last_chan;
-
- /* per-channel number of requests pending in channel h/w fifo */
- atomic_t *submit_count;
-
- /* per-channel request fifo */
- struct talitos_request **fifo;
-
/*
* length of the request fifo
* fifo_len is chfifo_len rounded up to next power of 2
@@ -117,15 +127,10 @@ struct talitos_private {
*/
unsigned int fifo_len;
- /* per-channel index to next free descriptor request */
- int *head;
-
- /* per-channel index to next in-progress/done descriptor request */
- int *tail;
+ struct talitos_channel *chan;
- /* per-channel request submission (head) and release (tail) locks */
- spinlock_t *head_lock;
- spinlock_t *tail_lock;
+ /* next channel to be assigned next incoming descriptor */
+ atomic_t last_chan ____cacheline_aligned;
/* request callback tasklet */
struct tasklet_struct done_task;
@@ -141,6 +146,12 @@ struct talitos_private {
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
+{
+ talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
+ talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+}
+
/*
* map virtual single (contiguous) pointer to h/w descriptor pointer
*/
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
unsigned char extent,
enum dma_data_direction dir)
{
+ dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
+
talitos_ptr->len = cpu_to_be16(len);
- talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir));
+ to_talitos_ptr(talitos_ptr, dma_addr);
talitos_ptr->j_extent = extent;
}
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
return -EIO;
}
- /* set done writeback and IRQ */
- setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE |
- TALITOS_CCCR_LO_CDIE);
+ /* set 36-bit addressing, done writeback enable and done IRQ enable */
+ setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+ TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
/* emulate SEC's round-robin channel fifo polling scheme */
ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
- spin_lock_irqsave(&priv->head_lock[ch], flags);
+ spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
- if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
+ if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
/* h/w fifo is full */
- spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EAGAIN;
}
- head = priv->head[ch];
- request = &priv->fifo[ch][head];
+ head = priv->chan[ch].head;
+ request = &priv->chan[ch].fifo[head];
/* map descriptor and save caller data */
request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
request->context = context;
/* increment fifo head */
- priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);
+ priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
smp_wmb();
request->desc = desc;
/* GO! */
wmb();
- out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);
+ out_be32(priv->reg + TALITOS_FF(ch),
+ cpu_to_be32(upper_32_bits(request->dma_desc)));
+ out_be32(priv->reg + TALITOS_FF_LO(ch),
+ cpu_to_be32(lower_32_bits(request->dma_desc)));
- spin_unlock_irqrestore(&priv->head_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
return -EINPROGRESS;
}
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
unsigned long flags;
int tail, status;
- spin_lock_irqsave(&priv->tail_lock[ch], flags);
+ spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
- tail = priv->tail[ch];
- while (priv->fifo[ch][tail].desc) {
- request = &priv->fifo[ch][tail];
+ tail = priv->chan[ch].tail;
+ while (priv->chan[ch].fifo[tail].desc) {
+ request = &priv->chan[ch].fifo[tail];
/* descriptors with their done bits set don't get the error */
rmb();
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
request->desc = NULL;
/* increment fifo tail */
- priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
+ priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
- spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
- atomic_dec(&priv->submit_count[ch]);
+ atomic_dec(&priv->chan[ch].submit_count);
saved_req.callback(dev, saved_req.desc, saved_req.context,
status);
/* channel may resume processing in single desc error case */
if (error && !reset_ch && status == error)
return;
- spin_lock_irqsave(&priv->tail_lock[ch], flags);
- tail = priv->tail[ch];
+ spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+ tail = priv->chan[ch].tail;
}
- spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
+ spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
static struct talitos_desc *current_desc(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
- int tail = priv->tail[ch];
+ int tail = priv->chan[ch].tail;
dma_addr_t cur_desc;
cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
- while (priv->fifo[ch][tail].dma_desc != cur_desc) {
+ while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
- if (tail == priv->tail[ch]) {
+ if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n");
return NULL;
}
}
- return priv->fifo[ch][tail].desc;
+ return priv->chan[ch].fifo[tail].desc;
}
/*
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
int n_sg = sg_count;
while (n_sg--) {
- link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg));
+ to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
struct talitos_desc *desc = &edesc->desc;
unsigned int cryptlen = areq->cryptlen;
unsigned int authsize = ctx->authsize;
- unsigned int ivsize;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
int sg_count, ret;
int sg_link_tbl_len;
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
0, DMA_TO_DEVICE);
/* hmac data */
- map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) -
- sg_virt(areq->assoc), sg_virt(areq->assoc), 0,
- DMA_TO_DEVICE);
+ map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
+ sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
/* cipher iv */
- ivsize = crypto_aead_ivsize(aead);
map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
DMA_TO_DEVICE);
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->src_is_chained);
if (sg_count == 1) {
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
} else {
sg_link_tbl_len = cryptlen;
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
&edesc->link_tbl[0]);
if (sg_count > 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->
- src));
+ to_talitos_ptr(&desc->ptr[4],
+ sg_dma_address(areq->src));
}
}
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dst_is_chained);
if (sg_count == 1) {
- desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
- desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents + 1);
+ to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+ (edesc->src_nents + 1) *
+ sizeof(struct talitos_ptr));
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
link_tbl_ptr->len = cpu_to_be16(authsize);
/* icv data follows link tables */
- link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents +
- edesc->dst_nents + 2);
-
+ to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
+ (edesc->src_nents + edesc->dst_nents + 2) *
+ sizeof(struct talitos_ptr));
desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* first DWORD empty */
desc->ptr[0].len = 0;
- desc->ptr[0].ptr = 0;
+ to_talitos_ptr(&desc->ptr[0], 0);
desc->ptr[0].j_extent = 0;
/* cipher iv */
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->src_is_chained);
if (sg_count == 1) {
- desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src));
+ to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
} else {
sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
&edesc->link_tbl[0]);
if (sg_count > 1) {
+ to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
} else {
/* Only one segment now, so no link tbl needed */
- desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->
- src));
+ to_talitos_ptr(&desc->ptr[3],
+ sg_dma_address(areq->src));
}
}
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
edesc->dst_is_chained);
if (sg_count == 1) {
- desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst));
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
} else {
struct talitos_ptr *link_tbl_ptr =
&edesc->link_tbl[edesc->src_nents + 1];
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ (edesc->src_nents + 1) *
+ sizeof(struct talitos_ptr));
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
- edesc->dma_link_tbl +
- edesc->src_nents + 1);
sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
link_tbl_ptr);
dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* last DWORD empty */
desc->ptr[6].len = 0;
- desc->ptr[6].ptr = 0;
+ to_talitos_ptr(&desc->ptr[6], 0);
desc->ptr[6].j_extent = 0;
ret = talitos_submit(dev, desc, callback, areq);
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- kfree(priv->submit_count);
- kfree(priv->tail);
- kfree(priv->head);
-
- if (priv->fifo)
- for (i = 0; i < priv->num_channels; i++)
- kfree(priv->fifo[i]);
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->chan[i].fifo)
+ kfree(priv->chan[i].fifo);
- kfree(priv->fifo);
- kfree(priv->head_lock);
- kfree(priv->tail_lock);
+ kfree(priv->chan);
if (priv->irq != NO_IRQ) {
free_irq(priv->irq, dev);
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
if (of_device_is_compatible(np, "fsl,sec2.1"))
priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
- priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
- GFP_KERNEL);
- priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
- GFP_KERNEL);
- if (!priv->head_lock || !priv->tail_lock) {
- dev_err(dev, "failed to allocate fifo locks\n");
+ priv->chan = kzalloc(sizeof(struct talitos_channel) *
+ priv->num_channels, GFP_KERNEL);
+ if (!priv->chan) {
+ dev_err(dev, "failed to allocate channel management space\n");
err = -ENOMEM;
goto err_out;
}
for (i = 0; i < priv->num_channels; i++) {
- spin_lock_init(&priv->head_lock[i]);
- spin_lock_init(&priv->tail_lock[i]);
- }
-
- priv->fifo = kmalloc(sizeof(struct talitos_request *) *
- priv->num_channels, GFP_KERNEL);
- if (!priv->fifo) {
- dev_err(dev, "failed to allocate request fifo\n");
- err = -ENOMEM;
- goto err_out;
+ spin_lock_init(&priv->chan[i].head_lock);
+ spin_lock_init(&priv->chan[i].tail_lock);
}
priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
for (i = 0; i < priv->num_channels; i++) {
- priv->fifo[i] = kzalloc(sizeof(struct talitos_request) *
- priv->fifo_len, GFP_KERNEL);
- if (!priv->fifo[i]) {
+ priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
+ priv->fifo_len, GFP_KERNEL);
+ if (!priv->chan[i].fifo) {
dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM;
goto err_out;
}
}
- priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
- GFP_KERNEL);
- if (!priv->submit_count) {
- dev_err(dev, "failed to allocate fifo submit count space\n");
- err = -ENOMEM;
- goto err_out;
- }
for (i = 0; i < priv->num_channels; i++)
- atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1));
+ atomic_set(&priv->chan[i].submit_count,
+ -(priv->chfifo_len - 1));
- priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
- priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
- if (!priv->head || !priv->tail) {
- dev_err(dev, "failed to allocate request index space\n");
- err = -ENOMEM;
- goto err_out;
- }
+ dma_set_mask(dev, DMA_BIT_MASK(36));
/* reset and initialize the h/w */
err = init_device(dev);
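The common thread in the talitos.c changes above is 36-bit addressing: to_talitos_ptr() splits a dma_addr_t across the eptr/ptr pair, TALITOS_CCCR_LO_EAE enables extended addressing per channel, and dma_set_mask(dev, DMA_BIT_MASK(36)) tells the DMA API the device can handle it. A small sketch of the split, using a made-up 36-bit address:

        struct talitos_ptr ptr;
        dma_addr_t dma_addr = 0x9a1b2c3d4ULL;   /* illustrative 36-bit address */

        to_talitos_ptr(&ptr, dma_addr);
        /* ptr.eptr == cpu_to_be32(0x9)         <- upper_32_bits(dma_addr) */
        /* ptr.ptr  == cpu_to_be32(0xa1b2c3d4)  <- lower_32_bits(dma_addr) */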
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 575981f0cfd..ff5a1450e14 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -57,6 +57,7 @@
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
+#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 110e731f557..1c0b504a42f 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -196,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
- data, sizeof(data))) {
+ data, 8)) {
case RCODE_GENERATION:
/* A generation change frees all bandwidth. */
return allocate ? -EAGAIN : bandwidth;
@@ -233,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
data[1] = old ^ c;
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
- offset, data, sizeof(data))) {
+ offset, data, 8)) {
case RCODE_GENERATION:
/* A generation change frees all channels. */
return allocate ? -EAGAIN : i;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ecddd11b797..76b321bb73f 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/pci_ids.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
+#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
+#define PCI_DEVICE_ID_AGERE_FW643 0x5901
+
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
@@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev,
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
+ /* dual-buffer mode is broken if more than one IR context is active */
+ if (dev->vendor == PCI_VENDOR_ID_AGERE &&
+ dev->device == PCI_DEVICE_ID_AGERE_FW643)
+ ohci->use_dualbuffer = false;
+
+ /* dual-buffer mode is broken */
+ if (dev->vendor == PCI_VENDOR_ID_RICOH &&
+ dev->device == PCI_DEVICE_ID_RICOH_R5C832)
+ ohci->use_dualbuffer = false;
+
/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
#if !defined(CONFIG_X86_32)
/* dual-buffer mode is broken with descriptor addresses above 2G */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 8d51568ee14..e5df822a813 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -456,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
}
spin_unlock_irqrestore(&card->lock, flags);
- if (&orb->link != &lu->orb_list)
+ if (&orb->link != &lu->orb_list) {
orb->callback(orb, &status);
- else
+ kref_put(&orb->kref, free_orb);
+ } else {
fw_error("status write for unknown orb\n");
-
- kref_put(&orb->kref, free_orb);
+ }
fw_send_response(card, request, RCODE_COMPLETE);
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 24c84ae8152..938100f14b1 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -568,35 +568,76 @@ const struct dmi_device * dmi_find_device(int type, const char *name,
EXPORT_SYMBOL(dmi_find_device);
/**
- * dmi_get_year - Return year of a DMI date
- * @field: data index (like dmi_get_system_info)
+ * dmi_get_date - parse a DMI date
+ * @field: data index (see enum dmi_field)
+ * @yearp: optional out parameter for the year
+ * @monthp: optional out parameter for the month
+ * @dayp: optional out parameter for the day
*
- * Returns -1 when the field doesn't exist. 0 when it is broken.
+ * The date field is assumed to be in a form resembling
+ * [mm[/dd]]/yy[yy] and the result is stored in the out
+ * parameters, any or all of which can be omitted.
+ *
+ * If the field doesn't exist, all out parameters are set to zero
+ * and false is returned. Otherwise, true is returned with any
+ * invalid part of the date set to zero.
+ *
+ * On return, year, month and day are guaranteed to be in the
+ * range of [0,9999], [0,12] and [0,31] respectively.
*/
-int dmi_get_year(int field)
+bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
- int year;
- const char *s = dmi_get_system_info(field);
+ int year = 0, month = 0, day = 0;
+ bool exists;
+ const char *s, *y;
+ char *e;
- if (!s)
- return -1;
- if (*s == '\0')
- return 0;
- s = strrchr(s, '/');
- if (!s)
- return 0;
+ s = dmi_get_system_info(field);
+ exists = s;
+ if (!exists)
+ goto out;
- s += 1;
- year = simple_strtoul(s, NULL, 0);
- if (year && year < 100) { /* 2-digit year */
+ /*
+ * Determine year first. We assume the date string resembles
+ * mm/dd/yy[yy] but the original code extracted only the year
+ * from the end. Keep the behavior in the spirit of no
+ * surprises.
+ */
+ y = strrchr(s, '/');
+ if (!y)
+ goto out;
+
+ y++;
+ year = simple_strtoul(y, &e, 10);
+ if (y != e && year < 100) { /* 2-digit year */
year += 1900;
if (year < 1996) /* no dates < spec 1.0 */
year += 100;
}
+ if (year > 9999) /* year should fit in %04d */
+ year = 0;
+
+ /* parse the mm and dd */
+ month = simple_strtoul(s, &e, 10);
+ if (s == e || *e != '/' || !month || month > 12) {
+ month = 0;
+ goto out;
+ }
- return year;
+ s = e + 1;
+ day = simple_strtoul(s, &e, 10);
+ if (s == y || s == e || *e != '/' || day > 31)
+ day = 0;
+out:
+ if (yearp)
+ *yearp = year;
+ if (monthp)
+ *monthp = month;
+ if (dayp)
+ *dayp = day;
+ return exists;
}
-EXPORT_SYMBOL(dmi_get_year);
+EXPORT_SYMBOL(dmi_get_date);
/**
* dmi_walk - Walk the DMI table and get called back for every record
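A hypothetical caller of the new helper, for illustration (DMI_BIOS_DATE is an existing enum dmi_field value; the pr_info() lines are placeholders). Note that a true return only guarantees the field exists; any part that failed to parse comes back as 0.

        int year, month, day;

        if (dmi_get_date(DMI_BIOS_DATE, &year, &month, &day))
                pr_info("BIOS date: %04d-%02d-%02d\n", year, month, day);
        else
                pr_info("no BIOS date in DMI\n");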
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 33be210d672..2f631c75f70 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -258,31 +258,6 @@ void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
EXPORT_SYMBOL(drm_mode_object_find);
/**
- * drm_crtc_from_fb - find the CRTC structure associated with an fb
- * @dev: DRM device
- * @fb: framebuffer in question
- *
- * LOCKING:
- * Caller must hold mode_config lock.
- *
- * Find CRTC in the mode_config structure that matches @fb.
- *
- * RETURNS:
- * Pointer to the CRTC or NULL if it wasn't found.
- */
-struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb)
- return crtc;
- }
- return NULL;
-}
-
-/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
*
@@ -328,11 +303,20 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
+ struct drm_mode_set set;
+ int ret;
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->fb == fb)
- crtc->fb = NULL;
+ if (crtc->fb == fb) {
+ /* should turn off the crtc */
+ memset(&set, 0, sizeof(struct drm_mode_set));
+ set.crtc = crtc;
+ set.fb = NULL;
+ ret = crtc->funcs->set_config(&set);
+ if (ret)
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
}
drm_mode_object_put(dev, &fb->base);
@@ -1511,7 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
set.mode = mode;
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
- set.fb =fb;
+ set.fb = fb;
ret = crtc->funcs->set_config(&set);
out:
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 80cc6d06d61..7f2728bbc16 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -502,12 +502,40 @@ static int add_detailed_info(struct drm_connector *connector,
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_display_mode *newmode;
- /* EDID up to and including 1.2 may put monitor info here */
- if (edid->version == 1 && edid->revision < 3)
- continue;
-
- /* Detailed mode timing */
- if (timing->pixel_clock) {
+ /* X server check is version 1.1 or higher */
+ if (edid->version == 1 && edid->revision >= 1 &&
+ !timing->pixel_clock) {
+ /* Other timing or info */
+ switch (data->type) {
+ case EDID_DETAIL_MONITOR_SERIAL:
+ break;
+ case EDID_DETAIL_MONITOR_STRING:
+ break;
+ case EDID_DETAIL_MONITOR_RANGE:
+ /* Get monitor range data */
+ break;
+ case EDID_DETAIL_MONITOR_NAME:
+ break;
+ case EDID_DETAIL_MONITOR_CPDATA:
+ break;
+ case EDID_DETAIL_STD_MODES:
+ /* Five modes per detailed section */
+ for (j = 0; j < 5; j++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[j];
+ newmode = drm_mode_std(dev, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
newmode = drm_mode_detailed(dev, edid, timing, quirks);
if (!newmode)
continue;
@@ -518,38 +546,6 @@ static int add_detailed_info(struct drm_connector *connector,
drm_mode_probed_add(connector, newmode);
modes++;
- continue;
- }
-
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- /* Five modes per detailed section */
- for (j = 0; j < 5; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
}
}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 85ec31b3ff0..f7a615b80c7 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,44 +22,50 @@
#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+static struct device_type drm_sysfs_device_minor = {
+ .name = "drm_minor"
+};
+
/**
- * drm_sysfs_suspend - DRM class suspend hook
+ * drm_class_suspend - DRM class suspend hook
* @dev: Linux device to suspend
* @state: power state to enter
*
* Just figures out what the actual struct drm_device associated with
* @dev is and calls its suspend hook, if present.
*/
-static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
+static int drm_class_suspend(struct device *dev, pm_message_t state)
{
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->suspend)
- return drm_dev->driver->suspend(drm_dev, state);
-
+ if (dev->type == &drm_sysfs_device_minor) {
+ struct drm_minor *drm_minor = to_drm_minor(dev);
+ struct drm_device *drm_dev = drm_minor->dev;
+
+ if (drm_minor->type == DRM_MINOR_LEGACY &&
+ !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+ drm_dev->driver->suspend)
+ return drm_dev->driver->suspend(drm_dev, state);
+ }
return 0;
}
/**
- * drm_sysfs_resume - DRM class resume hook
+ * drm_class_resume - DRM class resume hook
* @dev: Linux device to resume
*
* Just figures out what the actual struct drm_device associated with
* @dev is and calls its resume hook, if present.
*/
-static int drm_sysfs_resume(struct device *dev)
+static int drm_class_resume(struct device *dev)
{
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->resume)
- return drm_dev->driver->resume(drm_dev);
-
+ if (dev->type == &drm_sysfs_device_minor) {
+ struct drm_minor *drm_minor = to_drm_minor(dev);
+ struct drm_device *drm_dev = drm_minor->dev;
+
+ if (drm_minor->type == DRM_MINOR_LEGACY &&
+ !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+ drm_dev->driver->resume)
+ return drm_dev->driver->resume(drm_dev);
+ }
return 0;
}
@@ -99,8 +105,8 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
goto err_out;
}
- class->suspend = drm_sysfs_suspend;
- class->resume = drm_sysfs_resume;
+ class->suspend = drm_class_suspend;
+ class->resume = drm_class_resume;
err = class_create_file(class, &class_attr_version);
if (err)
@@ -480,6 +486,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
minor->kdev.class = drm_class;
minor->kdev.release = drm_sysfs_device_release;
minor->kdev.devt = minor->device;
+ minor->kdev.type = &drm_sysfs_device_minor;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
else if (minor->type == DRM_MINOR_RENDER)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7537f57d8a8..5b4f87e5562 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -222,6 +222,7 @@ typedef struct drm_i915_private {
unsigned int edp_support:1;
int lvds_ssc_freq;
+ int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -384,6 +385,9 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
+ /** LRU list of objects with fence regs on them. */
+ struct list_head fence_list;
+
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
@@ -451,6 +455,9 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
+ /** This object's place on the fenced object LRU */
+ struct list_head fence_list;
+
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 140bee142fc..80e5ba490dc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -978,6 +978,7 @@ int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj;
uint32_t read_domains = args->read_domains;
@@ -1010,8 +1011,18 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ /* Update the LRU on the fence for the CPU access that's
+ * about to occur.
+ */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+ list_move_tail(&obj_priv->fence_list,
+ &dev_priv->mm.fence_list);
+ }
+
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
@@ -1155,8 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Need a new fence register? */
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret) {
mutex_unlock(&dev->struct_mutex);
@@ -2208,6 +2218,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
struct drm_i915_gem_object *old_obj_priv = NULL;
int i, ret, avail;
+ /* Just update our place in the LRU if our fence is getting used. */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+ list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+ return 0;
+ }
+
switch (obj_priv->tiling_mode) {
case I915_TILING_NONE:
WARN(1, "allocating a fence for non-tiled object?\n");
@@ -2229,7 +2245,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
}
/* First try to find a free reg */
-try_again:
avail = 0;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
@@ -2243,63 +2258,62 @@ try_again:
/* None available, try to steal one or wait for a user to finish */
if (i == dev_priv->num_fence_regs) {
- uint32_t seqno = dev_priv->mm.next_gem_seqno;
+ struct drm_gem_object *old_obj = NULL;
if (avail == 0)
return -ENOSPC;
- for (i = dev_priv->fence_reg_start;
- i < dev_priv->num_fence_regs; i++) {
- uint32_t this_seqno;
-
- reg = &dev_priv->fence_regs[i];
- old_obj_priv = reg->obj->driver_private;
+ list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+ fence_list) {
+ old_obj = old_obj_priv->obj;
if (old_obj_priv->pin_count)
continue;
+ /* Take a reference, as otherwise the wait_rendering
+ * below may cause the object to get freed out from
+ * under us.
+ */
+ drm_gem_object_reference(old_obj);
+
/* i915 uses fences for GPU access to tiled buffers */
if (IS_I965G(dev) || !old_obj_priv->active)
break;
- /* find the seqno of the first available fence */
- this_seqno = old_obj_priv->last_rendering_seqno;
- if (this_seqno != 0 &&
- reg->obj->write_domain == 0 &&
- i915_seqno_passed(seqno, this_seqno))
- seqno = this_seqno;
- }
-
- /*
- * Now things get ugly... we have to wait for one of the
- * objects to finish before trying again.
- */
- if (i == dev_priv->num_fence_regs) {
- if (seqno == dev_priv->mm.next_gem_seqno) {
- i915_gem_flush(dev,
- I915_GEM_GPU_DOMAINS,
- I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL,
- I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return -ENOMEM;
+ /* This brings the object to the head of the LRU if it
+ * had been written to. The only way this should
+ * result in us waiting longer than the expected
+ * optimal amount of time is if there was a
+ * fence-using buffer later that was read-only.
+ */
+ i915_gem_object_flush_gpu_write_domain(old_obj);
+ ret = i915_gem_object_wait_rendering(old_obj);
+ if (ret != 0) {
+ drm_gem_object_unreference(old_obj);
+ return ret;
}
- ret = i915_wait_request(dev, seqno);
- if (ret)
- return ret;
- goto try_again;
+ break;
}
/*
* Zap this virtual mapping so we can set up a fence again
* for this object next time we need it.
*/
- i915_gem_release_mmap(reg->obj);
+ i915_gem_release_mmap(old_obj);
+
+ i = old_obj_priv->fence_reg;
+ reg = &dev_priv->fence_regs[i];
+
old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+ list_del_init(&old_obj_priv->fence_list);
+
+ drm_gem_object_unreference(old_obj);
}
obj_priv->fence_reg = i;
+ list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
reg->obj = obj;
if (IS_I965G(dev))
@@ -2342,6 +2356,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
+ list_del_init(&obj_priv->fence_list);
}
/**
@@ -3595,9 +3610,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
* Pre-965 chips need a fence register set up in order to
* properly handle tiled surfaces.
*/
- if (!IS_I965G(dev) &&
- obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) {
if (ret != -EBUSY && ret != -ERESTARTSYS)
@@ -3806,6 +3819,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
+ INIT_LIST_HEAD(&obj_priv->fence_list);
return 0;
}
@@ -4218,15 +4232,11 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret;
-
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
-
- return ret;
+ return i915_gem_idle(dev);
}
void
@@ -4253,6 +4263,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 300aee3296c..f806fcc54e0 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -59,6 +59,16 @@ find_section(struct bdb_header *bdb, int section_id)
return NULL;
}
+static u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
+ block_ptr = (u16 *)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
+
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
struct lvds_dvo_timing *dvo_timing)
@@ -215,6 +225,41 @@ parse_general_features(struct drm_i915_private *dev_priv,
}
static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *general;
+ const int crt_bus_map_table[] = {
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ GPIOF,
+ };
+
+ /* Set sensible defaults in case we can't find the general block
+ or it is the wrong chipset */
+ dev_priv->crt_ddc_bus = -1;
+
+ general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (general) {
+ u16 block_size = get_blocksize(general);
+ if (block_size >= sizeof(*general)) {
+ int bus_pin = general->crt_ddc_gmbus_pin;
+ DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+ if ((bus_pin >= 1) && (bus_pin <= 6)) {
+ dev_priv->crt_ddc_bus =
+ crt_bus_map_table[bus_pin-1];
+ }
+ } else {
+ DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+ block_size);
+ }
+ }
+}
+
+static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
@@ -222,7 +267,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
struct bdb_general_definitions *p_defs;
struct child_device_config *p_child;
int i, child_device_num, count;
- u16 block_size, *block_ptr;
+ u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
@@ -240,8 +285,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
return;
}
/* get the block size of general definitions */
- block_ptr = (u16 *)((char *)p_defs - 2);
- block_size = *block_ptr;
+ block_size = get_blocksize(p_defs);
/* get the number of child device */
child_device_num = (block_size - sizeof(*p_defs)) /
sizeof(*p_child);
@@ -362,6 +406,7 @@ intel_init_bios(struct drm_device *dev)
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
+ parse_general_definitions(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4cf8e2e88a4..590f81c8f59 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -508,6 +508,7 @@ void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_output *intel_output;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
@@ -527,8 +528,12 @@ void intel_crt_init(struct drm_device *dev)
/* Set up the DDC bus. */
if (IS_IGDNG(dev))
i2c_reg = PCH_GPIOA;
- else
+ else {
i2c_reg = GPIOA;
+ /* Use VBT information for CRT DDC if available */
+ if (dev_priv->crt_ddc_bus != -1)
+ i2c_reg = dev_priv->crt_ddc_bus;
+ }
intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
if (!intel_output->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
@@ -537,6 +542,10 @@ void intel_crt_init(struct drm_device *dev)
}
intel_output->type = INTEL_OUTPUT_ANALOG;
+ intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d6fce213341..748ed50c55c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -666,7 +666,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
intel_clock_t clock;
int err = target;
- if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
(I915_READ(LVDS)) != 0) {
/*
* For LVDS, if the panel is on, just rely on its current
@@ -2005,7 +2005,21 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
return;
}
-const static int latency_ns = 3000; /* default for non-igd platforms */
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+const static int latency_ns = 5000;
static int intel_get_fifo_size(struct drm_device *dev, int plane)
{
@@ -2396,7 +2410,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_sdvo) {
dpll |= DPLL_DVO_HIGH_SPEED;
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- if (IS_I945G(dev) || IS_I945GM(dev))
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
else if (IS_IGDNG(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
@@ -3170,7 +3184,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct intel_output *intel_output = to_intel_output(connector);
- if (type_mask & (1 << intel_output->type))
+ if (type_mask & intel_output->clone_mask)
index_mask |= (1 << entry);
entry++;
}
@@ -3218,30 +3232,30 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, PCH_DP_D);
} else if (IS_I9XX(dev)) {
- int found;
- u32 reg;
+ bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
+
if (!found && SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_B);
}
/* Before G4X, SDVOC doesn't have its own detect register */
- if (IS_G4X(dev))
- reg = SDVOC;
- else
- reg = SDVOB;
- if (I915_READ(reg) & SDVO_DETECTED) {
+ if (I915_READ(SDVOB) & SDVO_DETECTED)
found = intel_sdvo_init(dev, SDVOC);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+
+ if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+
+ if (SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
- if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ if (SUPPORTS_INTEGRATED_DP(dev))
intel_dp_init(dev, DP_C);
}
+
if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D);
} else
@@ -3253,51 +3267,10 @@ static void intel_setup_outputs(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct intel_output *intel_output = to_intel_output(connector);
struct drm_encoder *encoder = &intel_output->enc;
- int crtc_mask = 0, clone_mask = 0;
- /* valid crtcs */
- switch(intel_output->type) {
- case INTEL_OUTPUT_HDMI:
- crtc_mask = ((1 << 0)|
- (1 << 1));
- clone_mask = ((1 << INTEL_OUTPUT_HDMI));
- break;
- case INTEL_OUTPUT_DVO:
- case INTEL_OUTPUT_SDVO:
- crtc_mask = ((1 << 0)|
- (1 << 1));
- clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
- (1 << INTEL_OUTPUT_DVO) |
- (1 << INTEL_OUTPUT_SDVO));
- break;
- case INTEL_OUTPUT_ANALOG:
- crtc_mask = ((1 << 0)|
- (1 << 1));
- clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
- (1 << INTEL_OUTPUT_DVO) |
- (1 << INTEL_OUTPUT_SDVO));
- break;
- case INTEL_OUTPUT_LVDS:
- crtc_mask = (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_LVDS);
- break;
- case INTEL_OUTPUT_TVOUT:
- crtc_mask = ((1 << 0) |
- (1 << 1));
- clone_mask = (1 << INTEL_OUTPUT_TVOUT);
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- crtc_mask = ((1 << 0) |
- (1 << 1));
- clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
- break;
- case INTEL_OUTPUT_EDP:
- crtc_mask = (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_EDP);
- break;
- }
- encoder->possible_crtcs = crtc_mask;
- encoder->possible_clones = intel_connector_clones(dev, clone_mask);
+ encoder->possible_crtcs = intel_output->crtc_mask;
+ encoder->possible_clones = intel_connector_clones(dev,
+ intel_output->clone_mask);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a6ff15ac548..2b914d73207 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1254,6 +1254,18 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else
intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+ if (output_reg == DP_B)
+ intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
+ else if (output_reg == DP_C)
+ intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
+ else if (output_reg == DP_D)
+ intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+
+ if (IS_eDP(intel_output)) {
+ intel_output->crtc_mask = (1 << 1);
+ intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+ } else
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d6f92ea1b55..26a6227c15f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -57,6 +57,25 @@
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
@@ -86,6 +105,8 @@ struct intel_output {
bool needs_tv_clock;
void *dev_priv;
void (*hot_plug)(struct intel_output *);
+ int crtc_mask;
+ int clone_mask;
};
struct intel_crtc {
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 13bff20930e..a4d2606de77 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -435,14 +435,20 @@ void intel_dvo_init(struct drm_device *dev)
continue;
intel_output->type = INTEL_OUTPUT_DVO;
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
+ intel_output->clone_mask =
+ (1 << INTEL_DVO_TMDS_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
+ intel_output->clone_mask =
+ (1 << INTEL_DVO_LVDS_CLONE_BIT);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1842290cded..fa304e13601 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -230,22 +230,28 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
/* Set up the DDC bus. */
- if (sdvox_reg == SDVOB)
+ if (sdvox_reg == SDVOB) {
+ intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
- else if (sdvox_reg == SDVOC)
+ } else if (sdvox_reg == SDVOC) {
+ intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
- else if (sdvox_reg == HDMIB)
+ } else if (sdvox_reg == HDMIB) {
+ intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
- else if (sdvox_reg == HDMIC)
+ } else if (sdvox_reg == HDMIC) {
+ intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
- else if (sdvox_reg == HDMID)
+ } else if (sdvox_reg == HDMID) {
+ intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
-
+ }
if (!intel_output->ddc_bus)
goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3f445a80c55..8df02ef8926 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -916,6 +916,8 @@ void intel_lvds_init(struct drm_device *dev)
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
intel_output->type = INTEL_OUTPUT_LVDS;
+ intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
+ intel_output->crtc_mask = (1 << 1);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 5371d933255..d3b74ba62b4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1458,7 +1458,7 @@ intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
caps++;
if (sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID0))
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
caps++;
if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
@@ -1967,6 +1967,9 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
intel_sdvo_set_colorimetry(intel_output,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ intel_output->clone_mask =
+ (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
}
} else if (flags & SDVO_OUTPUT_SVID0) {
@@ -1975,11 +1978,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
sdvo_priv->is_tv = true;
intel_output->needs_tv_clock = true;
+ intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_RGB0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_RGB1) {
sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
@@ -1991,12 +1997,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
sdvo_priv->is_lvds = true;
+ intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_LVDS1) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
sdvo_priv->is_lvds = true;
+ intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
} else {
unsigned char bytes[2];
@@ -2009,6 +2019,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
bytes[0], bytes[1]);
ret = false;
}
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
if (ret && registered)
ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index da4ab4dc163..5b1c9e9fdba 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1718,6 +1718,7 @@ intel_tv_init(struct drm_device *dev)
if (!intel_output) {
return;
}
+
connector = &intel_output->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1729,6 +1730,8 @@ intel_tv_init(struct drm_device *dev)
drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
tv_priv = (struct intel_tv_priv *)(intel_output + 1);
intel_output->type = INTEL_OUTPUT_TVOUT;
+ intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
intel_output->dev_priv = tv_priv;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f1ba8ff4113..68e728e8be4 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -254,6 +254,72 @@ void r100_mc_fini(struct radeon_device *rdev)
/*
+ * Interrupts
+ */
+int r100_irq_set(struct radeon_device *rdev)
+{
+ uint32_t tmp = 0;
+
+ if (rdev->irq.sw_int) {
+ tmp |= RADEON_SW_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ tmp |= RADEON_CRTC_VBLANK_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ tmp |= RADEON_CRTC2_VBLANK_MASK;
+ }
+ WREG32(RADEON_GEN_INT_CNTL, tmp);
+ return 0;
+}
+
+static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
+{
+ uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+ uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
+ RADEON_CRTC2_VBLANK_STAT;
+
+ if (irqs) {
+ WREG32(RADEON_GEN_INT_STATUS, irqs);
+ }
+ return irqs & irq_mask;
+}
+
+int r100_irq_process(struct radeon_device *rdev)
+{
+ uint32_t status;
+
+ status = r100_irq_ack(rdev);
+ if (!status) {
+ return IRQ_NONE;
+ }
+ while (status) {
+ /* SW interrupt */
+ if (status & RADEON_SW_INT_TEST) {
+ radeon_fence_process(rdev);
+ }
+ /* Vertical blank interrupts */
+ if (status & RADEON_CRTC_VBLANK_STAT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ }
+ if (status & RADEON_CRTC2_VBLANK_STAT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ }
+ status = r100_irq_ack(rdev);
+ }
+ return IRQ_HANDLED;
+}
+
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0)
+ return RREG32(RADEON_CRTC_CRNT_FRAME);
+ else
+ return RREG32(RADEON_CRTC2_CRNT_FRAME);
+}
+
+
+/*
* Fence emission
*/
void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1025,6 +1091,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
tmp |= tile_flags;
ib[idx] = tmp;
break;
+ case RADEON_RB3D_ZPASS_ADDR:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+ break;
default:
/* FIXME: we don't want to allow any other packets */
break;
@@ -1556,26 +1632,6 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
r100_pll_errata_after_data(rdev);
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- if (reg < 0x10000)
- return readl(((void __iomem *)rdev->rmmio) + reg);
- else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- }
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- if (reg < 0x10000)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
- else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- }
-}
-
int r100_init(struct radeon_device *rdev)
{
return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 9c8d41534a5..051bca6e3a4 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -83,8 +83,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
- mb();
}
+ mb();
}
int rv370_pcie_gart_enable(struct radeon_device *rdev)
@@ -448,6 +448,7 @@ void r300_gpu_init(struct radeon_device *rdev)
/* rv350,rv370,rv380 */
rdev->num_gb_pipes = 1;
}
+ rdev->num_z_pipes = 1;
gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
switch (rdev->num_gb_pipes) {
case 2:
@@ -486,7 +487,8 @@ void r300_gpu_init(struct radeon_device *rdev)
printk(KERN_WARNING "Failed to wait MC idle while "
"programming pipes. Bad things might happen.\n");
}
- DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+ DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
+ rdev->num_gb_pipes, rdev->num_z_pipes);
}
int r300_ga_reset(struct radeon_device *rdev)
@@ -593,27 +595,6 @@ void r300_vram_info(struct radeon_device *rdev)
/*
- * Indirect registers accessor
- */
-uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- uint32_t r;
-
- WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
- (void)RREG32(RADEON_PCIE_INDEX);
- r = RREG32(RADEON_PCIE_DATA);
- return r;
-}
-
-void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
- (void)RREG32(RADEON_PCIE_INDEX);
- WREG32(RADEON_PCIE_DATA, (v));
- (void)RREG32(RADEON_PCIE_DATA);
-}
-
-/*
* PCIE Lanes
*/
@@ -1014,7 +995,7 @@ static const unsigned r300_reg_safe_bm[159] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
+ 0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
};
static int r300_packet0_check(struct radeon_cs_parser *p,
@@ -1403,6 +1384,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
track->textures[i].txdepth = tmp;
break;
+ case R300_ZB_ZPASS_ADDR:
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+ break;
+ case 0x4be8:
+ /* valid register only on RV530 */
+ if (p->rdev->family == CHIP_RV530)
+ break;
+ /* fallthrough do not move */
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
reg, idx);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index dea497a979f..97426a6f370 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -165,7 +165,18 @@ void r420_pipes_init(struct radeon_device *rdev)
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
- DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+
+ if (rdev->family == CHIP_RV530) {
+ tmp = RREG32(RV530_GB_PIPE_SELECT2);
+ if ((tmp & 3) == 3)
+ rdev->num_z_pipes = 2;
+ else
+ rdev->num_z_pipes = 1;
+ } else
+ rdev->num_z_pipes = 1;
+
+ DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
+ rdev->num_gb_pipes, rdev->num_z_pipes);
}
void r420_gpu_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 036691b38cb..e1d5e0331e1 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -350,6 +350,7 @@
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
+#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
/* master controls */
@@ -438,14 +439,15 @@
# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4
# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff
-#define R500_DxMODE_INT_MASK 0x6540
-#define R500_D1MODE_INT_MASK (1<<0)
-#define R500_D2MODE_INT_MASK (1<<8)
-
#define AVIVO_D1MODE_DATA_FORMAT 0x6528
# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
+#define AVIVO_D1MODE_VBLANK_STATUS 0x6534
+# define AVIVO_VBLANK_ACK (1 << 4)
#define AVIVO_D1MODE_VLINE_START_END 0x6538
+#define AVIVO_DxMODE_INT_MASK 0x6540
+# define AVIVO_D1MODE_INT_MASK (1 << 0)
+# define AVIVO_D2MODE_INT_MASK (1 << 8)
#define AVIVO_D1MODE_VIEWPORT_START 0x6580
#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
@@ -475,6 +477,7 @@
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
+#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
#define AVIVO_D2GRPH_ENABLE 0x6900
@@ -497,6 +500,7 @@
#define AVIVO_D2CUR_SIZE 0x6c10
#define AVIVO_D2CUR_POSITION 0x6c14
+#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34
#define AVIVO_D2MODE_VLINE_START_END 0x6d38
#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
@@ -748,4 +752,8 @@
# define AVIVO_I2C_EN (1 << 0)
# define AVIVO_I2C_RESET (1 << 8)
+#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
+# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
+# define AVIVO_D2_VBLANK_INTERRUPT (1 << 5)
+
#endif
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 09fb0b6ec7d..ebd6b0f7bdf 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -177,7 +177,6 @@ void r520_gpu_init(struct radeon_device *rdev)
*/
/* workaround for RV530 */
if (rdev->family == CHIP_RV530) {
- WREG32(0x4124, 1);
WREG32(0x4128, 0xFF);
}
r420_pipes_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b1d945b8ed6..b519fb2fecb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
uint64_t *gpu_addr);
void radeon_object_unpin(struct radeon_object *robj);
int radeon_object_wait(struct radeon_object *robj);
+int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
int radeon_object_evict_vram(struct radeon_device *rdev);
int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
void radeon_object_force_delete(struct radeon_device *rdev);
@@ -574,6 +575,7 @@ struct radeon_asic {
void (*ring_start)(struct radeon_device *rdev);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
+ u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
int (*cs_parse)(struct radeon_cs_parser *p);
int (*copy_blit)(struct radeon_device *rdev,
@@ -653,6 +655,7 @@ struct radeon_device {
int usec_timeout;
enum radeon_pll_errata pll_errata;
int num_gb_pipes;
+ int num_z_pipes;
int disp_priority;
/* BIOS */
uint8_t *bios;
@@ -666,14 +669,11 @@ struct radeon_device {
resource_size_t rmmio_base;
resource_size_t rmmio_size;
void *rmmio;
- radeon_rreg_t mm_rreg;
- radeon_wreg_t mm_wreg;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
radeon_wreg_t pll_wreg;
- radeon_rreg_t pcie_rreg;
- radeon_wreg_t pcie_wreg;
+ uint32_t pcie_reg_mask;
radeon_rreg_t pciep_rreg;
radeon_wreg_t pciep_wreg;
struct radeon_clock clock;
@@ -705,22 +705,42 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ if (reg < 0x10000)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ if (reg < 0x10000)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ }
+}
+
/*
* Registers read & write functions.
*/
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
-#define RREG32(reg) rdev->mm_rreg(rdev, (reg))
-#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
-#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg))
-#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
+#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -736,6 +756,24 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
WREG32_PLL(reg, tmp_); \
} while (0)
+/*
+ * Indirect registers accessor
+ */
+static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+ r = RREG32(RADEON_PCIE_DATA);
+ return r;
+}
+
+static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+ WREG32(RADEON_PCIE_DATA, (v));
+}
+
void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -862,6 +900,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
+#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 9a75876e0c3..93d8f888930 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -49,6 +49,7 @@ void r100_vram_info(struct radeon_device *rdev);
int r100_gpu_reset(struct radeon_device *rdev);
int r100_mc_init(struct radeon_device *rdev);
void r100_mc_fini(struct radeon_device *rdev);
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
int r100_wb_init(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_gart_enable(struct radeon_device *rdev);
@@ -96,6 +97,7 @@ static struct radeon_asic r100_asic = {
.ring_start = &r100_ring_start,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
@@ -156,6 +158,7 @@ static struct radeon_asic r300_asic = {
.ring_start = &r300_ring_start,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
@@ -196,6 +199,7 @@ static struct radeon_asic r420_asic = {
.ring_start = &r300_ring_start,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
@@ -243,6 +247,7 @@ static struct radeon_asic rs400_asic = {
.ring_start = &r300_ring_start,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
@@ -261,11 +266,14 @@ static struct radeon_asic rs400_asic = {
/*
* rs600.
*/
+int rs600_init(struct radeon_device *dev);
void rs600_errata(struct radeon_device *rdev);
void rs600_vram_info(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev);
void rs600_mc_fini(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
+int rs600_irq_process(struct radeon_device *rdev);
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
int rs600_gart_enable(struct radeon_device *rdev);
void rs600_gart_disable(struct radeon_device *rdev);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
@@ -274,7 +282,7 @@ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs600_asic = {
- .init = &r300_init,
+ .init = &rs600_init,
.errata = &rs600_errata,
.vram_info = &rs600_vram_info,
.gpu_reset = &r300_gpu_reset,
@@ -291,7 +299,8 @@ static struct radeon_asic rs600_asic = {
.cp_disable = &r100_cp_disable,
.ring_start = &r300_ring_start,
.irq_set = &rs600_irq_set,
- .irq_process = &r100_irq_process,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
@@ -316,7 +325,7 @@ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs690_asic = {
- .init = &r300_init,
+ .init = &rs600_init,
.errata = &rs690_errata,
.vram_info = &rs690_vram_info,
.gpu_reset = &r300_gpu_reset,
@@ -333,7 +342,8 @@ static struct radeon_asic rs690_asic = {
.cp_disable = &r100_cp_disable,
.ring_start = &r300_ring_start,
.irq_set = &rs600_irq_set,
- .irq_process = &r100_irq_process,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
@@ -381,8 +391,9 @@ static struct radeon_asic rv515_asic = {
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &rv515_ring_start,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
@@ -423,8 +434,9 @@ static struct radeon_asic r520_asic = {
.cp_fini = &r100_cp_fini,
.cp_disable = &r100_cp_disable,
.ring_start = &rv515_ring_start,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index afc4db280b9..2a027e00762 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -685,23 +685,15 @@ static const uint32_t default_tvdac_adj[CHIP_LAST] = {
0x00780000, /* rs480 */
};
-static struct radeon_encoder_tv_dac
- *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev)
+static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev,
+ struct radeon_encoder_tv_dac *tv_dac)
{
- struct radeon_encoder_tv_dac *tv_dac = NULL;
-
- tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
-
- if (!tv_dac)
- return NULL;
-
tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
tv_dac->ps2_tvdac_adj = 0x00880000;
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
-
- return tv_dac;
+ return;
}
struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
@@ -713,19 +705,18 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
uint16_t dac_info;
uint8_t rev, bg, dac;
struct radeon_encoder_tv_dac *tv_dac = NULL;
+ int found = 0;
+
+ tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+ if (!tv_dac)
+ return NULL;
if (rdev->bios == NULL)
- return radeon_legacy_get_tv_dac_info_from_table(rdev);
+ goto out;
/* first check TV table */
dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (dac_info) {
- tv_dac =
- kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
-
- if (!tv_dac)
- return NULL;
-
rev = RBIOS8(dac_info + 0x3);
if (rev > 4) {
bg = RBIOS8(dac_info + 0xc) & 0xf;
@@ -739,6 +730,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
bg = RBIOS8(dac_info + 0x10) & 0xf;
dac = RBIOS8(dac_info + 0x11) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ found = 1;
} else if (rev > 1) {
bg = RBIOS8(dac_info + 0xc) & 0xf;
dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
@@ -751,22 +743,15 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
bg = RBIOS8(dac_info + 0xe) & 0xf;
dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ found = 1;
}
-
tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
-
- } else {
+ }
+ if (!found) {
/* then check CRT table */
dac_info =
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
- tv_dac =
- kzalloc(sizeof(struct radeon_encoder_tv_dac),
- GFP_KERNEL);
-
- if (!tv_dac)
- return NULL;
-
rev = RBIOS8(dac_info) & 0x3;
if (rev < 2) {
bg = RBIOS8(dac_info + 0x3) & 0xf;
@@ -775,6 +760,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ found = 1;
} else {
bg = RBIOS8(dac_info + 0x4) & 0xf;
dac = RBIOS8(dac_info + 0x5) & 0xf;
@@ -782,13 +768,17 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
(bg << 16) | (dac << 20);
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+ found = 1;
}
} else {
DRM_INFO("No TV DAC info found in BIOS\n");
- return radeon_legacy_get_tv_dac_info_from_table(rdev);
}
}
+out:
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
+
return tv_dac;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index d8356827ef1..7a52c461145 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -406,6 +406,15 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
{
uint32_t gb_tile_config, gb_pipe_sel = 0;
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
+ uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
+ if ((z_pipe_sel & 3) == 3)
+ dev_priv->num_z_pipes = 2;
+ else
+ dev_priv->num_z_pipes = 1;
+ } else
+ dev_priv->num_z_pipes = 1;
+
/* RS4xx/RS6xx/R4xx/R5xx */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 9ff6dcb97f9..7693f7c67bd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -225,25 +225,18 @@ void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
void radeon_register_accessor_init(struct radeon_device *rdev)
{
- rdev->mm_rreg = &r100_mm_rreg;
- rdev->mm_wreg = &r100_mm_wreg;
rdev->mc_rreg = &radeon_invalid_rreg;
rdev->mc_wreg = &radeon_invalid_wreg;
rdev->pll_rreg = &radeon_invalid_rreg;
rdev->pll_wreg = &radeon_invalid_wreg;
- rdev->pcie_rreg = &radeon_invalid_rreg;
- rdev->pcie_wreg = &radeon_invalid_wreg;
rdev->pciep_rreg = &radeon_invalid_rreg;
rdev->pciep_wreg = &radeon_invalid_wreg;
/* Don't change order as we are overriding accessor. */
if (rdev->family < CHIP_RV515) {
- rdev->pcie_rreg = &rv370_pcie_rreg;
- rdev->pcie_wreg = &rv370_pcie_wreg;
- }
- if (rdev->family >= CHIP_RV515) {
- rdev->pcie_rreg = &rv515_pcie_rreg;
- rdev->pcie_wreg = &rv515_pcie_wreg;
+ rdev->pcie_reg_mask = 0xff;
+ } else {
+ rdev->pcie_reg_mask = 0x7ff;
}
/* FIXME: not sure here */
if (rdev->family <= CHIP_R580) {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3933f8216a3..6fa32dac4e9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -100,9 +100,10 @@
* 1.28- Add support for VBL on CRTC2
* 1.29- R500 3D cmd buffer support
* 1.30- Add support for occlusion queries
+ * 1.31- Add support for num Z pipes from GET_PARAM
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 30
+#define DRIVER_MINOR 31
#define DRIVER_PATCHLEVEL 0
/*
@@ -329,6 +330,7 @@ typedef struct drm_radeon_private {
resource_size_t fb_aper_offset;
int num_gb_pipes;
+ int num_z_pipes;
int track_flush;
drm_local_map_t *mmio;
@@ -689,6 +691,7 @@ extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pciga
/* pipe config regs */
#define R400_GB_PIPE_SELECT 0x402c
+#define RV530_GB_PIPE_SELECT2 0x4124
#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
#define R300_GB_TILE_CONFIG 0x4018
# define R300_ENABLE_TILING (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3206c0ad7b6..ec383edf5f3 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -574,6 +574,8 @@ int radeonfb_create(struct radeon_device *rdev,
goto out_unref;
}
+ memset_io(fbptr, 0, aligned_size);
+
strcpy(info->fix.id, "radeondrmfb");
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index cded5180c75..d880edf254d 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -262,8 +262,34 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- /* FIXME: implement */
- return 0;
+ struct drm_radeon_gem_busy *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_object *robj;
+ int r;
+ uint32_t cur_placement;
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ return -EINVAL;
+ }
+ robj = gobj->driver_private;
+ r = radeon_object_busy_domain(robj, &cur_placement);
+ switch (cur_placement) {
+ case TTM_PL_VRAM:
+ args->domain = RADEON_GEM_DOMAIN_VRAM;
+ break;
+ case TTM_PL_TT:
+ args->domain = RADEON_GEM_DOMAIN_GTT;
+ break;
+ case TTM_PL_SYSTEM:
+ args->domain = RADEON_GEM_DOMAIN_CPU;
+ default:
+ break;
+ }
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gobj);
+ mutex_unlock(&dev->struct_mutex);
+ return r;
}
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 491d569deb0..9805e4b6ca1 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,60 +32,6 @@
#include "radeon.h"
#include "atom.h"
-static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
-{
- uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
- uint32_t irq_mask = RADEON_SW_INT_TEST;
-
- if (irqs) {
- WREG32(RADEON_GEN_INT_STATUS, irqs);
- }
- return irqs & irq_mask;
-}
-
-int r100_irq_set(struct radeon_device *rdev)
-{
- uint32_t tmp = 0;
-
- if (rdev->irq.sw_int) {
- tmp |= RADEON_SW_INT_ENABLE;
- }
- /* Todo go through CRTC and enable vblank int or not */
- WREG32(RADEON_GEN_INT_CNTL, tmp);
- return 0;
-}
-
-int r100_irq_process(struct radeon_device *rdev)
-{
- uint32_t status;
-
- status = r100_irq_ack(rdev);
- if (!status) {
- return IRQ_NONE;
- }
- while (status) {
- /* SW interrupt */
- if (status & RADEON_SW_INT_TEST) {
- radeon_fence_process(rdev);
- }
- status = r100_irq_ack(rdev);
- }
- return IRQ_HANDLED;
-}
-
-int rs600_irq_set(struct radeon_device *rdev)
-{
- uint32_t tmp = 0;
-
- if (rdev->irq.sw_int) {
- tmp |= RADEON_SW_INT_ENABLE;
- }
- WREG32(RADEON_GEN_INT_CNTL, tmp);
- /* Todo go through CRTC and enable vblank int or not */
- WREG32(R500_DxMODE_INT_MASK, 0);
- return 0;
-}
-
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3357110e30c..dce09ada32b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -95,6 +95,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_NUM_GB_PIPES:
value = rdev->num_gb_pipes;
break;
+ case RADEON_INFO_NUM_Z_PIPES:
+ value = rdev->num_z_pipes;
+ break;
default:
DRM_DEBUG("Invalid request %d\n", info->request);
return -EINVAL;
@@ -141,19 +144,42 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
*/
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
- /* FIXME: implement */
- return 0;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc > 1) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ return radeon_get_vblank_counter(rdev, crtc);
}
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
- /* FIXME: implement */
- return 0;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc > 1) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ rdev->irq.crtc_vblank_int[crtc] = true;
+
+ return radeon_irq_set(rdev);
}
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
- /* FIXME: implement */
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (crtc < 0 || crtc > 1) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return;
+ }
+
+ rdev->irq.crtc_vblank_int[crtc] = false;
+
+ radeon_irq_set(rdev);
}
@@ -295,5 +321,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 7d06dc98a42..0da72f18fd3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -310,10 +310,13 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
}
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
else {
@@ -323,10 +326,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
}
break;
}
-
- if (mode != DRM_MODE_DPMS_OFF) {
- radeon_crtc_load_lut(crtc);
- }
}
/* properly set crtc bpp when using atombios */
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 34d0f58eb94..9322675ef6d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1066,6 +1066,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ encoder->possible_crtcs = 0x1;
drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
if (rdev->is_atom_bios)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index e98cae3bf4a..b85fb83d7ae 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -316,6 +316,25 @@ int radeon_object_wait(struct radeon_object *robj)
return r;
}
+int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
+{
+ int r = 0;
+
+ r = radeon_object_reserve(robj, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+ return r;
+ }
+ spin_lock(&robj->tobj.lock);
+ *cur_placement = robj->tobj.mem.mem_type;
+ if (robj->tobj.sync_obj) {
+ r = ttm_bo_wait(&robj->tobj, true, true, true);
+ }
+ spin_unlock(&robj->tobj.lock);
+ radeon_object_unreserve(robj);
+ return r;
+}
+
int radeon_object_evict_vram(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP) {
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index e1b61857446..4df43f62c67 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -982,12 +982,15 @@
# define RS400_TMDS2_PLLRST (1 << 1)
#define RADEON_GEN_INT_CNTL 0x0040
+# define RADEON_CRTC_VBLANK_MASK (1 << 0)
+# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
-# define RADEON_VSYNC_INT_AK (1 << 2)
-# define RADEON_VSYNC_INT (1 << 2)
-# define RADEON_VSYNC2_INT_AK (1 << 6)
-# define RADEON_VSYNC2_INT (1 << 6)
+# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
+# define RADEON_CRTC_VBLANK_STAT (1 << 0)
+# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
+# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -2334,6 +2337,9 @@
# define RADEON_RE_WIDTH_SHIFT 0
# define RADEON_RE_HEIGHT_SHIFT 16
+#define RADEON_RB3D_ZPASS_DATA 0x3290
+#define RADEON_RB3D_ZPASS_ADDR 0x3294
+
#define RADEON_SE_CNTL 0x1c4c
# define RADEON_FFACE_CULL_CW (0 << 0)
# define RADEON_FFACE_CULL_CCW (1 << 0)
@@ -3568,4 +3574,6 @@
#define RADEON_SCRATCH_REG4 0x15f0
#define RADEON_SCRATCH_REG5 0x15f4
+#define RV530_GB_PIPE_SELECT2 0x4124
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 46645f3e032..2882f40d5ec 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3081,6 +3081,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
case RADEON_PARAM_NUM_GB_PIPES:
value = dev_priv->num_gb_pipes;
break;
+ case RADEON_PARAM_NUM_Z_PIPES:
+ value = dev_priv->num_z_pipes;
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", param->param);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index bbea6dee4a9..02fd11aad6a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -240,6 +240,88 @@ void rs600_mc_fini(struct radeon_device *rdev)
/*
+ * Interrupts
+ */
+int rs600_irq_set(struct radeon_device *rdev)
+{
+ uint32_t tmp = 0;
+ uint32_t mode_int = 0;
+
+ if (rdev->irq.sw_int) {
+ tmp |= RADEON_SW_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ tmp |= AVIVO_DISPLAY_INT_STATUS;
+ mode_int |= AVIVO_D1MODE_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ tmp |= AVIVO_DISPLAY_INT_STATUS;
+ mode_int |= AVIVO_D2MODE_INT_MASK;
+ }
+ WREG32(RADEON_GEN_INT_CNTL, tmp);
+ WREG32(AVIVO_DxMODE_INT_MASK, mode_int);
+ return 0;
+}
+
+static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+{
+ uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+ uint32_t irq_mask = RADEON_SW_INT_TEST;
+
+ if (irqs & AVIVO_DISPLAY_INT_STATUS) {
+ *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS);
+ if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
+ WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+ }
+ if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
+ WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+ }
+ } else {
+ *r500_disp_int = 0;
+ }
+
+ if (irqs) {
+ WREG32(RADEON_GEN_INT_STATUS, irqs);
+ }
+ return irqs & irq_mask;
+}
+
+int rs600_irq_process(struct radeon_device *rdev)
+{
+ uint32_t status;
+ uint32_t r500_disp_int;
+
+ status = rs600_irq_ack(rdev, &r500_disp_int);
+ if (!status && !r500_disp_int) {
+ return IRQ_NONE;
+ }
+ while (status || r500_disp_int) {
+ /* SW interrupt */
+ if (status & RADEON_SW_INT_TEST) {
+ radeon_fence_process(rdev);
+ }
+ /* Vertical blank interrupts */
+ if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ }
+ if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ }
+ status = rs600_irq_ack(rdev, &r500_disp_int);
+ }
+ return IRQ_HANDLED;
+}
+
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+ if (crtc == 0)
+ return RREG32(AVIVO_D1CRTC_FRAME_COUNT);
+ else
+ return RREG32(AVIVO_D2CRTC_FRAME_COUNT);
+}
+
+
+/*
* Global GPU functions
*/
void rs600_disable_vga(struct radeon_device *rdev)
@@ -327,3 +409,68 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
((reg) & RS600_MC_ADDR_MASK));
WREG32(RS600_MC_DATA, v);
}
+
+static const unsigned rs600_reg_safe_bm[219] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
+ 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
+ 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
+ 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
+ 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
+ 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+};
+
+int rs600_init(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 839595b0072..879882533e4 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -652,3 +652,4 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
WREG32(RS690_MC_DATA, v);
WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
}
+
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index fd8f3ca716e..0566fb67e46 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -400,25 +400,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
WREG32(MC_IND_INDEX, 0);
}
-uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- uint32_t r;
-
- WREG32(PCIE_INDEX, ((reg) & 0x7ff));
- (void)RREG32(PCIE_INDEX);
- r = RREG32(PCIE_DATA);
- return r;
-}
-
-void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- WREG32(PCIE_INDEX, ((reg) & 0x7ff));
- (void)RREG32(PCIE_INDEX);
- WREG32(PCIE_DATA, (v));
- (void)RREG32(PCIE_DATA);
-}
-
-
/*
* Debugfs info
*/
@@ -527,7 +508,7 @@ static const unsigned r500_reg_safe_bm[219] = {
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
+ 0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d258b02aef4..827da085813 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -674,7 +674,14 @@ omap_i2c_isr(int this_irq, void *dev_id)
err = 0;
complete:
- omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat);
+ /*
+ * Ack the stat in one go, but [R/X]DR and [R/X]RDY should be
+ * acked after the data operation is complete.
+ * Ref: TRM SWPU114Q Figure 18-31
+ */
+ omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat &
+ ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
if (stat & OMAP_I2C_STAT_NACK) {
err |= OMAP_I2C_STAT_NACK;
@@ -687,6 +694,9 @@ complete:
}
if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
OMAP_I2C_STAT_AL)) {
+ omap_i2c_ack_stat(dev, stat &
+ (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
omap_i2c_complete_cmd(dev, err);
return IRQ_HANDLED;
}
@@ -774,7 +784,7 @@ complete:
* memory to the I2C interface.
*/
- if (cpu_is_omap34xx()) {
+ if (dev->rev <= OMAP_I2C_REV_ON_3430) {
while (!(stat & OMAP_I2C_STAT_XUDF)) {
if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 182e711318b..d2728a28a8d 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -117,7 +117,8 @@ enum stu300_error {
STU300_ERROR_NONE = 0,
STU300_ERROR_ACKNOWLEDGE_FAILURE,
STU300_ERROR_BUS_ERROR,
- STU300_ERROR_ARBITRATION_LOST
+ STU300_ERROR_ARBITRATION_LOST,
+ STU300_ERROR_UNKNOWN
};
/* timeout waiting for the controller to respond */
@@ -127,7 +128,7 @@ enum stu300_error {
 * The number of address send attempts tried before giving up.
 * If the first one fails, it seems like 5 to 8 attempts are required.
*/
-#define NUM_ADDR_RESEND_ATTEMPTS 10
+#define NUM_ADDR_RESEND_ATTEMPTS 12
 /* I2C clock speed, in Hz, 0-400 kHz */
static unsigned int scl_frequency = 100000;
@@ -149,6 +150,7 @@ module_param(scl_frequency, uint, 0644);
* @msg_index: index of current message
* @msg_len: length of current message
*/
+
struct stu300_dev {
struct platform_device *pdev;
struct i2c_adapter adapter;
@@ -188,6 +190,27 @@ static inline u32 stu300_r8(void __iomem *address)
return readl(address) & 0x000000FFU;
}
+static void stu300_irq_enable(struct stu300_dev *dev)
+{
+ u32 val;
+ val = stu300_r8(dev->virtbase + I2C_CR);
+ val |= I2C_CR_INTERRUPT_ENABLE;
+ /* Twice paranoia (possible HW glitch) */
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+}
+
+static void stu300_irq_disable(struct stu300_dev *dev)
+{
+ u32 val;
+ val = stu300_r8(dev->virtbase + I2C_CR);
+ val &= ~I2C_CR_INTERRUPT_ENABLE;
+ /* Twice paranoia (possible HW glitch) */
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+ stu300_wr8(val, dev->virtbase + I2C_CR);
+}
+
+
/*
* Tells whether a certain event or events occurred in
* response to a command. The events represent states in
@@ -196,9 +219,10 @@ static inline u32 stu300_r8(void __iomem *address)
* documentation and can only be treated as abstract state
* machine states.
*
- * @ret 0 = event has not occurred, any other value means
- * the event occurred.
+ * @ret 0 = event has not occurred or unknown error, any
+ * other value means the correct event occurred or an error.
*/
+
static int stu300_event_occurred(struct stu300_dev *dev,
enum stu300_event mr_event) {
u32 status1;
@@ -206,11 +230,28 @@ static int stu300_event_occurred(struct stu300_dev *dev,
/* What event happened? */
status1 = stu300_r8(dev->virtbase + I2C_SR1);
+
if (!(status1 & I2C_SR1_EVF_IND))
/* No event at all */
return 0;
+
status2 = stu300_r8(dev->virtbase + I2C_SR2);
+ /* Block any multiple interrupts */
+ stu300_irq_disable(dev);
+
+ /* Check for errors first */
+ if (status2 & I2C_SR2_AF_IND) {
+ dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
+ return 1;
+ } else if (status2 & I2C_SR2_BERR_IND) {
+ dev->cmd_err = STU300_ERROR_BUS_ERROR;
+ return 1;
+ } else if (status2 & I2C_SR2_ARLO_IND) {
+ dev->cmd_err = STU300_ERROR_ARBITRATION_LOST;
+ return 1;
+ }
+
switch (mr_event) {
case STU300_EVENT_1:
if (status1 & I2C_SR1_ADSL_IND)
@@ -221,10 +262,6 @@ static int stu300_event_occurred(struct stu300_dev *dev,
case STU300_EVENT_7:
case STU300_EVENT_8:
if (status1 & I2C_SR1_BTF_IND) {
- if (status2 & I2C_SR2_AF_IND)
- dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
- else if (status2 & I2C_SR2_BERR_IND)
- dev->cmd_err = STU300_ERROR_BUS_ERROR;
return 1;
}
break;
@@ -240,8 +277,6 @@ static int stu300_event_occurred(struct stu300_dev *dev,
case STU300_EVENT_6:
if (status2 & I2C_SR2_ENDAD_IND) {
/* First check for any errors */
- if (status2 & I2C_SR2_AF_IND)
- dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
return 1;
}
break;
@@ -252,8 +287,15 @@ static int stu300_event_occurred(struct stu300_dev *dev,
default:
break;
}
- if (status2 & I2C_SR2_ARLO_IND)
- dev->cmd_err = STU300_ERROR_ARBITRATION_LOST;
+	/* If we get here, we are on thin ice: we have
+	 * received a response that does not match
+	 * what we requested.
+	 */
+ dev->cmd_err = STU300_ERROR_UNKNOWN;
+ dev_err(&dev->pdev->dev,
+ "Unhandled interrupt! %d sr1: 0x%x sr2: 0x%x\n",
+ mr_event, status1, status2);
return 0;
}
@@ -262,21 +304,20 @@ static irqreturn_t stu300_irh(int irq, void *data)
struct stu300_dev *dev = data;
int res;
+ /* Just make sure that the block is clocked */
+ clk_enable(dev->clk);
+
/* See if this was what we were waiting for */
spin_lock(&dev->cmd_issue_lock);
- if (dev->cmd_event != STU300_EVENT_NONE) {
- res = stu300_event_occurred(dev, dev->cmd_event);
- if (res || dev->cmd_err != STU300_ERROR_NONE) {
- u32 val;
-
- complete(&dev->cmd_complete);
- /* Block any multiple interrupts */
- val = stu300_r8(dev->virtbase + I2C_CR);
- val &= ~I2C_CR_INTERRUPT_ENABLE;
- stu300_wr8(val, dev->virtbase + I2C_CR);
- }
- }
+
+ res = stu300_event_occurred(dev, dev->cmd_event);
+ if (res || dev->cmd_err != STU300_ERROR_NONE)
+ complete(&dev->cmd_complete);
+
spin_unlock(&dev->cmd_issue_lock);
+
+ clk_disable(dev->clk);
+
return IRQ_HANDLED;
}
@@ -308,7 +349,6 @@ static int stu300_start_and_await_event(struct stu300_dev *dev,
stu300_wr8(cr_value, dev->virtbase + I2C_CR);
ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
STU300_TIMEOUT);
-
if (ret < 0) {
dev_err(&dev->pdev->dev,
"wait_for_completion_interruptible_timeout() "
@@ -342,7 +382,6 @@ static int stu300_await_event(struct stu300_dev *dev,
enum stu300_event mr_event)
{
int ret;
- u32 val;
if (unlikely(irqs_disabled())) {
/* TODO: implement polling for this case if need be. */
@@ -354,36 +393,18 @@ static int stu300_await_event(struct stu300_dev *dev,
/* Is it already here? */
spin_lock_irq(&dev->cmd_issue_lock);
dev->cmd_err = STU300_ERROR_NONE;
- if (stu300_event_occurred(dev, mr_event)) {
- spin_unlock_irq(&dev->cmd_issue_lock);
- goto exit_await_check_err;
- }
- init_completion(&dev->cmd_complete);
- dev->cmd_err = STU300_ERROR_NONE;
dev->cmd_event = mr_event;
- /* Turn on the I2C interrupt for current operation */
- val = stu300_r8(dev->virtbase + I2C_CR);
- val |= I2C_CR_INTERRUPT_ENABLE;
- stu300_wr8(val, dev->virtbase + I2C_CR);
-
- /* Twice paranoia (possible HW glitch) */
- stu300_wr8(val, dev->virtbase + I2C_CR);
+ init_completion(&dev->cmd_complete);
- /* Check again: is it already here? */
- if (unlikely(stu300_event_occurred(dev, mr_event))) {
- /* Disable IRQ again. */
- val &= ~I2C_CR_INTERRUPT_ENABLE;
- stu300_wr8(val, dev->virtbase + I2C_CR);
- spin_unlock_irq(&dev->cmd_issue_lock);
- goto exit_await_check_err;
- }
+ /* Turn on the I2C interrupt for current operation */
+ stu300_irq_enable(dev);
/* Unlock the command block and wait for the event to occur */
spin_unlock_irq(&dev->cmd_issue_lock);
+
ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
STU300_TIMEOUT);
-
if (ret < 0) {
dev_err(&dev->pdev->dev,
"wait_for_completion_interruptible_timeout()"
@@ -401,7 +422,6 @@ static int stu300_await_event(struct stu300_dev *dev,
return -ETIMEDOUT;
}
- exit_await_check_err:
if (dev->cmd_err != STU300_ERROR_NONE) {
if (mr_event != STU300_EVENT_6) {
dev_err(&dev->pdev->dev, "controller "
@@ -457,18 +477,19 @@ struct stu300_clkset {
};
static const struct stu300_clkset stu300_clktable[] = {
- { 0, 0xFFU },
- { 2500000, I2C_OAR2_FR_25_10MHZ },
- { 10000000, I2C_OAR2_FR_10_1667MHZ },
- { 16670000, I2C_OAR2_FR_1667_2667MHZ },
- { 26670000, I2C_OAR2_FR_2667_40MHZ },
- { 40000000, I2C_OAR2_FR_40_5333MHZ },
- { 53330000, I2C_OAR2_FR_5333_66MHZ },
- { 66000000, I2C_OAR2_FR_66_80MHZ },
- { 80000000, I2C_OAR2_FR_80_100MHZ },
+ { 0, 0xFFU },
+ { 2500000, I2C_OAR2_FR_25_10MHZ },
+ { 10000000, I2C_OAR2_FR_10_1667MHZ },
+ { 16670000, I2C_OAR2_FR_1667_2667MHZ },
+ { 26670000, I2C_OAR2_FR_2667_40MHZ },
+ { 40000000, I2C_OAR2_FR_40_5333MHZ },
+ { 53330000, I2C_OAR2_FR_5333_66MHZ },
+ { 66000000, I2C_OAR2_FR_66_80MHZ },
+ { 80000000, I2C_OAR2_FR_80_100MHZ },
{ 100000000, 0xFFU },
};
+
static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
{
@@ -494,10 +515,10 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
if (dev->speed > 100000)
/* Fast Mode I2C */
- val = ((clkrate/dev->speed)-9)/3;
+ val = ((clkrate/dev->speed) - 9)/3 + 1;
else
/* Standard Mode I2C */
- val = ((clkrate/dev->speed)-7)/2;
+ val = ((clkrate/dev->speed) - 7)/2 + 1;
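+	/* e.g. (assumed numbers) a 13 MHz input clock at 100 kHz: (130 - 7)/2 + 1 = 62 */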
/* According to spec the divider must be > 2 */
if (val < 0x002) {
@@ -557,6 +578,7 @@ static int stu300_init_hw(struct stu300_dev *dev)
*/
clkrate = clk_get_rate(dev->clk);
ret = stu300_set_clk(dev, clkrate);
+
if (ret)
return ret;
/*
@@ -641,7 +663,6 @@ static int stu300_xfer_msg(struct i2c_adapter *adap,
int attempts = 0;
struct stu300_dev *dev = i2c_get_adapdata(adap);
-
clk_enable(dev->clk);
/* Remove this if (0) to trace each and every message. */
@@ -715,14 +736,15 @@ static int stu300_xfer_msg(struct i2c_adapter *adap,
if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) {
dev_dbg(&dev->pdev->dev, "managed to get address "
- "through after %d attempts\n", attempts);
+ "through after %d attempts\n", attempts);
} else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) {
dev_dbg(&dev->pdev->dev, "I give up, tried %d times "
- "to resend address.\n",
- NUM_ADDR_RESEND_ATTEMPTS);
+ "to resend address.\n",
+ NUM_ADDR_RESEND_ATTEMPTS);
goto exit_disable;
}
+
if (msg->flags & I2C_M_RD) {
/* READ: we read the actual bytes one at a time */
for (i = 0; i < msg->len; i++) {
@@ -804,8 +826,10 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
{
int ret = -1;
int i;
+
struct stu300_dev *dev = i2c_get_adapdata(adap);
dev->msg_len = num;
+
for (i = 0; i < num; i++) {
/*
* Another driver appears to send stop for each message,
@@ -817,6 +841,7 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
dev->msg_index = i;
ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1)));
+
if (ret != 0) {
num = ret;
break;
@@ -845,6 +870,7 @@ stu300_probe(struct platform_device *pdev)
struct resource *res;
int bus_nr;
int ret = 0;
+ char clk_name[] = "I2C0";
dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL);
if (!dev) {
@@ -854,7 +880,8 @@ stu300_probe(struct platform_device *pdev)
}
bus_nr = pdev->id;
- dev->clk = clk_get(&pdev->dev, NULL);
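+	/* Build the per-bus clock name, e.g. bus 1 -> "I2C1" (assumes single-digit bus numbers) */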
+ clk_name[3] += (char)bus_nr;
+ dev->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
dev_err(&pdev->dev, "could not retrieve i2c bus clock\n");
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index 923cbfe259d..6396c3ad325 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -177,6 +177,7 @@ static const struct pci_device_id atiixp_pci_tbl[] = {
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 527908ff298..063b933d864 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = {
PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 8f9509e1ebf..55d093a36ae 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
* In either case, must tell the provider to reject.
*/
cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ cm_id->device->iwcm->reject(cm_id, NULL, 0);
break;
case IW_CM_STATE_CONN_SENT:
case IW_CM_STATE_DESTROYING:
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index de922a04ca2..7522008fda8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2,6 +2,7 @@
* Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");
+int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+
+module_param_named(send_queue_size, mad_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
+module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+
static struct kmem_cache *ib_mad_cache;
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
/* Port list lock */
-static spinlock_t ib_mad_port_list_lock;
-
+static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
unsigned long delay;
if (list_empty(&mad_agent_priv->wait_list)) {
- cancel_delayed_work(&mad_agent_priv->timed_work);
+ __cancel_delayed_work(&mad_agent_priv->timed_work);
} else {
mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
struct ib_mad_send_wr_private,
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
if (time_after(mad_agent_priv->timeout,
mad_send_wr->timeout)) {
mad_agent_priv->timeout = mad_send_wr->timeout;
- cancel_delayed_work(&mad_agent_priv->timed_work);
+ __cancel_delayed_work(&mad_agent_priv->timed_work);
delay = mad_send_wr->timeout - jiffies;
if ((long)delay <= 0)
delay = 1;
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
/* Reschedule a work item if we have a shorter timeout */
if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
- cancel_delayed_work(&mad_agent_priv->timed_work);
+ __cancel_delayed_work(&mad_agent_priv->timed_work);
queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
&mad_agent_priv->timed_work, delay);
}
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
qp_init_attr.send_cq = qp_info->port_priv->cq;
qp_init_attr.recv_cq = qp_info->port_priv->cq;
qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
- qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
+ qp_init_attr.cap.max_send_wr = mad_sendq_size;
+ qp_init_attr.cap.max_recv_wr = mad_recvq_size;
qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
qp_init_attr.qp_type = qp_type;
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
goto error;
}
/* Use minimum queue sizes unless the CQ is resized */
- qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
- qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
+ qp_info->send_queue.max_active = mad_sendq_size;
+ qp_info->recv_queue.max_active = mad_recvq_size;
return 0;
error:
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
- cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
+ cq_size = (mad_sendq_size + mad_recvq_size) * 2;
port_priv->cq = ib_create_cq(port_priv->device,
ib_mad_thread_completion_handler,
NULL, port_priv, cq_size, 0);
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)
{
int ret;
- spin_lock_init(&ib_mad_port_list_lock);
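+	/* Clamp the module parameters into the supported queue-size range */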
+ mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
+ mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
+
+ mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
+ mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
ib_mad_cache = kmem_cache_create("ib_mad",
sizeof(struct ib_mad_private),
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)
module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);
-
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 05ce331733b..9430ab4969c 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -2,6 +2,7 @@
* Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,8 @@
/* QP and CQ parameters */
#define IB_MAD_QP_SEND_SIZE 128
#define IB_MAD_QP_RECV_SIZE 512
+#define IB_MAD_QP_MIN_SIZE 64
+#define IB_MAD_QP_MAX_SIZE 8192
#define IB_MAD_SEND_REQ_MAX_SG 2
#define IB_MAD_RECV_REQ_MAX_SG 1
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 107f170c57c..8d82ba17135 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,6 +106,8 @@ struct mcast_group {
struct ib_sa_query *query;
int query_id;
u16 pkey_index;
+ u8 leave_state;
+ int retries;
};
struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
rec = group->rec;
rec.join_state = leave_state;
+ group->leave_state = leave_state;
ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
{
struct mcast_group *group = context;
- mcast_work_handler(&group->work);
+ if (status && group->retries > 0 &&
+ !send_leave(group, group->leave_state))
+ group->retries--;
+ else
+ mcast_work_handler(&group->work);
}
static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
if (!group)
return NULL;
+ group->retries = 3;
group->port = port;
group->rec.mgid = *mgid;
group->pkey_index = MCAST_INVALID_PKEY_INDEX;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1865049e80f..82543716d59 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
.remove = ib_sa_remove_one
};
-static spinlock_t idr_lock;
+static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);
-static spinlock_t tid_lock;
+static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
#define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
{
int ret;
- spin_lock_init(&idr_lock);
- spin_lock_init(&tid_lock);
-
get_random_bytes(&tid, sizeof tid);
ret = ib_register_client(&sa_client);
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 87236753bce..5855e4405d9 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
hop_cnt = smp->hop_cnt;
/* See section 14.2.2.2, Vol 1 IB spec */
+ /* C14-6 -- valid hop_cnt values are from 0 to 63 */
+ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+ return IB_SMI_DISCARD;
+
if (!ib_get_smp_direction(smp)) {
/* C14-9:1 */
if (hop_cnt && hop_ptr == 0) {
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
hop_cnt = smp->hop_cnt;
/* See section 14.2.2.2, Vol 1 IB spec */
+ /* C14-6 -- valid hop_cnt values are from 0 to 63 */
+ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+ return IB_SMI_DISCARD;
+
if (!ib_get_smp_direction(smp)) {
/* C14-9:1 -- sender should have incremented hop_ptr */
if (hop_cnt && hop_ptr == 0)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index eb36a81dd09..d3fff9e008a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
-static spinlock_t map_lock;
+static DEFINE_SPINLOCK(map_lock);
static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (hdr.command < 0 ||
hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
- !uverbs_cmd_table[hdr.command] ||
- !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+ !uverbs_cmd_table[hdr.command])
return -EINVAL;
if (!file->ucontext &&
hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
return -EINVAL;
+ if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+ return -ENOSYS;
+
return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
hdr.in_words * 4, hdr.out_words * 4);
}
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
{
int ret;
- spin_lock_init(&map_lock);
-
ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 0cfbb6d2f76..8250740c94b 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);
static void c2_print_macaddr(struct net_device *netdev)
{
- pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
- "IRQ %u\n", netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
- netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
- netdev->irq);
+ pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
}
static void c2_set_rxbufsize(struct c2_port *c2_port)
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index f1948fad85d..ad723bd8bf4 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
/* Register pseudo network device */
dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
if (!dev->pseudo_netdev)
- goto out3;
+ goto out;
ret = register_netdev(dev->pseudo_netdev);
if (ret)
- goto out2;
+ goto out_free_netdev;
pr_debug("%s:%u\n", __func__, __LINE__);
strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.post_recv = c2_post_receive;
dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+ if (dev->ibdev.iwcm == NULL) {
+ ret = -ENOMEM;
+ goto out_unregister_netdev;
+ }
dev->ibdev.iwcm->add_ref = c2_add_ref;
dev->ibdev.iwcm->rem_ref = c2_rem_ref;
dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)
ret = ib_register_device(&dev->ibdev);
if (ret)
- goto out1;
+ goto out_free_iwcm;
for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
ret = device_create_file(&dev->ibdev.dev,
c2_dev_attributes[i]);
if (ret)
- goto out0;
+ goto out_unregister_ibdev;
}
- goto out3;
+ goto out;
-out0:
+out_unregister_ibdev:
ib_unregister_device(&dev->ibdev);
-out1:
+out_free_iwcm:
+ kfree(dev->ibdev.iwcm);
+out_unregister_netdev:
unregister_netdev(dev->pseudo_netdev);
-out2:
+out_free_netdev:
free_netdev(dev->pseudo_netdev);
-out3:
+out:
pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 62f9cf2f94e..72ed3396b72 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
wqe->qpcaps = attr->qpcaps;
wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
wqe->rqe_count = cpu_to_be16(attr->rqe_count);
- wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
+ wqe->flags_rtr_type = cpu_to_be16(attr->flags |
+ V_RTR_TYPE(attr->rtr_type) |
+ V_CHAN(attr->chan));
wqe->ord = cpu_to_be32(attr->ord);
wqe->ird = cpu_to_be32(attr->ird);
wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ err3:
err2:
cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
+ rdev_p->t3cdev_p->ulp = NULL;
list_del(&rdev_p->entry);
return err;
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 32e3b1461d8..a197a5b7ac7 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
+#define S_CHAN 4
+#define M_CHAN 0x3
+#define V_CHAN(x) ((x) << S_CHAN)
+#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
+
struct t3_rdma_init_attr {
u32 tid;
u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
u16 flags;
u16 rqe_count;
u32 irs;
+ u32 chan;
};
struct t3_rdma_init_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 26fc0a4eaa7..b0ea0105ddf 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
-static void iwch_err_handler(struct t3cdev *, u32, u32);
+static void iwch_event_handler(struct t3cdev *, u32, u32);
struct cxgb3_client t3c_client = {
.name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
.remove = close_rnic_dev,
.handlers = t3c_handlers,
.redirect = iwch_ep_redirect,
- .err_handler = iwch_err_handler
+ .event_handler = iwch_event_handler
};
static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
static void open_rnic_dev(struct t3cdev *tdev)
{
struct iwch_dev *rnicp;
- static int vers_printed;
PDBG("%s t3cdev %p\n", __func__, tdev);
- if (!vers_printed++)
- printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
+ printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
DRV_VERSION);
rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
if (!rnicp) {
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
mutex_unlock(&dev_mutex);
}
-static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
{
struct cxio_rdev *rdev = tdev->ulp;
- struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
+ struct iwch_dev *rnicp;
struct ib_event event;
+ u32 portnum = port_id + 1;
- if (status == OFFLOAD_STATUS_DOWN) {
+ if (!rdev)
+ return;
+ rnicp = rdev_to_iwch_dev(rdev);
+ switch (evt) {
+ case OFFLOAD_STATUS_DOWN: {
rdev->flags = CXIO_ERROR_FATAL;
-
- event.device = &rnicp->ibdev;
event.event = IB_EVENT_DEVICE_FATAL;
- event.element.port_num = 0;
- ib_dispatch_event(&event);
+ break;
+ }
+ case OFFLOAD_PORT_DOWN: {
+ event.event = IB_EVENT_PORT_ERR;
+ break;
+ }
+ case OFFLOAD_PORT_UP: {
+ event.event = IB_EVENT_PORT_ACTIVE;
+ break;
+ }
}
+ event.device = &rnicp->ibdev;
+ event.element.port_num = portnum;
+ ib_dispatch_event(&event);
+
return;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 52d7bb0c2a1..66b41351910 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
ep = container_of(container_of(kref, struct iwch_ep_common, kref),
struct iwch_ep, com);
PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
- if (ep->com.flags & RELEASE_RESOURCES) {
+ if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
static void release_ep_resources(struct iwch_ep *ep)
{
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- ep->com.flags |= RELEASE_RESOURCES;
+ set_bit(RELEASE_RESOURCES, &ep->com.flags);
put_ep(&ep->com);
}
@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
event.provider_data = ep;
- if (state_read(&ep->parent_ep->com) != DEAD)
+ if (state_read(&ep->parent_ep->com) != DEAD) {
+ get_ep(&ep->com);
ep->parent_ep->com.cm_id->event_handler(
ep->parent_ep->com.cm_id,
&event);
+ }
put_ep(&ep->parent_ep->com);
ep->parent_ep = NULL;
}
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
* We get 2 abort replies from the HW. The first one must
* be ignored except for scribbling that we need one more.
*/
- if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
- ep->com.flags |= ABORT_REQ_IN_PROGRESS;
+ if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
return CPL_RET_BUF_DONE;
}
@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
/*
* We're gonna mark this puppy DEAD, but keep
* the reference on it until the ULP accepts or
- * rejects the CR.
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see iwch_accept_cr()).
*/
__state_set(&ep->com, CLOSING);
- get_ep(&ep->com);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p\n", ep);
+ wake_up(&ep->com.waitq);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
* We get 2 peer aborts from the HW. The first one must
* be ignored except for scribbling that we need one more.
*/
- if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
- ep->com.flags |= PEER_ABORT_IN_PROGRESS;
+ if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
return CPL_RET_BUF_DONE;
}
@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
/*
* We're gonna mark this puppy DEAD, but keep
* the reference on it until the ULP accepts or
- * rejects the CR.
+ * rejects the CR. Also wake up anyone waiting
+ * in rdma connection migration (see iwch_accept_cr()).
*/
- get_ep(&ep->com);
+ ep->com.rpl_done = 1;
+ ep->com.rpl_err = -ECONNRESET;
+ PDBG("waking up ep %p\n", ep);
+ wake_up(&ep->com.waitq);
break;
case MORIBUND:
case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
err = send_mpa_reject(ep, pdata, pdata_len);
err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
}
+ put_ep(&ep->com);
return 0;
}
@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD)
- return -ECONNRESET;
+ if (state_read(&ep->com) == DEAD) {
+ err = -ECONNRESET;
+ goto err;
+ }
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
(conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
abort_connection(ep, NULL, GFP_KERNEL);
- return -EINVAL;
+ err = -EINVAL;
+ goto err;
}
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->com.qp = qp;
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
- get_ep(&ep->com);
-
/* bind QP to EP and move to RTS */
attrs.mpa_attr = ep->mpa_attr;
attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = iwch_modify_qp(ep->com.qp->rhp,
ep->com.qp, mask, &attrs, 1);
if (err)
- goto err;
+ goto err1;
/* if needed, wait for wr_ack */
if (iwch_rqes_posted(qp)) {
wait_event(ep->com.waitq, ep->com.rpl_done);
err = ep->com.rpl_err;
if (err)
- goto err;
+ goto err1;
}
err = send_mpa_reply(ep, conn_param->private_data,
conn_param->private_data_len);
if (err)
- goto err;
+ goto err1;
state_set(&ep->com, FPDU_MODE);
established_upcall(ep);
put_ep(&ep->com);
return 0;
-err:
+err1:
ep->com.cm_id = NULL;
ep->com.qp = NULL;
cm_id->rem_ref(cm_id);
+err:
put_ep(&ep->com);
return err;
}
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
ep->com.state = CLOSING;
start_ep_timer(ep);
}
+ set_bit(CLOSE_SENT, &ep->com.flags);
break;
case CLOSING:
- close = 1;
- if (abrupt) {
- stop_ep_timer(ep);
- ep->com.state = ABORTING;
- } else
- ep->com.state = MORIBUND;
+ if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+ close = 1;
+ if (abrupt) {
+ stop_ep_timer(ep);
+ ep->com.state = ABORTING;
+ } else
+ ep->com.state = MORIBUND;
+ }
break;
case MORIBUND:
case ABORTING:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 43c0aea7ead..b9efadfffb4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -145,9 +145,10 @@ enum iwch_ep_state {
};
enum iwch_ep_flags {
- PEER_ABORT_IN_PROGRESS = (1 << 0),
- ABORT_REQ_IN_PROGRESS = (1 << 1),
- RELEASE_RESOURCES = (1 << 2),
+ PEER_ABORT_IN_PROGRESS = 0,
+ ABORT_REQ_IN_PROGRESS = 1,
+ RELEASE_RESOURCES = 2,
+ CLOSE_SENT = 3,
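+	/* bit numbers used with set_bit()/test_bit() on iwch_ep_common.flags */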
};
struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
wait_queue_head_t waitq;
int rpl_done;
int rpl_err;
- u32 flags;
+ unsigned long flags;
};
struct iwch_listen_ep {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index ec49a5cbdeb..e1ec65ebb01 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -39,7 +39,7 @@
#include "iwch.h"
#include "iwch_provider.h"
-static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
+static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
{
u32 mmid;
@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
struct iwch_mr *mhp, int shift)
{
u32 stag;
+ int ret;
if (cxio_register_phys_mem(&rhp->rdev,
&stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mhp->attr.pbl_size, mhp->attr.pbl_addr))
return -ENOMEM;
- iwch_finish_mem_reg(mhp, stag);
-
- return 0;
+ ret = iwch_finish_mem_reg(mhp, stag);
+ if (ret)
+ cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+ return ret;
}
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
int npages)
{
u32 stag;
+ int ret;
/* We could support this... */
if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mhp->attr.pbl_size, mhp->attr.pbl_addr))
return -ENOMEM;
- iwch_finish_mem_reg(mhp, stag);
+ ret = iwch_finish_mem_reg(mhp, stag);
+ if (ret)
+ cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
- return 0;
+ return ret;
}
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e2a63214008..6895523779d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
spin_lock_init(&chp->lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
- insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
+ cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
+ kfree(chp);
+ return ERR_PTR(-ENOMEM);
+ }
if (ucontext) {
struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
mhp->attr.stag = stag;
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
- insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+ cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
+ kfree(mhp);
+ return ERR_PTR(-ENOMEM);
+ }
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
}
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
struct iwch_mr *mhp;
u32 mmid;
u32 stag = 0;
- int ret;
+ int ret = 0;
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
- return ERR_PTR(-ENOMEM);
+ goto err;
mhp->rhp = rhp;
ret = iwch_alloc_pbl(mhp, pbl_depth);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err1;
mhp->attr.pbl_size = pbl_depth;
ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret) {
- iwch_free_pbl(mhp);
- kfree(mhp);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err2;
mhp->attr.pdid = php->pdid;
mhp->attr.type = TPT_NON_SHARED_MR;
mhp->attr.stag = stag;
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+ goto err3;
+
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmr);
+err3:
+ cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+ mhp->attr.pbl_addr);
+err2:
+ iwch_free_pbl(mhp);
+err1:
+ kfree(mhp);
+err:
+ return ERR_PTR(ret);
}
static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
spin_lock_init(&qhp->lock);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
- insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);
+
+ if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+ cxio_destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ kfree(qhp);
+ return ERR_PTR(-ENOMEM);
+ }
if (udata) {
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
bail2:
ib_unregister_device(&dev->ibdev);
bail1:
+ kfree(dev->ibdev.iwcm);
return ret;
}
@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
device_remove_file(&dev->ibdev.dev,
iwch_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
+ kfree(dev->ibdev.iwcm);
return;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 27bbdc8e773..6e865347194 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
init_attr.rqe_count = iwch_rqes_posted(qhp);
init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
+ init_attr.chan = qhp->ep->l2t->smt_idx;
if (peer2peer) {
init_attr.rtr_type = RTR_READ;
if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fab18a2c74a..5b635aa5947 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"
-#define HCAD_VERSION "0028"
+#define HCAD_VERSION "0029"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0;
static int ehca_poll_all_eqs = 1;
int ehca_debug_level = 0;
-int ehca_nr_ports = 2;
+int ehca_nr_ports = -1;
int ehca_use_hp_mr = 0;
int ehca_port_act_time = 30;
int ehca_static_rate = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
"Hardware level (0: autosensing (default), "
"0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
MODULE_PARM_DESC(nr_ports,
- "number of connected ports (-1: autodetect, 1: port one only, "
- "2: two ports (default)");
+ "number of connected ports (-1: autodetect (default), "
+ "1: port one only, 2: two ports)");
MODULE_PARM_DESC(use_hp_mr,
"Use high performance MRs (default: no)");
MODULE_PARM_DESC(port_act_time,
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 5a3d96f84c7..8fd88cd828f 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -786,7 +786,11 @@ repoll:
wc->slid = cqe->rlid;
wc->dlid_path_bits = cqe->dlid;
wc->src_qp = cqe->remote_qp_number;
- wc->wc_flags = cqe->w_completion_flags;
+ /*
+ * HW has "Immed data present" and "GRH present" in bits 6 and 5.
+ * SW defines those in bits 1 and 0, so we can just shift and mask.
+ */
+ wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
wc->sl = cqe->service_level;
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index c568b28f4e2..8c1213f8916 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -125,14 +125,30 @@ struct ib_perf {
u8 data[192];
} __attribute__ ((packed));
+/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
+struct tcslfl {
+ u32 tc:8;
+ u32 sl:4;
+ u32 fl:20;
+} __attribute__ ((packed));
+
+/* IP Version/TC/FL packed into 32 bits, as in GRH */
+struct vertcfl {
+ u32 ver:4;
+ u32 tc:8;
+ u32 fl:20;
+} __attribute__ ((packed));
static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct ib_perf *in_perf = (struct ib_perf *)in_mad;
struct ib_perf *out_perf = (struct ib_perf *)out_mad;
struct ib_class_port_info *poi =
(struct ib_class_port_info *)out_perf->data;
+ struct tcslfl *tcslfl =
+ (struct tcslfl *)&poi->redirect_tcslfl;
struct ehca_shca *shca =
container_of(ibdev, struct ehca_shca, ib_device);
struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
poi->base_version = 1;
poi->class_version = 1;
poi->resp_time_value = 18;
- poi->redirect_lid = sport->saved_attr.lid;
- poi->redirect_qp = sport->pma_qp_nr;
+
+ /* copy local routing information from WC where applicable */
+ tcslfl->sl = in_wc->sl;
+ poi->redirect_lid =
+ sport->saved_attr.lid | in_wc->dlid_path_bits;
+ poi->redirect_qp = sport->pma_qp_nr;
poi->redirect_qkey = IB_QP1_QKEY;
- poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+ ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
+ &poi->redirect_pkey);
+
+ /* if request was globally routed, copy route info */
+ if (in_grh) {
+ struct vertcfl *vertcfl =
+ (struct vertcfl *)&in_grh->version_tclass_flow;
+ memcpy(poi->redirect_gid, in_grh->dgid.raw,
+ sizeof(poi->redirect_gid));
+ tcslfl->tc = vertcfl->tc;
+ tcslfl->fl = vertcfl->fl;
+ } else
+ /* else only fill in default GID */
+ ehca_query_gid(ibdev, port_num, 0,
+ (union ib_gid *)&poi->redirect_gid);
ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ perf_reply:
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad)
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
{
int ret;
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS;
ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
- ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+ ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
+ in_mad, out_mad);
return ret;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 23173982b32..38a28700661 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
pd->port_cnt = 1;
port_fp(fp) = pd;
pd->port_pid = get_pid(task_pid(current));
- strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+ strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
ipath_stats.sps_ports++;
ret = 0;
} else
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 16a702d4601..ceb98ee7866 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
if (smp->attr_mod)
smp->status |= IB_SMP_INVALID_FIELD;
- strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+ memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
return reply(smp);
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ae3d7590346..3cb3f47a10b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx4_ib_alloc_ucontext_resp resp;
int err;
+ if (!dev->ib_active)
+ return ERR_PTR(-EAGAIN);
+
resp.qp_tab_size = dev->dev->caps.num_qps;
resp.bf_reg_size = dev->dev->caps.bf_reg_size;
resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
- static int mlx4_ib_version_printed;
struct mlx4_ib_dev *ibdev;
int num_ports = 0;
int i;
- if (!mlx4_ib_version_printed) {
- printk(KERN_INFO "%s", mlx4_ib_version);
- ++mlx4_ib_version_printed;
- }
+ printk_once(KERN_INFO "%s", mlx4_ib_version);
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_reg;
}
+ ibdev->ib_active = true;
+
return ibdev;
err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
break;
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+ ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
break;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 8a7dd6795fa..3486d7675e5 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
spinlock_t sm_lock;
struct mutex cap_mask_mutex;
+ bool ib_active;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c4a02648c8a..219b10397b4 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
}
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
spin_lock_irq(&send_cq->lock);
- else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_lock_irq(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
}
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+ __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
- else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
} else {
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 65ad359fdf1..056b2a4c697 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
event.device = &dev->ib_dev;
event.event = IB_EVENT_DEVICE_FATAL;
event.element.port_num = 0;
+ dev->active = false;
ib_dispatch_event(&event);
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index 75671f75cac..155bc66395b 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -34,8 +34,6 @@
#ifndef MTHCA_CONFIG_REG_H
#define MTHCA_CONFIG_REG_H
-#include <asm/page.h>
-
#define MTHCA_HCR_BASE 0x80680
#define MTHCA_HCR_SIZE 0x0001c
#define MTHCA_ECR_BASE 0x80700
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 9ef611f6dd3..7e6a6d64ad4 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -357,6 +357,7 @@ struct mthca_dev {
struct ib_ah *sm_ah[MTHCA_MAX_PORTS];
spinlock_t sm_lock;
u8 rate[MTHCA_MAX_PORTS];
+ bool active;
};
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 90e4e450a12..8c31fa36e95 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)
if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
static const char *eq_name[] = {
- [MTHCA_EQ_COMP] = DRV_NAME " (comp)",
- [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
- [MTHCA_EQ_CMD] = DRV_NAME " (cmd)"
+ [MTHCA_EQ_COMP] = DRV_NAME "-comp",
+ [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
+ [MTHCA_EQ_CMD] = DRV_NAME "-cmd"
};
for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+ snprintf(dev->eq_table.eq[i].irq_name,
+ IB_DEVICE_NAME_MAX,
+ "%s@pci:%s", eq_name[i],
+ pci_name(dev->pdev));
err = request_irq(dev->eq_table.eq[i].msi_x_vector,
mthca_is_memfree(dev) ?
mthca_arbel_msi_x_interrupt :
mthca_tavor_msi_x_interrupt,
- 0, eq_name[i], dev->eq_table.eq + i);
+ 0, dev->eq_table.eq[i].irq_name,
+ dev->eq_table.eq + i);
if (err)
goto err_out_cmd;
dev->eq_table.eq[i].have_irq = 1;
}
} else {
+ snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
+ DRV_NAME "@pci:%s", pci_name(dev->pdev));
err = request_irq(dev->pdev->irq,
mthca_is_memfree(dev) ?
mthca_arbel_interrupt :
mthca_tavor_interrupt,
- IRQF_SHARED, DRV_NAME, dev);
+ IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
if (err)
goto err_out_cmd;
dev->eq_table.have_irq = 1;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 13da9f1d24c..b01b2898787 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
pci_set_drvdata(pdev, mdev);
mdev->hca_type = hca_type;
+ mdev->active = true;
+
return 0;
err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
static int __devinit mthca_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static int mthca_version_printed = 0;
int ret;
mutex_lock(&mthca_device_mutex);
- if (!mthca_version_printed) {
- printk(KERN_INFO "%s", mthca_version);
- ++mthca_version_printed;
- }
+ printk_once(KERN_INFO "%s", mthca_version);
if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 87ad889e367..bcf7a401482 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
struct mthca_ucontext *context;
int err;
+ if (!(to_mdev(ibdev)->active))
+ return ERR_PTR(-EAGAIN);
+
memset(&uresp, 0, sizeof uresp);
uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index c621f8794b8..90f4c4d2e98 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -113,6 +113,7 @@ struct mthca_eq {
int nent;
struct mthca_buf_list *page_list;
struct mthca_mr mr;
+ char irq_name[IB_DEVICE_NAME_MAX];
};
struct mthca_av;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f5081bfde6d..c10576fa60c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
}
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
spin_lock_irq(&send_cq->lock);
- else if (send_cq->cqn < recv_cq->cqn) {
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
spin_lock_irq(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
}
static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+ __releases(&send_cq->lock) __releases(&recv_cq->lock)
{
- if (send_cq == recv_cq)
+ if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
- else if (send_cq->cqn < recv_cq->cqn) {
+ } else if (send_cq->cqn < recv_cq->cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
} else {
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index acb6817f606..2a13a163d33 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bf1720f7f35..bcc6abc4faf 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
void nes_cm_disconn_worker(void *);
/* nes_verbs.c */
-int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
struct nes_ib_device *nes_init_ofa_device(struct net_device *);
void nes_destroy_ofa_device(struct nes_ib_device *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 114b802771a..73473db1986 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
*/
int nes_cm_disconn(struct nes_qp *nesqp)
{
- unsigned long flags;
-
- spin_lock_irqsave(&nesqp->lock, flags);
- if (nesqp->disconn_pending == 0) {
- nesqp->disconn_pending++;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- /* init our disconnect work element, to */
- INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
+ struct disconn_work *work;
- queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
- } else
- spin_unlock_irqrestore(&nesqp->lock, flags);
+ work = kzalloc(sizeof *work, GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM; /* Timer will clean up */
+ nes_add_ref(&nesqp->ibqp);
+ work->nesqp = nesqp;
+ INIT_WORK(&work->work, nes_disconnect_worker);
+ queue_work(g_cm_core->disconn_wq, &work->work);
return 0;
}
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
*/
static void nes_disconnect_worker(struct work_struct *work)
{
- struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct nes_qp *nesqp = dwork->nesqp;
+ kfree(dwork);
nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
nesqp->last_aeq, nesqp->hwqp.qp_id);
nes_cm_disconn_true(nesqp);
+ nes_rem_ref(&nesqp->ibqp);
}
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
u16 last_ae;
u8 original_hw_tcp_state;
u8 original_ibqp_state;
- u8 issued_disconnect_reset = 0;
+ enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ u32 flush_q = NES_CQP_FLUSH_RQ;
+ struct ib_event ibevent;
if (!nesqp) {
nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
original_ibqp_state = nesqp->ibqp_state;
last_ae = nesqp->last_aeq;
+ if (nesqp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ nesqp->cm_id = NULL;
+ if (nesqp->flush_issued == 0) {
+ nesqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
+ disconn_status = IW_CM_EVENT_STATUS_RESET;
+ }
+
+ if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+ (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+ (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ issue_close = 1;
+ nesqp->cm_id = NULL;
+ if (nesqp->flush_issued == 0) {
+ nesqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+ if ((issue_flush) && (nesqp->destroyed == 0)) {
+ /* Flush the queue(s) */
+ if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
+ flush_q |= NES_CQP_FLUSH_SQ;
+ flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
- if ((nesqp->cm_id) && (cm_id->event_handler)) {
- if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- ((original_ibqp_state == IB_QPS_RTS) &&
- (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ if (nesqp->term_flags) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.event = nesqp->terminate_eventtype;
+ ibevent.element.qp = &nesqp->ibqp;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ }
+
+ if ((cm_id) && (cm_id->event_handler)) {
+ if (issue_disconn) {
atomic_inc(&cm_disconnects);
cm_event.event = IW_CM_EVENT_DISCONNECT;
- if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
- cm_event.status = IW_CM_EVENT_STATUS_RESET;
- nes_debug(NES_DBG_CM, "Generating a CM "
- "Disconnect Event (status reset) for "
- "QP%u, cm_id = %p. \n",
- nesqp->hwqp.qp_id, cm_id);
- } else
- cm_event.status = IW_CM_EVENT_STATUS_OK;
-
+ cm_event.status = disconn_status;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
nesqp->hwqp.sq_tail, cm_id,
atomic_read(&nesqp->refcount));
- spin_unlock_irqrestore(&nesqp->lock, flags);
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
nes_debug(NES_DBG_CM, "OFA CM event_handler "
"returned, ret=%d\n", ret);
- spin_lock_irqsave(&nesqp->lock, flags);
}
- nesqp->disconn_pending = 0;
- /* There might have been another AE while the lock was released */
- original_hw_tcp_state = nesqp->hw_tcp_state;
- original_ibqp_state = nesqp->ibqp_state;
- last_ae = nesqp->last_aeq;
-
- if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
- ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
- (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
- (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
- (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ if (issue_close) {
atomic_inc(&cm_closes);
- nesqp->cm_id = NULL;
- nesqp->in_disconnect = 0;
- spin_unlock_irqrestore(&nesqp->lock, flags);
nes_disconnect(nesqp, 1);
cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
}
cm_id->rem_ref(cm_id);
-
- spin_lock_irqsave(&nesqp->lock, flags);
- if (nesqp->flush_issued == 0) {
- nesqp->flush_issued = 1;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- flush_wqes(nesvnic->nesdev, nesqp,
- NES_CQP_FLUSH_RQ, 1);
- } else
- spin_unlock_irqrestore(&nesqp->lock, flags);
- } else {
- cm_id = nesqp->cm_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- /* check to see if the inbound reset beat the outbound reset */
- if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
- nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
- "due to inbound reset beating the "
- "outbound reset.\n", nesqp->hwqp.qp_id);
- }
}
- } else {
- nesqp->disconn_pending = 0;
- spin_unlock_irqrestore(&nesqp->lock, flags);
}
return 0;
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 8b7e7c0e496..90e8e4d8a5c 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -410,8 +410,6 @@ struct nes_cm_ops {
int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
enum nes_timer_type, int, int);
-int nes_cm_disconn(struct nes_qp *);
-
int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
int nes_reject(struct iw_cm_id *, const void *, u8);
int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 4a84d02ece0..63a1a8e1e8a 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
static void process_critical_error(struct nes_device *nesdev);
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
+static void nes_terminate_timeout(unsigned long context);
+static void nes_terminate_start_timer(struct nes_qp *nesqp);
#ifdef CONFIG_INFINIBAND_NES_DEBUG
static unsigned char *nes_iwarp_state_str[] = {
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
}
+static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
+{
+ u16 pkt_len;
+
+ if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
+ /* skip over ethernet header */
+ pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
+ pkt += ETH_HLEN;
+
+ /* Skip over IP and TCP headers */
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+ }
+ return pkt;
+}
+
+/* Determine if incoming error pkt is rdma layer */
+static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
+{
+ u8 *pkt;
+ u16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+ pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+ mpa = (u16 *)locate_mpa(pkt, aeq_info);
+ opcode = be16_to_cpu(mpa[1]) & 0xf;
+ }
+
+ return opcode;
+}
+
+/* Build iWARP terminate header */
+static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
+{
+ u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+ u16 ddp_seg_len;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u8 flush_code = 0;
+ struct nes_terminate_hdr *termhdr;
+
+ termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
+ memset(termhdr, 0, 64);
+
+ if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+
+ /* Use data from offending packet to fill in ddp & rdma hdrs */
+ pkt = locate_mpa(pkt, aeq_info);
+ ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
+ if (ddp_seg_len) {
+ copy_len = 2;
+ termhdr->hdrct = DDP_LEN_FLAG;
+ if (pkt[2] & 0x80) {
+ is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ copy_len += TERM_DDP_LEN_TAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ copy_len += TERM_DDP_LEN_UNTAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
+ if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
+ copy_len += TERM_RDMA_LEN;
+ termhdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+ }
+ }
+
+ switch (async_event_id) {
+ case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
+ switch (iwarp_opcode(nesqp, aeq_info)) {
+ case IWARP_OPCODE_WRITE:
+ flush_code = IB_WC_LOC_PROT_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_INV_STAG;
+ break;
+ default:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_STAG;
+ }
+ break;
+ case NES_AEQE_AEID_AMP_INVALID_STAG:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_STAG;
+ break;
+ case NES_AEQE_AEID_AMP_BAD_QP:
+ flush_code = IB_WC_LOC_QP_OP_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_QN;
+ break;
+ case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+ case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
+ switch (iwarp_opcode(nesqp, aeq_info)) {
+ case IWARP_OPCODE_SEND_INV:
+ case IWARP_OPCODE_SEND_SE_INV:
+ flush_code = IB_WC_REM_OP_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_CANT_INV_STAG;
+ break;
+ default:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_STAG;
+ }
+ break;
+ case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+ if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
+ flush_code = IB_WC_LOC_PROT_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_BOUNDS;
+ } else {
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_INV_BOUNDS;
+ }
+ break;
+ case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
+ case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_ACCESS;
+ break;
+ case NES_AEQE_AEID_AMP_TO_WRAP:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_TO_WRAP;
+ break;
+ case NES_AEQE_AEID_AMP_BAD_PD:
+ switch (iwarp_opcode(nesqp, aeq_info)) {
+ case IWARP_OPCODE_WRITE:
+ flush_code = IB_WC_LOC_PROT_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
+ break;
+ case IWARP_OPCODE_SEND_INV:
+ case IWARP_OPCODE_SEND_SE_INV:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_CANT_INV_STAG;
+ break;
+ default:
+ flush_code = IB_WC_REM_ACCESS_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+ termhdr->error_code = RDMAP_UNASSOC_STAG;
+ }
+ break;
+ case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+ flush_code = IB_WC_LOC_LEN_ERR;
+ termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+ termhdr->error_code = MPA_MARKER;
+ break;
+ case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+ termhdr->error_code = MPA_CRC;
+ break;
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+ flush_code = IB_WC_LOC_LEN_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+ termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+ break;
+ case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
+ case NES_AEQE_AEID_DDP_NO_L_BIT:
+ flush_code = IB_WC_FATAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+ termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+ break;
+ case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
+ case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ flush_code = IB_WC_LOC_LEN_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+ flush_code = IB_WC_GENERAL_ERR;
+ if (is_tagged) {
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+ termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
+ } else {
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
+ }
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_MO;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ flush_code = IB_WC_REM_OP_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+ termhdr->error_code = DDP_UNTAGGED_INV_QN;
+ break;
+ case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ flush_code = IB_WC_GENERAL_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_INV_RDMAP_VER;
+ break;
+ case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+ flush_code = IB_WC_LOC_QP_OP_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_UNEXPECTED_OP;
+ break;
+ default:
+ flush_code = IB_WC_FATAL_ERR;
+ termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+ termhdr->error_code = RDMAP_UNSPECIFIED;
+ break;
+ }
+
+ if (copy_len)
+ memcpy(termhdr + 1, pkt, copy_len);
+
+ if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
+ if (aeq_info & NES_AEQE_SQ)
+ nesqp->term_sq_flush_code = flush_code;
+ else
+ nesqp->term_rq_flush_code = flush_code;
+ }
+
+ return sizeof(struct nes_terminate_hdr) + copy_len;
+}
+
+static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
+ struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
+{
+ u64 context;
+ unsigned long flags;
+ u32 aeq_info;
+ u16 async_event_id;
+ u8 tcp_state;
+ u8 iwarp_state;
+ u32 termlen = 0;
+ u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
+ NES_CQP_QP_TERM_DONT_SEND_FIN;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ if (nesqp->term_flags & NES_TERM_SENT)
+ return; /* Sanity check */
+
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+ iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+ async_event_id = (u16)aeq_info;
+
+ context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
+ if (!context) {
+ WARN_ON(!context);
+ return;
+ }
+
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ nesqp->terminate_eventtype = eventtype;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ if (nesadapter->send_term_ok)
+ termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
+ else
+ mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
+
+ nes_terminate_start_timer(nesqp);
+ nesqp->term_flags |= NES_TERM_SENT;
+ nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+}
+
+static void nes_terminate_send_fin(struct nes_device *nesdev,
+ struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+ u32 aeq_info;
+ u16 async_event_id;
+ u8 tcp_state;
+ u8 iwarp_state;
+ unsigned long flags;
+
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+ iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+ async_event_id = (u16)aeq_info;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ /* Send the fin only */
+ nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
+ NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
+}
+
+/* Cleanup after a terminate sent or received */
+static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
+{
+ u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+ unsigned long flags;
+ struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u8 first_time = 0;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ if (nesqp->hte_added) {
+ nesqp->hte_added = 0;
+ next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+ }
+
+ first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
+ nesqp->term_flags |= NES_TERM_DONE;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ /* Make sure we go through this only once */
+ if (first_time) {
+ if (timeout_occurred == 0)
+ del_timer(&nesqp->terminate_timer);
+ else
+ next_iwarp_state |= NES_CQP_QP_RESET;
+
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ }
+}
+
+static void nes_terminate_received(struct nes_device *nesdev,
+ struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+ u32 aeq_info;
+ u8 *pkt;
+ u32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+ /* Terminate is not a performance path so the silicon */
+ /* did not validate the frame - do it now */
+ pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+ mpa = (u32 *)locate_mpa(pkt, aeq_info);
+ ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
+ else if (be32_to_cpu(mpa[2]) != 2)
+ aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
+ else if (be32_to_cpu(mpa[3]) != 1)
+ aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (be32_to_cpu(mpa[4]) != 0)
+ aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ if (aeq_id) {
+ /* Bad terminate recvd - send back a terminate */
+ aeq_info = (aeq_info & 0xffff0000) | aeq_id;
+ aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+ return;
+ }
+ }
+
+ nesqp->term_flags |= NES_TERM_RCVD;
+ nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
+ nes_terminate_start_timer(nesqp);
+ nes_terminate_send_fin(nesdev, nesqp, aeqe);
+}
+
+/* Timeout routine in case terminate fails to complete */
+static void nes_terminate_timeout(unsigned long context)
+{
+ struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+
+ nes_terminate_done(nesqp, 1);
+}
+
+/* Set a timer in case hw cannot complete the terminate sequence */
+static void nes_terminate_start_timer(struct nes_qp *nesqp)
+{
+ init_timer(&nesqp->terminate_timer);
+ nesqp->terminate_timer.function = nes_terminate_timeout;
+ nesqp->terminate_timer.expires = jiffies + HZ;
+ nesqp->terminate_timer.data = (unsigned long)nesqp;
+ add_timer(&nesqp->terminate_timer);
+}
+
/**
* nes_process_iwarp_aeqe
*/
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_hw_aeqe *aeqe)
{
u64 context;
- u64 aeqe_context = 0;
unsigned long flags;
struct nes_qp *nesqp;
+ struct nes_hw_cq *hw_cq;
+ struct nes_cq *nescq;
int resource_allocated;
- /* struct iw_cm_id *cm_id; */
struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct ib_event ibevent;
- /* struct iw_cm_event cm_event; */
u32 aeq_info;
u32 next_iwarp_state = 0;
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
+ int must_disconn = 1;
+ int must_terminate = 0;
+ struct ib_event ibevent;
nes_debug(NES_DBG_AEQ, "\n");
aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
- if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
+ if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
} else {
- aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
- aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
BUG_ON(!context);
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
- nesqp = *((struct nes_qp **)&context);
+ nesqp = (struct nes_qp *)(unsigned long)context;
+
+ if (nesqp->term_flags)
+ return; /* Ignore it, wait for close complete */
+
if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
nesqp->cm_id->add_ref(nesqp->cm_id);
schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
async_event_id, nesqp->last_aeq, tcp_state);
}
+
if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
(nesqp->ibqp_state != IB_QPS_RTS)) {
/* FIN Received but tcp state or IB state moved on,
should expect a close complete */
return;
}
+
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ if (nesqp->term_flags) {
+ nes_terminate_done(nesqp, 0);
+ return;
+ }
+
case NES_AEQE_AEID_LLP_CONNECTION_RESET:
- case NES_AEQE_AEID_TERMINATE_SENT:
- case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
case NES_AEQE_AEID_RESET_SENT:
- nesqp = *((struct nes_qp **)&context);
+ nesqp = (struct nes_qp *)(unsigned long)context;
if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
tcp_state = NES_AEQE_TCP_STATE_CLOSED;
}
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
(tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
nesqp->hte_added = 0;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
- nesqp->hwqp.qp_id);
- nes_hw_modify_qp(nesdev, nesqp,
- NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
- spin_lock_irqsave(&nesqp->lock, flags);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
}
if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
break;
case NES_AEQE_IWARP_STATE_TERMINATE:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
- if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
- next_iwarp_state |= 0x02000000;
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- }
+ must_disconn = 0; /* terminate path takes care of disconn */
+ if (nesqp->term_flags == 0)
+ must_terminate = 1;
break;
- default:
- next_iwarp_state = 0;
- }
- spin_unlock_irqrestore(&nesqp->lock, flags);
- if (next_iwarp_state) {
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
- " also added another reference\n",
- nesqp->hwqp.qp_id, next_iwarp_state);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
}
- nes_cm_disconn(nesqp);
} else {
if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
/* FIN Received but ib state not RTS,
close complete will be on its way */
- spin_unlock_irqrestore(&nesqp->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&nesqp->lock, flags);
- if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
- " also added another reference\n",
- nesqp->hwqp.qp_id, next_iwarp_state);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ must_disconn = 0;
}
- nes_cm_disconn(nesqp);
}
- break;
- case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
- " event on QP%u \n Q2 Data:\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- ((nesqp->ibqp_state == IB_QPS_RTS)&&
- (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+
+ if (must_terminate)
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+ else if (must_disconn) {
+ if (next_iwarp_state) {
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
+ nesqp->hwqp.qp_id, next_iwarp_state);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ }
nes_cm_disconn(nesqp);
- } else {
- nesqp->in_disconnect = 0;
- wake_up(&nesqp->kick_waitq);
}
break;
- case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
- nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- nesqp->last_aeq = async_event_id;
- if (nesqp->cm_id) {
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
- " event on QP%u, remote IP = 0x%08X \n",
- nesqp->hwqp.qp_id,
- ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
- } else {
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
- " event on QP%u \n",
- nesqp->hwqp.qp_id);
- }
- spin_unlock_irqrestore(&nesqp->lock, flags);
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
+
+ case NES_AEQE_AEID_TERMINATE_SENT:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_send_fin(nesdev, nesqp, aeqe);
break;
- case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
- if (NES_AEQE_INBOUND_RDMA&aeq_info) {
- nesqp = nesadapter->qp_table[le32_to_cpu(
- aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
- } else {
- /* TODO: get the actual WQE and mask off wqe index */
- context &= ~((u64)511);
- nesqp = *((struct nes_qp **)&context);
- }
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
+
+ case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_received(nesdev, nesqp, aeqe);
break;
+
+ case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+ case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- break;
+ case NES_AEQE_AEID_AMP_INVALID_STAG:
+ case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
+ case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
- nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words
- [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u,"
- " nesqp = %p, AE reported %p\n",
- nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context));
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+ case NES_AEQE_AEID_AMP_TO_WRAP:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
+ break;
+
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
+ case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
+ aeq_info &= 0xffff0000;
+ aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
+ aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
}
+
+ case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
+ case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+ case NES_AEQE_AEID_AMP_BAD_QP:
+ case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+ case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
+ case NES_AEQE_AEID_DDP_NO_L_BIT:
+ case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
+ case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+ case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+ case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case NES_AEQE_AEID_AMP_BAD_PD:
+ case NES_AEQE_AEID_AMP_FASTREG_SHARED:
+ case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
+ case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
+ case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
+ case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
+ case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
+ case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
+ case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
+ case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
+ case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
+ case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
+ case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
+ case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
+ case NES_AEQE_AEID_BAD_CLOSE:
+ case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
+ case NES_AEQE_AEID_STAG_ZERO_INVALID:
+ case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
+ case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ nesqp = (struct nes_qp *)(unsigned long)context;
+ nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
break;
+
case NES_AEQE_AEID_CQ_OPERATION_ERROR:
context <<= 1;
nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
@@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
if (resource_allocated) {
printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+ hw_cq = (struct nes_hw_cq *)(unsigned long)context;
+ if (hw_cq) {
+ nescq = container_of(hw_cq, struct nes_cq, hw_cq);
+ if (nescq->ibcq.event_handler) {
+ ibevent.device = nescq->ibcq.device;
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &nescq->ibcq;
+ nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
+ }
+ }
}
break;
- case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- nesqp = nesadapter->qp_table[le32_to_cpu(
- aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
- "_FOR_AVAILABLE_BUFFER event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- /* tell cm to disconnect, cm will queue work to thread */
- nes_cm_disconn(nesqp);
- break;
- case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
- "_NO_BUFFER_AVAILABLE event on QP%u\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- /* tell cm to disconnect, cm will queue work to thread */
- nes_cm_disconn(nesqp);
- break;
- case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
- " event on QP%u \n Q2 Data:\n",
- nesqp->hwqp.qp_id);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_FATAL;
- nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
- }
- /* tell cm to disconnect, cm will queue work to thread */
- nes_cm_disconn(nesqp);
- break;
- /* TODO: additional AEs need to be here */
- case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
- nesqp = *((struct nes_qp **)&context);
- spin_lock_irqsave(&nesqp->lock, flags);
- nesqp->hw_iwarp_state = iwarp_state;
- nesqp->hw_tcp_state = tcp_state;
- nesqp->last_aeq = async_event_id;
- spin_unlock_irqrestore(&nesqp->lock, flags);
- if (nesqp->ibqp.event_handler) {
- ibevent.device = nesqp->ibqp.device;
- ibevent.element.qp = &nesqp->ibqp;
- ibevent.event = IB_EVENT_QP_ACCESS_ERR;
- nesqp->ibqp.event_handler(&ibevent,
- nesqp->ibqp.qp_context);
- }
- nes_cm_disconn(nesqp);
- break;
+
default:
nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
async_event_id);
@@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
}
-
/**
* nes_iwarp_ce_handler
*/
@@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
{
struct nes_cqp_request *cqp_request;
struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
+ u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
int ret;
cqp_request = nes_get_cqp_request(nesdev);
@@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
cqp_wqe = &cqp_request->cqp_wqe;
nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ /* If wqe in error was identified, set code to be put into cqe */
+ if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
+ which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+ sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
+ nesqp->term_sq_flush_code = 0;
+ }
+
+ if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
+ which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+ rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
+ nesqp->term_rq_flush_code = 0;
+ }
+
+ if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
+ cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
+ cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
+ }
+
cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
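The flush_wqes() changes above load a 32-bit completion code per work queue, packed as major status in the upper 16 bits and minor status in the lower 16 bits; the terminate path substitutes a driver-defined major (CQE_MAJOR_DRV) so the poll path can recover the saved flush code. A standalone sketch of that packing, with simplified macro names mirroring the enums added in nes_hw.h (the 0x0005 sample status is an assumption, not from the patch):

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define CQE_MAJOR_FLUSH 1
#define CQE_MAJOR_DRV   0x8000
#define CQE_MINOR_FLUSH 1

static uint32_t pack_code(uint16_t major, uint16_t minor)
{
	return ((uint32_t)major << 16) | minor;	/* major | minor in one word */
}

int main(void)
{
	uint32_t hw_flush = pack_code(CQE_MAJOR_FLUSH, CQE_MINOR_FLUSH);
	uint32_t drv_code = pack_code(CQE_MAJOR_DRV, 0x0005 /* sample WC status */);

	/* Poll-side decode: a driver major means the low 16 bits carry the status */
	if ((drv_code >> 16) == CQE_MAJOR_DRV)
		printf("driver-supplied status = 0x%04x\n", drv_code & 0xffff);
	if ((hw_flush >> 16) == CQE_MAJOR_FLUSH)
		printf("ordinary flush completion\n");
	return 0;
}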
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c3654c6383f..f28a41ba9fa 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
};
#define NES_CQP_OP_IWARP_STATE_SHIFT 28
+#define NES_CQP_OP_TERMLEN_SHIFT 28
enum nes_cqp_qp_bits {
NES_CQP_QP_ARP_VALID = (1<<8),
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
+ NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
NES_CQP_QP_RESET = (1<<31),
};
enum nes_cqp_qp_wqe_word_idx {
NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
+ NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
+ NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
};
@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
enum nes_cqp_flush_bits {
NES_CQP_FLUSH_SQ = (1<<30),
NES_CQP_FLUSH_RQ = (1<<31),
+ NES_CQP_FLUSH_MAJ_MIN = (1<<28),
};
enum nes_cqe_opcode_bits {
@@ -633,11 +639,14 @@ enum nes_aeqe_bits {
NES_AEQE_INBOUND_RDMA = (1<<19),
NES_AEQE_IWARP_STATE_MASK = (7<<20),
NES_AEQE_TCP_STATE_MASK = (0xf<<24),
+ NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
NES_AEQE_VALID = (1<<31),
};
#define NES_AEQE_IWARP_STATE_SHIFT 20
#define NES_AEQE_TCP_STATE_SHIFT 24
+#define NES_AEQE_Q2_DATA_ETHERNET (1<<28)
+#define NES_AEQE_Q2_DATA_MPA (1<<29)
enum nes_aeqe_iwarp_state {
NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
NES_IWARP_SQ_OP_NOP = 12,
};
+enum nes_iwarp_cqe_major_code {
+ NES_IWARP_CQE_MAJOR_FLUSH = 1,
+ NES_IWARP_CQE_MAJOR_DRV = 0x8000
+};
+
+enum nes_iwarp_cqe_minor_code {
+ NES_IWARP_CQE_MINOR_FLUSH = 1
+};
+
#define NES_EEPROM_READ_REQUEST (1<<16)
#define NES_MAC_ADDR_VALID (1<<20)
@@ -1119,6 +1137,7 @@ struct nes_adapter {
u8 netdev_max; /* from host nic address count in EEPROM */
u8 port_count;
u8 virtwq;
+ u8 send_term_ok;
u8 et_use_adaptive_rx_coalesce;
u8 adapter_fcn_count;
u8 pft_mcast_map[NES_PFT_SIZE];
@@ -1217,6 +1236,90 @@ struct nes_ib_device {
u32 num_pd;
};
+enum nes_hdrct_flags {
+ DDP_LEN_FLAG = 0x80,
+ DDP_HDR_FLAG = 0x40,
+ RDMA_HDR_FLAG = 0x20
+};
+
+enum nes_term_layers {
+ LAYER_RDMA = 0,
+ LAYER_DDP = 1,
+ LAYER_MPA = 2
+};
+
+enum nes_term_error_types {
+ RDMAP_CATASTROPHIC = 0,
+ RDMAP_REMOTE_PROT = 1,
+ RDMAP_REMOTE_OP = 2,
+ DDP_CATASTROPHIC = 0,
+ DDP_TAGGED_BUFFER = 1,
+ DDP_UNTAGGED_BUFFER = 2,
+ DDP_LLP = 3
+};
+
+enum nes_term_rdma_errors {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_INV_BOUNDS = 0x01,
+ RDMAP_ACCESS = 0x02,
+ RDMAP_UNASSOC_STAG = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_RDMAP_VER = 0x05,
+ RDMAP_UNEXPECTED_OP = 0x06,
+ RDMAP_CATASTROPHIC_LOCAL = 0x07,
+ RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff
+};
+
+enum nes_term_ddp_errors {
+ DDP_CATASTROPHIC_LOCAL = 0x00,
+ DDP_TAGGED_INV_STAG = 0x00,
+ DDP_TAGGED_BOUNDS = 0x01,
+ DDP_TAGGED_UNASSOC_STAG = 0x02,
+ DDP_TAGGED_TO_WRAP = 0x03,
+ DDP_TAGGED_INV_DDP_VER = 0x04,
+ DDP_UNTAGGED_INV_QN = 0x01,
+ DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+ DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+ DDP_UNTAGGED_INV_MO = 0x04,
+ DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+ DDP_UNTAGGED_INV_DDP_VER = 0x06
+};
+
+enum nes_term_mpa_errors {
+ MPA_CLOSED = 0x01,
+ MPA_CRC = 0x02,
+ MPA_MARKER = 0x03,
+ MPA_REQ_RSP = 0x04,
+};
+
+struct nes_terminate_hdr {
+ u8 layer_etype;
+ u8 error_code;
+ u8 hdrct;
+ u8 rsvd;
+};
+
+/* Used to determine how to fill in terminate error codes */
+#define IWARP_OPCODE_WRITE 0
+#define IWARP_OPCODE_READREQ 1
+#define IWARP_OPCODE_READRSP 2
+#define IWARP_OPCODE_SEND 3
+#define IWARP_OPCODE_SEND_INV 4
+#define IWARP_OPCODE_SEND_SE 5
+#define IWARP_OPCODE_SEND_SE_INV 6
+#define IWARP_OPCODE_TERM 7
+
+/* These values are used only during terminate processing */
+#define TERM_DDP_LEN_TAGGED 14
+#define TERM_DDP_LEN_UNTAGGED 18
+#define TERM_RDMA_LEN 28
+#define RDMA_OPCODE_MASK 0x0f
+#define RDMA_READ_REQ_OPCODE 1
+#define BAD_FRAME_OFFSET 64
+#define CQE_MAJOR_DRV 0x8000
+
#define nes_vlan_rx vlan_hwaccel_receive_skb
#define nes_netif_rx netif_receive_skb
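Among the nes_hw.h additions above, struct nes_terminate_hdr keeps the iWARP layer in the upper nibble of layer_etype and the error type in the lower nibble, which is why nes_bld_terminate_hdr() writes values such as (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER. A small sketch of that encoding, using only a subset of the enums and an assumed error value for illustration:

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

enum term_layer { LAYER_RDMA = 0, LAYER_DDP = 1, LAYER_MPA = 2 };

struct terminate_hdr {
	uint8_t layer_etype;	/* layer in high nibble, error type in low nibble */
	uint8_t error_code;
	uint8_t hdrct;
	uint8_t rsvd;
};

int main(void)
{
	struct terminate_hdr hdr = {
		.layer_etype = (LAYER_DDP << 4) | 2,	/* DDP, untagged-buffer class */
		.error_code  = 0x01,			/* sample error code */
	};

	printf("layer=%u etype=%u error=0x%02x\n",
	       hdr.layer_etype >> 4, hdr.layer_etype & 0x0f, hdr.error_code);
	return 0;
}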
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index a282031d15c..9687c397ce1 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
} else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
nesadapter->virtwq = 1;
}
+ if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
+ nesadapter->send_term_ok = 1;
+
nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
(u32)((u8)eeprom_data);
@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
}
if (cqp_request == NULL) {
- cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
+ cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
if (cqp_request) {
cqp_request->dynamic = 1;
INIT_LIST_HEAD(&cqp_request->list);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 21e0fd336cf..a680c42d6e8 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
*/
static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct net_device *netdev = nesvnic->netdev;
+
memset(props, 0, sizeof(*props));
- props->max_mtu = IB_MTU_2048;
- props->active_mtu = IB_MTU_2048;
+ props->max_mtu = IB_MTU_4096;
+
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
props->lid = 1;
props->lmc = 0;
props->sm_lid = 0;
props->sm_sl = 0;
- props->state = IB_PORT_ACTIVE;
+ if (nesvnic->linkup)
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_DOWN;
props->phys_state = 0;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
@@ -1506,12 +1523,45 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
/**
+ * nes_clean_cq
+ */
+static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
+{
+ u32 cq_head;
+ u32 lo;
+ u32 hi;
+ u64 u64temp;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&nescq->lock, flags);
+
+ cq_head = nescq->hw_cq.cq_head;
+ while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
+ rmb();
+ lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
+ u64temp = (((u64)hi) << 32) | ((u64)lo);
+ u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+ if (u64temp == (u64)(unsigned long)nesqp) {
+ /* Zero the context value so cqe will be ignored */
+ nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
+ nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
+ }
+
+ if (++cq_head >= nescq->hw_cq.cq_size)
+ cq_head = 0;
+ }
+
+ spin_unlock_irqrestore(&nescq->lock, flags);
+}
+
+
+/**
* nes_destroy_qp
*/
static int nes_destroy_qp(struct ib_qp *ibqp)
{
struct nes_qp *nesqp = to_nesqp(ibqp);
- /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
struct nes_ucontext *nes_ucontext;
struct ib_qp_attr attr;
struct iw_cm_id *cm_id;
@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
}
-
if (nesqp->user_mode) {
if ((ibqp->uobject)&&(ibqp->uobject->context)) {
nes_ucontext = to_nesucontext(ibqp->uobject->context);
@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
}
if (nesqp->pbl_pbase)
kunmap(nesqp->page);
+ } else {
+ /* Clean any pending completions from the cq(s) */
+ if (nesqp->nesscq)
+ nes_clean_cq(nesqp, nesqp->nesscq);
+
+ if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
+ nes_clean_cq(nesqp, nesqp->nesrcq);
}
nes_rem_ref(&nesqp->ibqp);
@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
* nes_hw_modify_qp
*/
int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
- u32 next_iwarp_state, u32 wait_completion)
+ u32 next_iwarp_state, u32 termlen, u32 wait_completion)
{
struct nes_hw_cqp_wqe *cqp_wqe;
/* struct iw_cm_id *cm_id = nesqp->cm_id; */
@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
+ /* If sending a terminate message, fill in the length (in words) */
+ if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
+ !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
+ termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
+ }
+
atomic_set(&cqp_request->refcount, 2);
nes_post_cqp_request(nesdev, cqp_request);
@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
nesqp->hwqp.qp_id);
+ if (nesqp->term_flags)
+ del_timer(&nesqp->terminate_timer);
+
next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
/* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
if (nesqp->hte_added) {
@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (issue_modify_qp) {
nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
- ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
+ ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
if (ret)
nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
" failed for QP%u.\n",
@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
head = nesqp->hwqp.sq_head;
while (ib_wr) {
+ /* Check for QP error */
+ if (nesqp->term_flags) {
+ err = -EINVAL;
+ break;
+ }
+
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
err = -EINVAL;
@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
head = nesqp->hwqp.rq_head;
while (ib_wr) {
+ /* Check for QP error */
+ if (nesqp->term_flags) {
+ err = -EINVAL;
+ break;
+ }
+
if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
err = -EINVAL;
break;
@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
u64 u64temp;
u64 wrid;
- /* u64 u64temp; */
unsigned long flags = 0;
struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
struct nes_qp *nesqp;
struct nes_hw_cqe cqe;
u32 head;
- u32 wq_tail;
+ u32 wq_tail = 0;
u32 cq_size;
u32 cqe_count = 0;
u32 wqe_index;
u32 u32temp;
- /* u32 counter; */
+ u32 move_cq_head = 1;
+ u32 err_code;
nes_debug(NES_DBG_CQ, "\n");
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
cq_size = nescq->hw_cq.cq_size;
while (cqe_count < num_entries) {
- if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
- NES_CQE_VALID) {
- /*
- * Make sure we read CQ entry contents *after*
- * we've checked the valid bit.
- */
- rmb();
-
- cqe = nescq->hw_cq.cq_vbase[head];
- nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
- u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
- wqe_index = u32temp &
- (nesdev->nesadapter->max_qp_wr - 1);
- u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
- /* parse CQE, get completion context from WQE (either rq or sq */
- u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
- ((u64)u32temp);
- nesqp = *((struct nes_qp **)&u64temp);
+ if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
+ NES_CQE_VALID) == 0)
+ break;
+
+ /*
+ * Make sure we read CQ entry contents *after*
+ * we've checked the valid bit.
+ */
+ rmb();
+
+ cqe = nescq->hw_cq.cq_vbase[head];
+ u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
+ u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+ /* parse CQE, get completion context from WQE (either rq or sq) */
+ u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+ ((u64)u32temp);
+
+ if (u64temp) {
+ nesqp = (struct nes_qp *)(unsigned long)u64temp;
memset(entry, 0, sizeof *entry);
if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
entry->status = IB_WC_SUCCESS;
} else {
- entry->status = IB_WC_WR_FLUSH_ERR;
+ err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
+ if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
+ entry->status = err_code & 0x0000ffff;
+
+ /* The rest of the cqe's will be marked as flushed */
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
+ cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
+ NES_IWARP_CQE_MINOR_FLUSH);
+ } else
+ entry->status = IB_WC_WR_FLUSH_ERR;
}
entry->qp = &nesqp->ibqp;
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
if (nesqp->skip_lsmm) {
nesqp->skip_lsmm = 0;
- wq_tail = nesqp->hwqp.sq_tail++;
+ nesqp->hwqp.sq_tail++;
}
/* Working on a SQ Completion*/
- wq_tail = wqe_index;
- nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
- wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
- ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
- entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
- switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
case NES_IWARP_SQ_OP_RDMAW:
nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
case NES_IWARP_SQ_OP_RDMAR:
nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
entry->opcode = IB_WC_RDMA_READ;
- entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
break;
case NES_IWARP_SQ_OP_SENDINV:
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
entry->opcode = IB_WC_SEND;
break;
}
+
+ nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
+ if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
+ move_cq_head = 0;
+ wq_tail = nesqp->hwqp.sq_tail;
+ }
} else {
/* Working on a RQ Completion*/
- wq_tail = wqe_index;
- nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
- wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
- ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+ wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
+ ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
entry->opcode = IB_WC_RECV;
+
+ nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
+ if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
+ move_cq_head = 0;
+ wq_tail = nesqp->hwqp.rq_tail;
+ }
}
+
entry->wr_id = wrid;
+ entry++;
+ cqe_count++;
+ }
+ if (move_cq_head) {
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
if (++head >= cq_size)
head = 0;
- cqe_count++;
nescq->polled_completions++;
+
if ((nescq->polled_completions > (cq_size / 2)) ||
(nescq->polled_completions == 255)) {
nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
- " are pending %u of %u.\n",
- nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
+ " are pending %u of %u.\n",
+ nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
nes_write32(nesdev->regs+NES_CQE_ALLOC,
- nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
+ nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
nescq->polled_completions = 0;
}
- entry++;
- } else
- break;
+ } else {
+ /* Update the wqe index and set status to flush */
+ wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
+ cpu_to_le32(wqe_index);
+ move_cq_head = 1; /* ready for next pass */
+ }
}
if (nescq->polled_completions) {
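The nes_query_port() rework above stops hard-coding IB_MTU_2048 and instead derives active_mtu from the netdev MTU, rounding down to the nearest IB MTU step, and reports port state from the link status. A reduced, standalone sketch of that mapping (the ib_mtu_t enum values here match the usual IB verbs definitions but are restated locally as an assumption):

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

enum ib_mtu_t { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

static enum ib_mtu_t mtu_to_ib_mtu(unsigned int mtu)
{
	if (mtu >= 4096) return IB_MTU_4096;
	if (mtu >= 2048) return IB_MTU_2048;
	if (mtu >= 1024) return IB_MTU_1024;
	if (mtu >= 512)  return IB_MTU_512;
	return IB_MTU_256;	/* anything smaller falls back to 256 */
}

int main(void)
{
	printf("mtu 1500 -> IB enum %d\n", mtu_to_ib_mtu(1500));	/* IB_MTU_1024 */
	printf("mtu 9000 -> IB enum %d\n", mtu_to_ib_mtu(9000));	/* IB_MTU_4096 */
	return 0;
}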
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 41c07f29f7c..89822d75f82 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -40,6 +40,10 @@ struct nes_device;
#define NES_MAX_USER_DB_REGIONS 4096
#define NES_MAX_USER_WQ_REGIONS 4096
+#define NES_TERM_SENT 0x01
+#define NES_TERM_RCVD 0x02
+#define NES_TERM_DONE 0x04
+
struct nes_ucontext {
struct ib_ucontext ibucontext;
struct nes_device *nesdev;
@@ -119,6 +123,11 @@ struct nes_wq {
spinlock_t lock;
};
+struct disconn_work {
+ struct work_struct work;
+ struct nes_qp *nesqp;
+};
+
struct iw_cm_id;
struct ietf_mpa_frame;
@@ -127,7 +136,6 @@ struct nes_qp {
void *allocated_buffer;
struct iw_cm_id *cm_id;
struct workqueue_struct *wq;
- struct work_struct disconn_work;
struct nes_cq *nesscq;
struct nes_cq *nesrcq;
struct nes_pd *nespd;
@@ -155,9 +163,13 @@ struct nes_qp {
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
+ struct timer_list terminate_timer;
+ enum ib_event_type terminate_eventtype;
wait_queue_head_t kick_waitq;
u16 in_disconnect;
u16 private_data_len;
+ u16 term_sq_flush_code;
+ u16 term_rq_flush_code;
u8 active_conn;
u8 skip_lsmm;
u8 user_mode;
@@ -165,7 +177,7 @@ struct nes_qp {
u8 hw_iwarp_state;
u8 flush_issued;
u8 hw_tcp_state;
- u8 disconn_pending;
+ u8 term_flags;
u8 destroyed;
};
#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 181b1f32325..8f4b4fca2a1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -31,7 +31,6 @@
*/
#include <rdma/ib_cm.h>
-#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e7e5adf84e8..e35f4a0ea9d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <rdma/ib_cache.h>
#include <linux/ip.h>
#include <linux/tcp.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e319d91f60a..2bf5116deec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
skb_queue_len(&neigh->queue));
goto err_drop;
}
- } else
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
+ return;
+ }
} else {
neigh->ah = NULL;
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
ipoib_dbg(priv, "Send unicast ARP to %04x\n",
be16_to_cpu(path->pathrec.dlid));
+ spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+ return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
/* put pseudoheader back on for next time */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index a0e97532e71..25874fc680c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -720,7 +720,9 @@ out:
}
}
+ spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
+ return;
}
unlock:
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
+static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
+ const u8 *broadcast)
+{
+ if (addrlen != INFINIBAND_ALEN)
+ return 0;
+ /* reserved QPN, prefix, scope */
+ if (memcmp(addr, broadcast, 6))
+ return 0;
+ /* signature lower, pkey */
+ if (memcmp(addr + 7, broadcast + 7, 3))
+ return 0;
+ return 1;
+}
+
void ipoib_mcast_restart_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
union ib_gid mgid;
+ if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
+ mclist->dmi_addrlen,
+ dev->broadcast))
+ continue;
+
memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
mcast = __ipoib_mcast_find(dev, &mgid);
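Note: the ipoib hunks above share one pattern: priv->lock is released before calling ipoib_send(), and the function returns instead of falling through to the common unlock label. A user-space sketch of the same shape, with a pthread mutex standing in for the spinlock and hypothetical names throughout:

#include <pthread.h>

struct ctx {
	pthread_mutex_t lock;
};

static void slow_send(void)
{
	/* may block; must not be entered with ctx->lock held */
}

static void queue_or_send(struct ctx *c, int path_ready)
{
	pthread_mutex_lock(&c->lock);
	if (path_ready) {
		pthread_mutex_unlock(&c->lock);	/* drop the lock before sending */
		slow_send();
		return;				/* skip the common unlock path */
	}
	/* ...queue the packet for later while still holding the lock... */
	pthread_mutex_unlock(&c->lock);
}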
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 4cfd084fa89..9a1d55b74d7 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -456,8 +456,11 @@ static int joydev_ioctl_common(struct joydev *joydev,
unsigned int cmd, void __user *argp)
{
struct input_dev *dev = joydev->handle.dev;
+ size_t len;
int i, j;
+ const char *name;
+ /* Process fixed-sized commands. */
switch (cmd) {
case JS_SET_CAL:
@@ -499,9 +502,22 @@ static int joydev_ioctl_common(struct joydev *joydev,
return copy_to_user(argp, joydev->corr,
sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
- case JSIOCSAXMAP:
- if (copy_from_user(joydev->abspam, argp,
- sizeof(__u8) * (ABS_MAX + 1)))
+ }
+
+ /*
+ * Process variable-sized commands (the axis and button map commands
+ * are considered variable-sized to decouple them from the values of
+ * ABS_MAX and KEY_MAX).
+ */
+ switch (cmd & ~IOCSIZE_MASK) {
+
+ case (JSIOCSAXMAP & ~IOCSIZE_MASK):
+ len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam));
+ /*
+ * FIXME: we should not copy into our axis map before
+ * validating the data.
+ */
+ if (copy_from_user(joydev->abspam, argp, len))
return -EFAULT;
for (i = 0; i < joydev->nabs; i++) {
@@ -511,13 +527,17 @@ static int joydev_ioctl_common(struct joydev *joydev,
}
return 0;
- case JSIOCGAXMAP:
- return copy_to_user(argp, joydev->abspam,
- sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0;
-
- case JSIOCSBTNMAP:
- if (copy_from_user(joydev->keypam, argp,
- sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)))
+ case (JSIOCGAXMAP & ~IOCSIZE_MASK):
+ len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam));
+ return copy_to_user(argp, joydev->abspam, len) ? -EFAULT : 0;
+
+ case (JSIOCSBTNMAP & ~IOCSIZE_MASK):
+ len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam));
+ /*
+ * FIXME: we should not copy into our keymap before
+ * validating the data.
+ */
+ if (copy_from_user(joydev->keypam, argp, len))
return -EFAULT;
for (i = 0; i < joydev->nkey; i++) {
@@ -529,25 +549,19 @@ static int joydev_ioctl_common(struct joydev *joydev,
return 0;
- case JSIOCGBTNMAP:
- return copy_to_user(argp, joydev->keypam,
- sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0;
+ case (JSIOCGBTNMAP & ~IOCSIZE_MASK):
+ len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam));
+ return copy_to_user(argp, joydev->keypam, len) ? -EFAULT : 0;
- default:
- if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) {
- int len;
- const char *name = dev->name;
-
- if (!name)
- return 0;
- len = strlen(name) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- if (copy_to_user(argp, name, len))
- return -EFAULT;
- return len;
- }
+ case JSIOCGNAME(0):
+ name = dev->name;
+ if (!name)
+ return 0;
+
+ len = min_t(size_t, _IOC_SIZE(cmd), strlen(name) + 1);
+ return copy_to_user(argp, name, len) ? -EFAULT : len;
}
+
return -EINVAL;
}
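Note: the joydev change above matches the axis/button map ioctls with the size bits masked out of the command, then clamps every copy to both the caller-declared size and the kernel buffer. A small sketch of the clamping with stand-in macros (not the real IOCSIZE_MASK/_IOC_SIZE definitions):

#include <stddef.h>

#define SIZE_MASK	0x3fff0000u			/* stand-in for IOCSIZE_MASK */
#define IOC_SIZE(cmd)	(((cmd) & SIZE_MASK) >> 16)	/* stand-in for _IOC_SIZE() */

/* Matching would be done on (cmd & ~SIZE_MASK); the copy length is then
 * the smaller of the size encoded in cmd and the kernel-side buffer. */
static size_t copy_len(unsigned int cmd, size_t kernel_buf_len)
{
	size_t user_len = IOC_SIZE(cmd);

	return user_len < kernel_buf_len ? user_len : kernel_buf_len;
}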
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index baabf830264..f6c688cae33 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -74,6 +74,7 @@ static struct iforce_device iforce_device[] = {
{ 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
+ { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
{ 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index f83185aeb51..9f289d8f52c 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -223,6 +223,7 @@ static struct usb_device_id iforce_usb_ids [] = {
{ USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */
{ USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */
{ USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
+ { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */
{ USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
{ USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
{ USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 95fe0452dae..6c6a09b1c0f 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -880,6 +880,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
};
/*
+ * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
+ * release for their volume buttons
+ */
+static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
+ 0xae, 0xb0, -1U
+};
+
+/*
* Samsung NC10,NC20 with Fn+F? key release not working
*/
static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -1537,6 +1545,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
.driver_data = atkbd_hp_zv6100_forced_release_keys,
},
{
+ .ident = "HP Presario R4000",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
+ },
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_hp_r4000_forced_release_keys,
+ },
+ {
+ .ident = "HP Presario R4100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
+ },
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_hp_r4000_forced_release_keys,
+ },
+ {
+ .ident = "HP Presario R4200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
+ },
+ .callback = atkbd_setup_forced_release,
+ .driver_data = atkbd_hp_r4000_forced_release_keys,
+ },
+ {
.ident = "Inventec Symphony",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ae04d8a494e..ccbf23ece8e 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
},
},
+ {
+ .ident = "Acer Aspire 5536",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index a9d5031b855..ea30c983a33 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -388,6 +388,32 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
return result;
}
+static int wacom_query_tablet_data(struct usb_interface *intf)
+{
+ unsigned char *rep_data;
+ int limit = 0;
+ int error;
+
+ rep_data = kmalloc(2, GFP_KERNEL);
+ if (!rep_data)
+ return -ENOMEM;
+
+ do {
+ rep_data[0] = 2;
+ rep_data[1] = 2;
+ error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
+ 2, rep_data, 2);
+ if (error >= 0)
+ error = usb_get_report(intf,
+ WAC_HID_FEATURE_REPORT, 2,
+ rep_data, 2);
+ } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
+
+ kfree(rep_data);
+
+ return error < 0 ? error : 0;
+}
+
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -398,7 +424,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
struct wacom_features *features;
struct input_dev *input_dev;
int error = -ENOMEM;
- char rep_data[2], limit = 0;
struct hid_descriptor *hid_desc;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
@@ -489,20 +514,10 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
/*
* Ask the tablet to report tablet data if it is not a Tablet PC.
- * Repeat until it succeeds
+ * Note that if query fails it is not a hard failure.
*/
- if (wacom_wac->features->type != TABLETPC) {
- do {
- rep_data[0] = 2;
- rep_data[1] = 2;
- error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
- 2, rep_data, 2);
- if (error >= 0)
- error = usb_get_report(intf,
- WAC_HID_FEATURE_REPORT, 2,
- rep_data, 2);
- } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
- }
+ if (wacom_wac->features->type != TABLETPC)
+ wacom_query_tablet_data(intf);
usb_set_intfdata(intf, wacom);
return 0;
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 6954f550010..3a7a58222f8 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -170,11 +170,11 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr);
ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
- if (isr & UCB_IE_TSPX) {
+ if (isr & UCB_IE_TSPX)
ucb1400_ts_irq_disable(ucb->ac97);
- enable_irq(ucb->irq);
- } else
- printk(KERN_ERR "ucb1400: unexpected IE_STATUS = %#x\n", isr);
+ else
+ dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr);
+ enable_irq(ucb->irq);
}
static int ucb1400_ts_thread(void *_ucb)
@@ -345,6 +345,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
static int ucb1400_ts_probe(struct platform_device *dev)
{
int error, x_res, y_res;
+ u16 fcsr;
struct ucb1400_ts *ucb = dev->dev.platform_data;
ucb->ts_idev = input_allocate_device();
@@ -382,6 +383,14 @@ static int ucb1400_ts_probe(struct platform_device *dev)
ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ /*
+ * Enable ADC filter to prevent horrible jitter on Colibri.
+ * This also further reduces jitter on boards where ADCSYNC
+ * pin is connected.
+ */
+ fcsr = ucb1400_reg_read(ucb->ac97, UCB_FCSR);
+ ucb1400_reg_write(ucb->ac97, UCB_FCSR, fcsr | UCB_FCSR_AVE);
+
ucb1400_adc_enable(ucb->ac97);
x_res = ucb1400_ts_read_xres(ucb);
y_res = ucb1400_ts_read_yres(ucb);
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index a247ae63374..1bc5db4ece0 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -117,6 +117,9 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
gpio_data->inverted = !!inverted;
+ /* After inverting, we need to update the LED. */
+ schedule_work(&gpio_data->work);
+
return n;
}
static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
@@ -146,20 +149,26 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
return -EINVAL;
}
+ if (gpio_data->gpio == gpio)
+ return n;
+
if (!gpio) {
- free_irq(gpio_to_irq(gpio_data->gpio), led);
+ if (gpio_data->gpio != 0)
+ free_irq(gpio_to_irq(gpio_data->gpio), led);
+ gpio_data->gpio = 0;
return n;
}
- if (gpio_data->gpio > 0 && gpio_data->gpio != gpio)
- free_irq(gpio_to_irq(gpio_data->gpio), led);
-
- gpio_data->gpio = gpio;
ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq,
IRQF_SHARED | IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING, "ledtrig-gpio", led);
- if (ret)
+ if (ret) {
dev_err(dev, "request_irq failed with error %d\n", ret);
+ } else {
+ if (gpio_data->gpio != 0)
+ free_irq(gpio_to_irq(gpio_data->gpio), led);
+ gpio_data->gpio = gpio;
+ }
return ret ? ret : n;
}
@@ -211,7 +220,8 @@ static void gpio_trig_deactivate(struct led_classdev *led)
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
flush_work(&gpio_data->work);
- free_irq(gpio_to_irq(gpio_data->gpio),led);
+ if (gpio_data->gpio != 0)
+ free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
}
}
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index 4d686c0bdea..9ab5b0c34f0 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -288,7 +288,7 @@ static void maciisi_sync(struct adb_request *req)
}
/* This could be BAD... when the ADB controller doesn't respond
* for this long, it's probably not coming back :-( */
- if(count >= 50) /* Hopefully shouldn't happen */
+ if (count > 50) /* Hopefully shouldn't happen */
printk(KERN_ERR "maciisi_send_request: poll timed out!\n");
}
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3710ff88fc1..556acff3952 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
*/
chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
+ return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+ error);
+}
+
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+ unsigned long chunk_size_ulong,
+ char **error)
+{
/* Check chunk_size is a power of 2 */
if (!is_power_of_2(chunk_size_ulong)) {
*error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
return -EINVAL;
}
+ if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+ *error = "Chunk size is too high";
+ return -EINVAL;
+ }
+
store->chunk_size = chunk_size_ulong;
store->chunk_mask = chunk_size_ulong - 1;
store->chunk_shift = ffs(chunk_size_ulong) - 1;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 2442c8c0789..812c71872ba 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
int dm_exception_store_type_register(struct dm_exception_store_type *type);
int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+ unsigned long chunk_size_ulong,
+ char **error);
+
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
unsigned *args_used,
struct dm_exception_store **store);
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index e69b9656099..652bd33109e 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -21,6 +21,7 @@ struct log_c {
struct dm_target *ti;
uint32_t region_size;
region_t region_count;
+ uint64_t luid;
char uuid[DM_UUID_LEN];
char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
* restored.
*/
retry:
- r = dm_consult_userspace(uuid, request_type, data,
+ r = dm_consult_userspace(uuid, lc->luid, request_type, data,
data_size, rdata, rdata_size);
if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(2*HZ);
DMWARN("Attempting to contact userspace log server...");
- r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+ r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
+ lc->usr_argv_str,
strlen(lc->usr_argv_str) + 1,
NULL, NULL);
if (!r)
break;
}
DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
- r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+ r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
0, NULL, NULL);
if (!r)
goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
return -ENOMEM;
}
- for (i = 0, str_size = 0; i < argc; i++)
- str_size += sprintf(str + str_size, "%s ", argv[i]);
- str_size += sprintf(str + str_size, "%llu",
- (unsigned long long)ti->len);
+ str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
+ for (i = 0; i < argc; i++)
+ str_size += sprintf(str + str_size, " %s", argv[i]);
*ctr_str = str;
return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
return -ENOMEM;
}
+ /* The ptr value is sufficient for local unique id */
+ lc->luid = (uint64_t)lc;
+
lc->ti = ti;
if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
}
/* Send table string */
- r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+ r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
ctr_str, str_size, NULL, NULL);
if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
/* Since the region size does not change, get it now */
rdata_size = sizeof(rdata);
- r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+ r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
NULL, 0, (char *)&rdata, &rdata_size);
if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
int r;
struct log_c *lc = log->context;
- r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+ r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
NULL, 0,
NULL, NULL);
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
int r;
struct log_c *lc = log->context;
- r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+ r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
NULL, 0,
NULL, NULL);
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
int r;
struct log_c *lc = log->context;
- r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+ r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
NULL, 0,
NULL, NULL);
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
struct log_c *lc = log->context;
lc->in_sync_hint = 0;
- r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+ r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
NULL, 0,
NULL, NULL);
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
char *result, unsigned maxlen)
{
int r = 0;
+ char *table_args;
size_t sz = (size_t)maxlen;
struct log_c *lc = log->context;
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
break;
case STATUSTYPE_TABLE:
sz = 0;
- DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
- lc->uuid, lc->usr_argv_str);
+ table_args = strchr(lc->usr_argv_str, ' ');
+ BUG_ON(!table_args); /* There will always be a ' ' */
+ table_args++;
+
+ DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
+ lc->uuid, table_args);
break;
}
return (r) ? 0 : (int)sz;
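Note: the constructor-string change above moves the target length to the front ("<len> <arg1> <arg2> ..."), so the status path can report the original table arguments by skipping everything up to the first space. A small sketch of both halves (illustrative helpers, not the dm-log API):

#include <stdio.h>
#include <string.h>

static int build_ctr_str(char *buf, unsigned long long target_len,
			 int argc, char **argv)
{
	int i, n = sprintf(buf, "%llu", target_len);	/* length leads the string */

	for (i = 0; i < argc; i++)
		n += sprintf(buf + n, " %s", argv[i]);
	return n;
}

static const char *table_args(const char *ctr_str)
{
	const char *sp = strchr(ctr_str, ' ');

	return sp ? sp + 1 : ctr_str;	/* skip the leading "<len> " token */
}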
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 0ca1ee768a1..ba0edad2d04 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -108,7 +108,7 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
*(pkg->data_size) = 0;
} else if (tfr->data_size > *(pkg->data_size)) {
DMERR("Insufficient space to receive package [%u] "
- "(%u vs %lu)", tfr->request_type,
+ "(%u vs %zu)", tfr->request_type,
tfr->data_size, *(pkg->data_size));
*(pkg->data_size) = 0;
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)
/**
* dm_consult_userspace
- * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
+ * @luid: log's local unique identifier
* @request_type: found in include/linux/dm-log-userspace.h
* @data: data to tx to the server
* @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
*
* Returns: 0 on success, -EXXX on failure
**/
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
char *data, size_t data_size,
char *rdata, size_t *rdata_size)
{
@@ -190,6 +191,7 @@ resend:
memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+ tfr->luid = luid;
tfr->seq = dm_ulog_seq++;
/*
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index c26d8e4e271..04ee874f915 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -11,7 +11,7 @@
int dm_ulog_tfr_init(void);
void dm_ulog_tfr_exit(void);
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
char *data, size_t data_size,
char *rdata, size_t *rdata_size);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9726577cde4..33f179e66bf 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
*/
dm_rh_inc_pending(ms->rh, &sync);
dm_rh_inc_pending(ms->rh, &nosync);
- ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
+
+ /*
+ * If the flush fails on a previous call and succeeds here,
+ * we must not reset the log_failure variable. We need
+ * userspace interaction to do that.
+ */
+ ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
/*
* Dispatch io.
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 6e3fe4f1493..d5b2e08750d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -106,6 +106,13 @@ struct pstore {
void *zero_area;
/*
+ * An area used for header. The header can be written
+ * concurrently with metadata (when invalidating the snapshot),
+ * so it needs a separate buffer.
+ */
+ void *header_area;
+
+ /*
* Used to keep track of which metadata area the data in
* 'chunk' refers to.
*/
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
*/
ps->area = vmalloc(len);
if (!ps->area)
- return r;
+ goto err_area;
ps->zero_area = vmalloc(len);
- if (!ps->zero_area) {
- vfree(ps->area);
- return r;
- }
+ if (!ps->zero_area)
+ goto err_zero_area;
memset(ps->zero_area, 0, len);
+ ps->header_area = vmalloc(len);
+ if (!ps->header_area)
+ goto err_header_area;
+
return 0;
+
+err_header_area:
+ vfree(ps->zero_area);
+
+err_zero_area:
+ vfree(ps->area);
+
+err_area:
+ return r;
}
static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
if (ps->zero_area)
vfree(ps->zero_area);
ps->zero_area = NULL;
+
+ if (ps->header_area)
+ vfree(ps->header_area);
+ ps->header_area = NULL;
}
struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
+ int metadata)
{
struct dm_io_region where = {
.bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
struct dm_io_request io_req = {
.bi_rw = rw,
.mem.type = DM_IO_VMA,
- .mem.ptr.vma = ps->area,
+ .mem.ptr.vma = area,
.client = ps->io_client,
.notify.fn = NULL,
};
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)
chunk = area_location(ps, ps->current_area);
- r = chunk_io(ps, chunk, rw, 0);
+ r = chunk_io(ps, ps->area, chunk, rw, 0);
if (r)
return r;
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
- struct dm_io_region where = {
- .bdev = ps->store->cow->bdev,
- .sector = ps->store->chunk_size * area_location(ps, area),
- .count = ps->store->chunk_size,
- };
- struct dm_io_request io_req = {
- .bi_rw = WRITE,
- .mem.type = DM_IO_VMA,
- .mem.ptr.vma = ps->zero_area,
- .client = ps->io_client,
- .notify.fn = NULL,
- };
-
- return dm_io(&io_req, 1, &where, NULL);
+ return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}
static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
struct disk_header *dh;
chunk_t chunk_size;
int chunk_size_supplied = 1;
+ char *chunk_err;
/*
* Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
if (r)
return r;
- r = chunk_io(ps, 0, READ, 1);
+ r = chunk_io(ps, ps->header_area, 0, READ, 1);
if (r)
goto bad;
- dh = (struct disk_header *) ps->area;
+ dh = ps->header_area;
if (le32_to_cpu(dh->magic) == 0) {
*new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
ps->version = le32_to_cpu(dh->version);
chunk_size = le32_to_cpu(dh->chunk_size);
- if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
+ if (ps->store->chunk_size == chunk_size)
return 0;
- DMWARN("chunk size %llu in device metadata overrides "
- "table chunk size of %llu.",
- (unsigned long long)chunk_size,
- (unsigned long long)ps->store->chunk_size);
+ if (chunk_size_supplied)
+ DMWARN("chunk size %llu in device metadata overrides "
+ "table chunk size of %llu.",
+ (unsigned long long)chunk_size,
+ (unsigned long long)ps->store->chunk_size);
/* We had a bogus chunk_size. Fix stuff up. */
free_area(ps);
- ps->store->chunk_size = chunk_size;
- ps->store->chunk_mask = chunk_size - 1;
- ps->store->chunk_shift = ffs(chunk_size) - 1;
+ r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
+ &chunk_err);
+ if (r) {
+ DMERR("invalid on-disk chunk size %llu: %s.",
+ (unsigned long long)chunk_size, chunk_err);
+ return r;
+ }
r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
{
struct disk_header *dh;
- memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
+ memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
- dh = (struct disk_header *) ps->area;
+ dh = ps->header_area;
dh->magic = cpu_to_le32(SNAP_MAGIC);
dh->valid = cpu_to_le32(ps->valid);
dh->version = cpu_to_le32(ps->version);
dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
- return chunk_io(ps, 0, WRITE, 1);
+ return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}
/*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
ps->valid = 1;
ps->version = SNAPSHOT_DISK_VERSION;
ps->area = NULL;
+ ps->zero_area = NULL;
+ ps->header_area = NULL;
ps->next_free = 2; /* skipping the header and first area */
ps->current_committed = 0;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d573165cd2b..57f1bf7f3b7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
return 0;
}
+static int snapshot_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct dm_snapshot *snap = ti->private;
+
+ return fn(ti, snap->origin, 0, ti->len, data);
+}
+
+
/*-----------------------------------------------------------------
* Origin methods
*---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
return 0;
}
+static int origin_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct dm_dev *dev = ti->private;
+
+ return fn(ti, dev, 0, ti->len, data);
+}
+
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
.map = origin_map,
.resume = origin_resume,
.status = origin_status,
+ .iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
.end_io = snapshot_end_io,
.resume = snapshot_resume,
.status = snapshot_status,
+ .iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4e0e5937e42..3e563d25173 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
return ret;
}
+static void stripe_io_hints(struct dm_target *ti,
+ struct queue_limits *limits)
+{
+ struct stripe_c *sc = ti->private;
+ unsigned chunk_size = (sc->chunk_mask + 1) << 9;
+
+ blk_limits_io_min(limits, chunk_size);
+ limits->io_opt = chunk_size * sc->stripes;
+}
+
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
.end_io = stripe_end_io,
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
+ .io_hints = stripe_io_hints,
};
int __init dm_stripe_init(void)
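Note: stripe_io_hints() above advertises one chunk as the minimum I/O size and a full stripe as the optimal size. The arithmetic, as a standalone sketch:

static void stripe_hints(unsigned int chunk_mask, unsigned int stripes,
			 unsigned int *io_min, unsigned int *io_opt)
{
	unsigned int chunk_bytes = (chunk_mask + 1) << 9;	/* sectors to bytes */

	*io_min = chunk_bytes;			/* at least one full chunk per I/O */
	*io_opt = chunk_bytes * stripes;	/* a whole stripe is optimal */
}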
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d952b344191..1a6cb3c7822 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
}
/*
- * If possible, this checks an area of a destination device is valid.
+ * If possible, this checks an area of a destination device is invalid.
*/
-static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
char b[BDEVNAME_SIZE];
if (!dev_size)
- return 1;
+ return 0;
if ((start >= dev_size) || (start + len > dev_size)) {
- DMWARN("%s: %s too small for target",
- dm_device_name(ti->table->md), bdevname(bdev, b));
- return 0;
+ DMWARN("%s: %s too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
+ return 1;
}
if (logical_block_size_sectors <= 1)
- return 1;
+ return 0;
if (start & (logical_block_size_sectors - 1)) {
DMWARN("%s: start=%llu not aligned to h/w "
- "logical block size %hu of %s",
+ "logical block size %u of %s",
dm_device_name(ti->table->md),
(unsigned long long)start,
limits->logical_block_size, bdevname(bdev, b));
- return 0;
+ return 1;
}
if (len & (logical_block_size_sectors - 1)) {
DMWARN("%s: len=%llu not aligned to h/w "
- "logical block size %hu of %s",
+ "logical block size %u of %s",
dm_device_name(ti->table->md),
(unsigned long long)len,
limits->logical_block_size, bdevname(bdev, b));
- return 0;
+ return 1;
}
- return 1;
+ return 0;
}
/*
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
}
if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
- DMWARN("%s: target device %s is misaligned",
- dm_device_name(ti->table->md), bdevname(bdev, b));
+ DMWARN("%s: target device %s is misaligned: "
+ "physical_block_size=%u, logical_block_size=%u, "
+ "alignment_offset=%u, start=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ q->limits.physical_block_size,
+ q->limits.logical_block_size,
+ q->limits.alignment_offset,
+ (unsigned long long) start << 9);
+
/*
* Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
if (remaining) {
DMWARN("%s: table line %u (start sect %llu len %llu) "
- "not aligned to h/w logical block size %hu",
+ "not aligned to h/w logical block size %u",
dm_device_name(table->md), i,
(unsigned long long) ti->begin,
(unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
ti->type->iterate_devices(ti, dm_set_device_limits,
&ti_limits);
+ /* Set I/O hints portion of queue limits */
+ if (ti->type->io_hints)
+ ti->type->io_hints(ti, &ti_limits);
+
/*
* Check each device area is consistent with the target's
* overall queue limits.
*/
- if (!ti->type->iterate_devices(ti, device_area_is_valid,
- &ti_limits))
+ if (ti->type->iterate_devices(ti, device_area_is_invalid,
+ &ti_limits))
return -EINVAL;
combine_limits:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a311ea0d44..b4845b14740 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
dm_put(md);
}
+static void free_rq_clone(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ blk_rq_unprep_clone(clone);
+ free_rq_tio(tio);
+}
+
static void dm_unprep_request(struct request *rq)
{
struct request *clone = rq->special;
- struct dm_rq_target_io *tio = clone->end_io_data;
rq->special = NULL;
rq->cmd_flags &= ~REQ_DONTPREP;
- blk_rq_unprep_clone(clone);
- free_rq_tio(tio);
+ free_rq_clone(clone);
}
/*
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error)
rq->sense_len = clone->sense_len;
}
- BUG_ON(clone->bio);
- free_rq_tio(tio);
+ free_rq_clone(clone);
blk_end_request_all(rq, error);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 103f2d33fa8..9dd872000ce 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4364,6 +4364,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
if (mode == 1)
set_disk_ro(disk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ err = 0;
}
out:
mutex_unlock(&mddev->open_mutex);
diff --git a/drivers/media/common/tuners/qt1010.c b/drivers/media/common/tuners/qt1010.c
index 825aa1412e6..9f5dba244cb 100644
--- a/drivers/media/common/tuners/qt1010.c
+++ b/drivers/media/common/tuners/qt1010.c
@@ -64,24 +64,22 @@ static int qt1010_writereg(struct qt1010_priv *priv, u8 reg, u8 val)
/* dump all registers */
static void qt1010_dump_regs(struct qt1010_priv *priv)
{
- char buf[52], buf2[4];
u8 reg, val;
for (reg = 0; ; reg++) {
if (reg % 16 == 0) {
if (reg)
- printk("%s\n", buf);
- sprintf(buf, "%02x: ", reg);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "%02x:", reg);
}
if (qt1010_readreg(priv, reg, &val) == 0)
- sprintf(buf2, "%02x ", val);
+ printk(KERN_CONT " %02x", val);
else
- strcpy(buf2, "-- ");
- strcat(buf, buf2);
+ printk(KERN_CONT " --");
if (reg == 0x2f)
break;
}
- printk("%s\n", buf);
+ printk(KERN_CONT "\n");
}
static int qt1010_set_params(struct dvb_frontend *fe,
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index aa20ce8cc66..f270e605da8 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -1119,8 +1119,8 @@ static int xc2028_sleep(struct dvb_frontend *fe)
struct xc2028_data *priv = fe->tuner_priv;
int rc = 0;
- /* Avoid firmware reload on slow devices */
- if (no_poweroff)
+ /* Avoid firmware reload on slow devices or if PM disabled */
+ if (no_poweroff || priv->ctrl.disable_power_mgmt)
return 0;
tuner_dbg("Putting xc2028/3028 into poweroff mode.\n");
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index 19de7928a74..a90c35d50ad 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -38,6 +38,7 @@ struct xc2028_ctrl {
unsigned int input1:1;
unsigned int vhfbw7:1;
unsigned int uhfbw8:1;
+ unsigned int disable_power_mgmt:1;
unsigned int demod;
enum firmware_type type:2;
};
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 4cb31e7c13c..26690dfb326 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -81,7 +81,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
switch (req->cmd) {
case GET_CONFIG:
- case BOOT:
case READ_MEMORY:
case RECONNECT_USB:
case GET_IR_CODE:
@@ -100,6 +99,7 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
case WRITE_VIRTUAL_MEMORY:
case COPY_FIRMWARE:
case DOWNLOAD_FIRMWARE:
+ case BOOT:
break;
default:
err("unknown command:%d", req->cmd);
diff --git a/drivers/media/dvb/frontends/cx22700.c b/drivers/media/dvb/frontends/cx22700.c
index ace5cb17165..fbd838eca26 100644
--- a/drivers/media/dvb/frontends/cx22700.c
+++ b/drivers/media/dvb/frontends/cx22700.c
@@ -380,7 +380,7 @@ struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
struct cx22700_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct cx22700_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct cx22700_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 5d1abe34bdd..00b5c7e91d5 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -580,7 +580,7 @@ struct dvb_frontend *cx22702_attach(const struct cx22702_config *config,
struct cx22702_state *state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct cx22702_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct cx22702_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 87ae29db024..ffbcfabd83f 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -598,7 +598,7 @@ struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
int ret;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct cx24110_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct cx24110_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/dvb_dummy_fe.c b/drivers/media/dvb/frontends/dvb_dummy_fe.c
index db8a937cc63..a7fc7e53a55 100644
--- a/drivers/media/dvb/frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb/frontends/dvb_dummy_fe.c
@@ -117,7 +117,7 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
struct dvb_dummy_fe_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
if (state == NULL) goto error;
/* create dvb_frontend */
@@ -137,7 +137,7 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
struct dvb_dummy_fe_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
if (state == NULL) goto error;
/* create dvb_frontend */
@@ -157,7 +157,7 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
struct dvb_dummy_fe_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
if (state == NULL) goto error;
/* create dvb_frontend */
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index e1e70e9e0cb..3051b64aa17 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -501,7 +501,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config,
{ .addr = config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct l64781_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct l64781_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/lgs8gl5.c b/drivers/media/dvb/frontends/lgs8gl5.c
index 855852fddf2..bb37ed289a0 100644
--- a/drivers/media/dvb/frontends/lgs8gl5.c
+++ b/drivers/media/dvb/frontends/lgs8gl5.c
@@ -387,7 +387,7 @@ lgs8gl5_attach(const struct lgs8gl5_config *config, struct i2c_adapter *i2c)
dprintk("%s\n", __func__);
/* Allocate memory for the internal state */
- state = kmalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index a621f727935..f69daaac78c 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -782,7 +782,7 @@ struct dvb_frontend *mt312_attach(const struct mt312_config *config,
struct mt312_state *state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct mt312_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct mt312_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c
index 0eef22dbf8a..a763ec756f7 100644
--- a/drivers/media/dvb/frontends/nxt6000.c
+++ b/drivers/media/dvb/frontends/nxt6000.c
@@ -545,7 +545,7 @@ struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
struct nxt6000_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct nxt6000_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct nxt6000_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index 8133ea3cddd..38e67accb8c 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -562,7 +562,7 @@ struct dvb_frontend* or51132_attach(const struct or51132_config* config,
struct or51132_state* state = NULL;
/* Allocate memory for the internal state */
- state = kmalloc(sizeof(struct or51132_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct or51132_state), GFP_KERNEL);
if (state == NULL)
return NULL;
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
index 16cf2fdd5d7..c709ce6771c 100644
--- a/drivers/media/dvb/frontends/or51211.c
+++ b/drivers/media/dvb/frontends/or51211.c
@@ -527,7 +527,7 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config,
struct or51211_state* state = NULL;
/* Allocate memory for the internal state */
- state = kmalloc(sizeof(struct or51211_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct or51211_state), GFP_KERNEL);
if (state == NULL)
return NULL;
diff --git a/drivers/media/dvb/frontends/s5h1409.c b/drivers/media/dvb/frontends/s5h1409.c
index 3e08d985d6e..fb301151842 100644
--- a/drivers/media/dvb/frontends/s5h1409.c
+++ b/drivers/media/dvb/frontends/s5h1409.c
@@ -796,7 +796,7 @@ struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
u16 reg;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct s5h1409_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct s5h1409_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index 66e2dd6d6fe..d8adf1e3201 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -844,7 +844,7 @@ struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
u16 reg;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/si21xx.c b/drivers/media/dvb/frontends/si21xx.c
index 0bd16af8a6c..9552a22ccff 100644
--- a/drivers/media/dvb/frontends/si21xx.c
+++ b/drivers/media/dvb/frontends/si21xx.c
@@ -928,7 +928,7 @@ struct dvb_frontend *si21xx_attach(const struct si21xx_config *config,
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct si21xx_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct si21xx_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/sp8870.c b/drivers/media/dvb/frontends/sp8870.c
index 1c9a9b4051b..b85eb60a893 100644
--- a/drivers/media/dvb/frontends/sp8870.c
+++ b/drivers/media/dvb/frontends/sp8870.c
@@ -557,7 +557,7 @@ struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
struct sp8870_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct sp8870_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct sp8870_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/sp887x.c b/drivers/media/dvb/frontends/sp887x.c
index 559509ab4da..4a7c3d84260 100644
--- a/drivers/media/dvb/frontends/sp887x.c
+++ b/drivers/media/dvb/frontends/sp887x.c
@@ -557,7 +557,7 @@ struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
struct sp887x_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct sp887x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct sp887x_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/stv0288.c b/drivers/media/dvb/frontends/stv0288.c
index ff1194de34c..2930a5d6768 100644
--- a/drivers/media/dvb/frontends/stv0288.c
+++ b/drivers/media/dvb/frontends/stv0288.c
@@ -570,7 +570,7 @@ struct dvb_frontend *stv0288_attach(const struct stv0288_config *config,
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct stv0288_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct stv0288_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/stv0297.c b/drivers/media/dvb/frontends/stv0297.c
index 62caf802ed9..4fd7479bb62 100644
--- a/drivers/media/dvb/frontends/stv0297.c
+++ b/drivers/media/dvb/frontends/stv0297.c
@@ -663,7 +663,7 @@ struct dvb_frontend *stv0297_attach(const struct stv0297_config *config,
struct stv0297_state *state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct stv0297_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct stv0297_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 6c1cb1973c6..96887446972 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -667,7 +667,7 @@ struct dvb_frontend* stv0299_attach(const struct stv0299_config* config,
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct stv0299_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct stv0299_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index f648fdb64bb..f5d7b3277a2 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -413,7 +413,7 @@ struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
u8 id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda10021_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda10021_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index cc8862ce4aa..4e2a7c8b2f6 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -1095,7 +1095,7 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
dprintk(1, "%s()\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda10048_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda10048_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index 4981cef8b44..f2a8abe0a24 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -1269,7 +1269,7 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
if (!state) {
printk(KERN_ERR "Can't alocate memory for tda10045 state\n");
return NULL;
@@ -1339,7 +1339,7 @@ struct dvb_frontend* tda10046_attach(const struct tda1004x_config* config,
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
if (!state) {
printk(KERN_ERR "Can't alocate memory for tda10046 state\n");
return NULL;
diff --git a/drivers/media/dvb/frontends/tda10086.c b/drivers/media/dvb/frontends/tda10086.c
index a17ce3c4ad8..f2c8faac6f3 100644
--- a/drivers/media/dvb/frontends/tda10086.c
+++ b/drivers/media/dvb/frontends/tda10086.c
@@ -745,7 +745,7 @@ struct dvb_frontend* tda10086_attach(const struct tda10086_config* config,
dprintk ("%s\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda10086_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda10086_state), GFP_KERNEL);
if (!state)
return NULL;
diff --git a/drivers/media/dvb/frontends/tda8083.c b/drivers/media/dvb/frontends/tda8083.c
index 5b843b2e67e..9369f7442f2 100644
--- a/drivers/media/dvb/frontends/tda8083.c
+++ b/drivers/media/dvb/frontends/tda8083.c
@@ -417,7 +417,7 @@ struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
struct tda8083_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda8083_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda8083_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/ves1820.c b/drivers/media/dvb/frontends/ves1820.c
index a184597f1d9..6e78e486551 100644
--- a/drivers/media/dvb/frontends/ves1820.c
+++ b/drivers/media/dvb/frontends/ves1820.c
@@ -374,7 +374,7 @@ struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
struct ves1820_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct ves1820_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct ves1820_state), GFP_KERNEL);
if (state == NULL)
goto error;
diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c
index bd558960bd8..8d7854c2fb0 100644
--- a/drivers/media/dvb/frontends/ves1x93.c
+++ b/drivers/media/dvb/frontends/ves1x93.c
@@ -456,7 +456,7 @@ struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
u8 identity;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct ves1x93_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct ves1x93_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
index 148b6f7f6cb..66f5c1fb307 100644
--- a/drivers/media/dvb/frontends/zl10353.c
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -98,7 +98,6 @@ static int zl10353_read_register(struct zl10353_state *state, u8 reg)
static void zl10353_dump_regs(struct dvb_frontend *fe)
{
struct zl10353_state *state = fe->demodulator_priv;
- char buf[52], buf2[4];
int ret;
u8 reg;
@@ -106,19 +105,18 @@ static void zl10353_dump_regs(struct dvb_frontend *fe)
for (reg = 0; ; reg++) {
if (reg % 16 == 0) {
if (reg)
- printk(KERN_DEBUG "%s\n", buf);
- sprintf(buf, "%02x: ", reg);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "%02x:", reg);
}
ret = zl10353_read_register(state, reg);
if (ret >= 0)
- sprintf(buf2, "%02x ", (u8)ret);
+ printk(KERN_CONT " %02x", (u8)ret);
else
- strcpy(buf2, "-- ");
- strcat(buf, buf2);
+ printk(KERN_CONT " --");
if (reg == 0xff)
break;
}
- printk(KERN_DEBUG "%s\n", buf);
+ printk(KERN_CONT "\n");
}
static void zl10353_calc_nominal_rate(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig
index dd863f26167..8c1aed77ea3 100644
--- a/drivers/media/dvb/siano/Kconfig
+++ b/drivers/media/dvb/siano/Kconfig
@@ -2,25 +2,33 @@
# Siano Mobile Silicon Digital TV device configuration
#
-config DVB_SIANO_SMS1XXX
- tristate "Siano SMS1XXX USB dongle support"
- depends on DVB_CORE && USB
+config SMS_SIANO_MDTV
+ tristate "Siano SMS1xxx based MDTV receiver"
+ depends on DVB_CORE && INPUT
---help---
- Choose Y here if you have a USB dongle with a SMS1XXX chipset.
+ Choose Y or M here if you have MDTV receiver with a Siano chipset.
- To compile this driver as a module, choose M here: the
- module will be called sms1xxx.
+ To compile this driver as a module, choose M here
+ (The module will be called smsmdtv).
-config DVB_SIANO_SMS1XXX_SMS_IDS
- bool "Enable support for Siano Mobile Silicon default USB IDs"
- depends on DVB_SIANO_SMS1XXX
- default y
- ---help---
- Choose Y here if you have a USB dongle with a SMS1XXX chipset
- that uses Siano Mobile Silicon's default usb vid:pid.
+ Further documentation on this driver can be found on the WWW
+ at http://www.siano-ms.com/
+
+if SMS_SIANO_MDTV
+menu "Siano module components"
- Choose N here if you would prefer to use Siano's external driver.
+# Hardware interfaces support
- Further documentation on this driver can be found on the WWW at
- <http://www.siano-ms.com/>.
+config SMS_USB_DRV
+ tristate "USB interface support"
+ depends on DVB_CORE && USB
+ ---help---
+ Choose if you would like to have Siano's support for USB interface
+config SMS_SDIO_DRV
+ tristate "SDIO interface support"
+ depends on DVB_CORE && MMC
+ ---help---
+ Choose if you would like to have Siano's support for the SDIO interface.
+endmenu
+endif # SMS_SIANO_MDTV
diff --git a/drivers/media/dvb/siano/Makefile b/drivers/media/dvb/siano/Makefile
index c6644d90943..c54140b5ab5 100644
--- a/drivers/media/dvb/siano/Makefile
+++ b/drivers/media/dvb/siano/Makefile
@@ -1,8 +1,9 @@
-sms1xxx-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
-obj-$(CONFIG_DVB_SIANO_SMS1XXX) += sms1xxx.o
-obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsusb.o
-obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsdvb.o
+smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
+
+obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o
+obj-$(CONFIG_SMS_USB_DRV) += smsusb.o
+obj-$(CONFIG_SMS_SDIO_DRV) += smssdio.o
EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index d8b15d583bd..0420e2885e7 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -116,99 +116,21 @@ static inline void sms_gpio_assign_11xx_default_led_config(
int sms_board_event(struct smscore_device_t *coredev,
enum SMS_BOARD_EVENTS gevent) {
- int board_id = smscore_get_board_id(coredev);
- struct sms_board *board = sms_get_board(board_id);
struct smscore_gpio_config MyGpioConfig;
sms_gpio_assign_11xx_default_led_config(&MyGpioConfig);
switch (gevent) {
case BOARD_EVENT_POWER_INIT: /* including hotplug */
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- /* set I/O and turn off all LEDs */
- smscore_gpio_configure(coredev,
- board->board_cfg.leds_power,
- &MyGpioConfig);
- smscore_gpio_set_level(coredev,
- board->board_cfg.leds_power, 0);
- smscore_gpio_configure(coredev, board->board_cfg.led0,
- &MyGpioConfig);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led0, 0);
- smscore_gpio_configure(coredev, board->board_cfg.led1,
- &MyGpioConfig);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 0);
- break;
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
- /* set I/O and turn off LNA */
- smscore_gpio_configure(coredev,
- board->board_cfg.foreign_lna0_ctrl,
- &MyGpioConfig);
- smscore_gpio_set_level(coredev,
- board->board_cfg.foreign_lna0_ctrl,
- 0);
- break;
- }
break; /* BOARD_EVENT_BIND */
case BOARD_EVENT_POWER_SUSPEND:
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- smscore_gpio_set_level(coredev,
- board->board_cfg.leds_power, 0);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led0, 0);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 0);
- break;
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
- smscore_gpio_set_level(coredev,
- board->board_cfg.foreign_lna0_ctrl,
- 0);
- break;
- }
break; /* BOARD_EVENT_POWER_SUSPEND */
case BOARD_EVENT_POWER_RESUME:
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- smscore_gpio_set_level(coredev,
- board->board_cfg.leds_power, 1);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led0, 1);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 0);
- break;
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
- smscore_gpio_set_level(coredev,
- board->board_cfg.foreign_lna0_ctrl,
- 1);
- break;
- }
break; /* BOARD_EVENT_POWER_RESUME */
case BOARD_EVENT_BIND:
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- smscore_gpio_set_level(coredev,
- board->board_cfg.leds_power, 1);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led0, 1);
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 0);
- break;
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
- case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
- smscore_gpio_set_level(coredev,
- board->board_cfg.foreign_lna0_ctrl,
- 1);
- break;
- }
break; /* BOARD_EVENT_BIND */
case BOARD_EVENT_SCAN_PROG:
@@ -218,20 +140,8 @@ int sms_board_event(struct smscore_device_t *coredev,
case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL:
break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */
case BOARD_EVENT_FE_LOCK:
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 1);
- break;
- }
break; /* BOARD_EVENT_FE_LOCK */
case BOARD_EVENT_FE_UNLOCK:
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 0);
- break;
- }
break; /* BOARD_EVENT_FE_UNLOCK */
case BOARD_EVENT_DEMOD_LOCK:
break; /* BOARD_EVENT_DEMOD_LOCK */
@@ -248,20 +158,8 @@ int sms_board_event(struct smscore_device_t *coredev,
case BOARD_EVENT_RECEPTION_LOST_0:
break; /* BOARD_EVENT_RECEPTION_LOST_0 */
case BOARD_EVENT_MULTIPLEX_OK:
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 1);
- break;
- }
break; /* BOARD_EVENT_MULTIPLEX_OK */
case BOARD_EVENT_MULTIPLEX_ERRORS:
- switch (board_id) {
- case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
- smscore_gpio_set_level(coredev,
- board->board_cfg.led1, 0);
- break;
- }
break; /* BOARD_EVENT_MULTIPLEX_ERRORS */
default:
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index a246903c334..bd9ab9d0d12 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -816,7 +816,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
sms_debug("set device mode to %d", mode);
if (coredev->device_flags & SMS_DEVICE_FAMILY2) {
- if (mode < DEVICE_MODE_DVBT || mode > DEVICE_MODE_RAW_TUNER) {
+ if (mode < DEVICE_MODE_DVBT || mode >= DEVICE_MODE_RAW_TUNER) {
sms_err("invalid mode specified %d", mode);
return -EINVAL;
}
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 3ee1c3902c5..266033ae278 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -325,6 +325,16 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client,
0 : -ETIME;
}
+static inline int led_feedback(struct smsdvb_client_t *client)
+{
+ if (client->fe_status & FE_HAS_LOCK)
+ return sms_board_led_feedback(client->coredev,
+ (client->sms_stat_dvb.ReceptionData.BER
+ == 0) ? SMS_LED_HI : SMS_LED_LO);
+ else
+ return sms_board_led_feedback(client->coredev, SMS_LED_OFF);
+}
+
static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
{
struct smsdvb_client_t *client;
@@ -332,6 +342,8 @@ static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
*stat = client->fe_status;
+ led_feedback(client);
+
return 0;
}
@@ -342,6 +354,8 @@ static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber)
*ber = client->sms_stat_dvb.ReceptionData.BER;
+ led_feedback(client);
+
return 0;
}
@@ -359,6 +373,8 @@ static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
(client->sms_stat_dvb.ReceptionData.InBandPwr
+ 95) * 3 / 2;
+ led_feedback(client);
+
return 0;
}
@@ -369,6 +385,8 @@ static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr)
*snr = client->sms_stat_dvb.ReceptionData.SNR;
+ led_feedback(client);
+
return 0;
}
@@ -379,6 +397,8 @@ static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
*ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets;
+ led_feedback(client);
+
return 0;
}
@@ -404,6 +424,8 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
u32 Data[3];
} Msg;
+ int ret;
+
client->fe_status = FE_HAS_SIGNAL;
client->event_fe_state = -1;
client->event_unc_state = -1;
@@ -426,6 +448,23 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
case BANDWIDTH_AUTO: return -EOPNOTSUPP;
default: return -EINVAL;
}
+ /* Disable LNA, if any. An error is returned if no LNA is present */
+ ret = sms_board_lna_control(client->coredev, 0);
+ if (ret == 0) {
+ fe_status_t status;
+
+ /* tune with LNA off at first */
+ ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
+ &client->tune_done);
+
+ smsdvb_read_status(fe, &status);
+
+ if (status & FE_HAS_LOCK)
+ return ret;
+
+ /* previous tune didn't lock - enable LNA and tune again */
+ sms_board_lna_control(client->coredev, 1);
+ }
return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
&client->tune_done);
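
The new set_frontend logic first tunes with the LNA switched off and only falls back to enabling it when that first attempt fails to lock, so strong signals are not overdriven. A condensed sketch of that control flow (the helper names are hypothetical stand-ins for the sms_board_*/smsdvb_* calls above):

	/* lna_control() returns non-zero if the board has no LNA at all */
	if (lna_control(client, 0) == 0) {
		ret = tune_and_wait(client);		/* first pass, LNA off */
		if (read_status(client) & FE_HAS_LOCK)
			return ret;			/* locked: stay with LNA off */
		lna_control(client, 1);			/* no lock: enable the LNA ... */
	}
	return tune_and_wait(client);			/* ... and tune once more */
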
@@ -451,6 +490,8 @@ static int smsdvb_init(struct dvb_frontend *fe)
struct smsdvb_client_t *client =
container_of(fe, struct smsdvb_client_t, frontend);
+ sms_board_power(client->coredev, 1);
+
sms_board_dvb3_event(client, DVB3_EVENT_INIT);
return 0;
}
@@ -460,6 +501,9 @@ static int smsdvb_sleep(struct dvb_frontend *fe)
struct smsdvb_client_t *client =
container_of(fe, struct smsdvb_client_t, frontend);
+ sms_board_led_feedback(client->coredev, SMS_LED_OFF);
+ sms_board_power(client->coredev, 0);
+
sms_board_dvb3_event(client, DVB3_EVENT_SLEEP);
return 0;
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
index dfaa49a53f3..d1d652e7f89 100644
--- a/drivers/media/dvb/siano/smssdio.c
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -46,6 +46,7 @@
#define SMSSDIO_DATA 0x00
#define SMSSDIO_INT 0x04
+#define SMSSDIO_BLOCK_SIZE 128
static const struct sdio_device_id smssdio_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
@@ -85,7 +86,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size)
sdio_claim_host(smsdev->func);
while (size >= smsdev->func->cur_blksize) {
- ret = sdio_write_blocks(smsdev->func, SMSSDIO_DATA, buffer, 1);
+ ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
+ buffer, smsdev->func->cur_blksize);
if (ret)
goto out;
@@ -94,8 +96,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size)
}
if (size) {
- ret = sdio_write_bytes(smsdev->func, SMSSDIO_DATA,
- buffer, size);
+ ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
+ buffer, size);
}
out:
@@ -125,23 +127,23 @@ static void smssdio_interrupt(struct sdio_func *func)
*/
isr = sdio_readb(func, SMSSDIO_INT, &ret);
if (ret) {
- dev_err(&smsdev->func->dev,
- "Unable to read interrupt register!\n");
+ sms_err("Unable to read interrupt register!\n");
return;
}
if (smsdev->split_cb == NULL) {
cb = smscore_getbuffer(smsdev->coredev);
if (!cb) {
- dev_err(&smsdev->func->dev,
- "Unable to allocate data buffer!\n");
+ sms_err("Unable to allocate data buffer!\n");
return;
}
- ret = sdio_read_blocks(smsdev->func, cb->p, SMSSDIO_DATA, 1);
+ ret = sdio_memcpy_fromio(smsdev->func,
+ cb->p,
+ SMSSDIO_DATA,
+ SMSSDIO_BLOCK_SIZE);
if (ret) {
- dev_err(&smsdev->func->dev,
- "Error %d reading initial block!\n", ret);
+ sms_err("Error %d reading initial block!\n", ret);
return;
}
@@ -152,7 +154,10 @@ static void smssdio_interrupt(struct sdio_func *func)
return;
}
- size = hdr->msgLength - smsdev->func->cur_blksize;
+ if (hdr->msgLength > smsdev->func->cur_blksize)
+ size = hdr->msgLength - smsdev->func->cur_blksize;
+ else
+ size = 0;
} else {
cb = smsdev->split_cb;
hdr = cb->p;
@@ -162,23 +167,24 @@ static void smssdio_interrupt(struct sdio_func *func)
smsdev->split_cb = NULL;
}
- if (hdr->msgLength > smsdev->func->cur_blksize) {
+ if (size) {
void *buffer;
- size = ALIGN(size, 128);
- buffer = cb->p + hdr->msgLength;
+ buffer = cb->p + (hdr->msgLength - size);
+ size = ALIGN(size, SMSSDIO_BLOCK_SIZE);
- BUG_ON(smsdev->func->cur_blksize != 128);
+ BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE);
/*
* First attempt to transfer all of it in one go...
*/
- ret = sdio_read_blocks(smsdev->func, buffer,
- SMSSDIO_DATA, size / 128);
+ ret = sdio_memcpy_fromio(smsdev->func,
+ buffer,
+ SMSSDIO_DATA,
+ size);
if (ret && ret != -EINVAL) {
smscore_putbuffer(smsdev->coredev, cb);
- dev_err(&smsdev->func->dev,
- "Error %d reading data from card!\n", ret);
+ sms_err("Error %d reading data from card!\n", ret);
return;
}
@@ -191,12 +197,12 @@ static void smssdio_interrupt(struct sdio_func *func)
*/
if (ret == -EINVAL) {
while (size) {
- ret = sdio_read_blocks(smsdev->func,
- buffer, SMSSDIO_DATA, 1);
+ ret = sdio_memcpy_fromio(smsdev->func,
+ buffer, SMSSDIO_DATA,
+ smsdev->func->cur_blksize);
if (ret) {
smscore_putbuffer(smsdev->coredev, cb);
- dev_err(&smsdev->func->dev,
- "Error %d reading "
+ sms_err("Error %d reading "
"data from card!\n", ret);
return;
}
@@ -269,7 +275,7 @@ static int smssdio_probe(struct sdio_func *func,
if (ret)
goto release;
- ret = sdio_set_block_size(func, 128);
+ ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE);
if (ret)
goto disable;
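
The smssdio changes move from the removed sdio_read_blocks()/sdio_write_blocks() helpers to sdio_memcpy_fromio()/sdio_memcpy_toio(), which take a byte count (a multiple of the block size for block transfers) rather than a block count. A minimal sketch of the send path under that API; apart from the sdio_* calls, the names below are hypothetical:

#include <linux/mmc/sdio_func.h>

#define DATA_ADDR 0x00	/* hypothetical function-local data register */

static int send_buf(struct sdio_func *func, u8 *buf, size_t size)
{
	int ret = 0;

	sdio_claim_host(func);
	while (size >= func->cur_blksize) {
		/* full blocks: count is a multiple of cur_blksize */
		ret = sdio_memcpy_toio(func, DATA_ADDR, buf, func->cur_blksize);
		if (ret)
			goto out;
		buf += func->cur_blksize;
		size -= func->cur_blksize;
	}
	if (size)	/* remaining tail, plain byte transfer */
		ret = sdio_memcpy_toio(func, DATA_ADDR, buf, size);
out:
	sdio_release_host(func);
	return ret;
}
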
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 84b6fc15519..dcf9fa9264b 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -920,6 +920,8 @@ source "drivers/media/video/pwc/Kconfig"
config USB_ZR364XX
tristate "USB ZR364XX Camera support"
depends on VIDEO_V4L2
+ select VIDEOBUF_GEN
+ select VIDEOBUF_VMALLOC
---help---
Say Y here if you want to connect this type of camera to your
computer's USB port.
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 10dbd4a11b3..9e39bc5f7b0 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -992,7 +992,7 @@ static int accept_bwqcam(struct parport *port)
if (parport[0] && strncmp(parport[0], "auto", 4) != 0) {
/* user gave parport parameters */
- for(n=0; parport[n] && n<MAX_CAMS; n++){
+ for (n = 0; n < MAX_CAMS && parport[n]; n++) {
char *ep;
unsigned long r;
r = simple_strtoul(parport[n], &ep, 0);
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 5136df19833..93f0dae0135 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*/
+#include <linux/kernel.h>
#include "cx18-driver.h"
#include "cx18-cards.h"
@@ -317,7 +318,7 @@ int cx18_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
idx = p.audio_properties & 0x03;
/* The audio clock of the digitizer must match the codec sample
rate otherwise you get some very strange effects. */
- if (idx < sizeof(freqs))
+ if (idx < ARRAY_SIZE(freqs))
cx18_call_all(cx, audio, s_clock_freq, freqs[idx]);
return err;
}
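
The bound check above compared the index against sizeof(freqs), which is the array's size in bytes, so out-of-range values could slip through; ARRAY_SIZE() yields the element count. A self-contained illustration of the difference (the freqs table mirrors the one these drivers use):

#include <linux/kernel.h>
#include <linux/types.h>

static const u32 freqs[] = { 44100, 48000, 32000 };

static bool freq_index_valid(unsigned int idx)
{
	/* sizeof(freqs) is 12 bytes here; ARRAY_SIZE(freqs) is 3 elements */
	return idx < ARRAY_SIZE(freqs);
}
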
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index e0cf21e0b1b..1a1048b18f7 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1715,6 +1715,8 @@ static struct video_device cx23885_mpeg_template = {
.fops = &mpeg_fops,
.ioctl_ops = &mpeg_ioctl_ops,
.minor = -1,
+ .tvnorms = CX23885_NORMS,
+ .current_norm = V4L2_STD_NTSC_M,
};
void cx23885_417_unregister(struct cx23885_dev *dev)
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index a5cc1c1fc2d..39465301ec9 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -3003,6 +3003,14 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
ctl->demod = XC3028_FE_OREN538;
break;
+ case CX88_BOARD_GENIATECH_X8000_MT:
+ /* FIXME: For this board, the xc3028 never recovers after being
+ powered down (the reset GPIO probably is not set properly).
+ We don't have access to the hardware so we cannot determine
+ which GPIO is used for xc3028, so just disable xc3028
+ power management for now */
+ ctl->disable_power_mgmt = 1;
+ break;
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index c44e8760021..e237b507659 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -501,6 +501,7 @@ static struct zl10353_config cx88_pinnacle_hybrid_pctv = {
static struct zl10353_config cx88_geniatech_x8000_mt = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
+ .disable_i2c_gate_ctrl = 1,
};
static struct s5h1411_config dvico_fusionhdtv7_config = {
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index da4e3912cd3..7172dcf2a4f 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -116,6 +116,10 @@ static int cx8802_start_dma(struct cx8802_dev *dev,
udelay(100);
break;
case CX88_BOARD_HAUPPAUGE_HVR1300:
+ /* Enable MPEG parallel IO and video signal pins */
+ cx_write(MO_PINMUX_IO, 0x88);
+ cx_write(TS_SOP_STAT, 0);
+ cx_write(TS_VALERR_CNTRL, 0);
break;
case CX88_BOARD_PINNACLE_PCTV_HD_800i:
/* Enable MPEG parallel IO and video signal pins */
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 320f1f60276..1c2e544eda7 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -218,7 +218,7 @@ static struct em28xx_reg_seq silvercrest_reg_seq[] = {
struct em28xx_board em28xx_boards[] = {
[EM2750_BOARD_UNKNOWN] = {
.name = "EM2710/EM2750/EM2751 webcam grabber",
- .xclk = EM28XX_XCLK_FREQUENCY_48MHZ,
+ .xclk = EM28XX_XCLK_FREQUENCY_20MHZ,
.tuner_type = TUNER_ABSENT,
.is_webcam = 1,
.input = { {
@@ -622,22 +622,27 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2861_BOARD_PLEXTOR_PX_TV100U] = {
.name = "Plextor ConvertX PX-TV100U",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
.tuner_type = TUNER_TNF_5335MF,
+ .xclk = EM28XX_XCLK_I2S_MSB_TIMING |
+ EM28XX_XCLK_FREQUENCY_12MHZ,
.tda9887_conf = TDA9887_PRESENT,
.decoder = EM28XX_TVP5150,
+ .has_msp34xx = 1,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
.amux = EM28XX_AMUX_LINE_IN,
+ .gpio = pinnacle_hybrid_pro_analog,
}, {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = TVP5150_COMPOSITE1,
.amux = EM28XX_AMUX_LINE_IN,
+ .gpio = pinnacle_hybrid_pro_analog,
}, {
.type = EM28XX_VMUX_SVIDEO,
.vmux = TVP5150_SVIDEO,
.amux = EM28XX_AMUX_LINE_IN,
+ .gpio = pinnacle_hybrid_pro_analog,
} },
},
@@ -1544,6 +1549,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2750_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2800),
.driver_info = EM2800_BOARD_UNKNOWN },
+ { USB_DEVICE(0xeb1a, 0x2710),
+ .driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2820),
.driver_info = EM2820_BOARD_UNKNOWN },
{ USB_DEVICE(0xeb1a, 0x2821),
@@ -1723,6 +1730,25 @@ static inline void em28xx_set_model(struct em28xx *dev)
EM28XX_I2C_FREQ_100_KHZ;
}
+
+/* FIXME: Should be replaced by a proper mt9m111 driver */
+static int em28xx_initialize_mt9m111(struct em28xx *dev)
+{
+ int i;
+ unsigned char regs[][3] = {
+ { 0x0d, 0x00, 0x01, }, /* reset and use defaults */
+ { 0x0d, 0x00, 0x00, },
+ { 0x0a, 0x00, 0x21, },
+ { 0x21, 0x04, 0x00, }, /* full readout speed, no row/col skipping */
+ };
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, &regs[i][0], 3);
+
+ return 0;
+}
+
+
/* FIXME: Should be replaced by a proper mt9m001 driver */
static int em28xx_initialize_mt9m001(struct em28xx *dev)
{
@@ -1751,7 +1777,7 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev)
/* HINT method: webcam I2C chips
*
- * This method work for webcams with Micron sensors
+ * This method works for webcams with Micron sensors
*/
static int em28xx_hint_sensor(struct em28xx *dev)
{
@@ -1761,6 +1787,7 @@ static int em28xx_hint_sensor(struct em28xx *dev)
__be16 version_be;
u16 version;
+ /* Micron sensor detection */
dev->i2c_client.addr = 0xba >> 1;
cmd = 0;
i2c_master_send(&dev->i2c_client, &cmd, 1);
@@ -1769,23 +1796,54 @@ static int em28xx_hint_sensor(struct em28xx *dev)
return -EINVAL;
version = be16_to_cpu(version_be);
-
switch (version) {
- case 0x8243: /* mt9v011 640x480 1.3 Mpix sensor */
+ case 0x8232: /* mt9v011 640x480 1.3 Mpix sensor */
+ case 0x8243: /* mt9v011 rev B 640x480 1.3 Mpix sensor */
dev->model = EM2820_BOARD_SILVERCREST_WEBCAM;
+ em28xx_set_model(dev);
+
sensor_name = "mt9v011";
dev->em28xx_sensor = EM28XX_MT9V011;
dev->sensor_xres = 640;
dev->sensor_yres = 480;
- dev->sensor_xtal = 6300000;
+ /*
+ * FIXME: mt9v011 uses I2S speed as xtal clk - at least with
+ * the Silvercrest cam I have here for testing - for higher
+ * resolutions, a high clock causes horizontal artifacts, so we
+ * need to use a lower xclk frequency.
+ * Yet, it would be possible to adjust xclk depending on the
+ * desired resolution, since this directly affects the
+ * frame rate.
+ */
+ dev->board.xclk = EM28XX_XCLK_FREQUENCY_4_3MHZ;
+ dev->sensor_xtal = 4300000;
/* probably means GRGB 16 bit bayer */
dev->vinmode = 0x0d;
dev->vinctl = 0x00;
break;
+
+ case 0x143a: /* MT9M111 as found in the ECS G200 */
+ dev->model = EM2750_BOARD_UNKNOWN;
+ em28xx_set_model(dev);
+
+ sensor_name = "mt9m111";
+ dev->board.xclk = EM28XX_XCLK_FREQUENCY_48MHZ;
+ dev->em28xx_sensor = EM28XX_MT9M111;
+ em28xx_initialize_mt9m111(dev);
+ dev->sensor_xres = 640;
+ dev->sensor_yres = 512;
+
+ dev->vinmode = 0x0a;
+ dev->vinctl = 0x00;
+
+ break;
+
case 0x8431:
dev->model = EM2750_BOARD_UNKNOWN;
+ em28xx_set_model(dev);
+
sensor_name = "mt9m001";
dev->em28xx_sensor = EM28XX_MT9M001;
em28xx_initialize_mt9m001(dev);
@@ -1798,10 +1856,13 @@ static int em28xx_hint_sensor(struct em28xx *dev)
break;
default:
- printk("Unknown Micron Sensor 0x%04x\n", be16_to_cpu(version));
+ printk("Unknown Micron Sensor 0x%04x\n", version);
return -EINVAL;
}
+ /* Setup webcam defaults */
+ em28xx_pre_card_setup(dev);
+
em28xx_errdev("Sensor is %s, using model %s entry.\n",
sensor_name, em28xx_boards[dev->model].name);
@@ -1813,60 +1874,6 @@ static int em28xx_hint_sensor(struct em28xx *dev)
*/
void em28xx_pre_card_setup(struct em28xx *dev)
{
- int rc;
-
- em28xx_set_model(dev);
-
- em28xx_info("Identified as %s (card=%d)\n",
- dev->board.name, dev->model);
-
- /* Set the default GPO/GPIO for legacy devices */
- dev->reg_gpo_num = EM2880_R04_GPO;
- dev->reg_gpio_num = EM28XX_R08_GPIO;
-
- dev->wait_after_write = 5;
-
- /* Based on the Chip ID, set the device configuration */
- rc = em28xx_read_reg(dev, EM28XX_R0A_CHIPID);
- if (rc > 0) {
- dev->chip_id = rc;
-
- switch (dev->chip_id) {
- case CHIP_ID_EM2750:
- em28xx_info("chip ID is em2750\n");
- break;
- case CHIP_ID_EM2820:
- em28xx_info("chip ID is em2710 or em2820\n");
- break;
- case CHIP_ID_EM2840:
- em28xx_info("chip ID is em2840\n");
- break;
- case CHIP_ID_EM2860:
- em28xx_info("chip ID is em2860\n");
- break;
- case CHIP_ID_EM2870:
- em28xx_info("chip ID is em2870\n");
- dev->wait_after_write = 0;
- break;
- case CHIP_ID_EM2874:
- em28xx_info("chip ID is em2874\n");
- dev->reg_gpio_num = EM2874_R80_GPIO;
- dev->wait_after_write = 0;
- break;
- case CHIP_ID_EM2883:
- em28xx_info("chip ID is em2882/em2883\n");
- dev->wait_after_write = 0;
- break;
- default:
- em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
- }
- }
-
- /* Prepopulate cached GPO register content */
- rc = em28xx_read_reg(dev, dev->reg_gpo_num);
- if (rc >= 0)
- dev->reg_gpo = rc;
-
/* Set the initial XCLK and I2C clock values based on the board
definition */
em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk & 0x7f);
@@ -1876,9 +1883,8 @@ void em28xx_pre_card_setup(struct em28xx *dev)
/* request some modules */
switch (dev->model) {
case EM2861_BOARD_PLEXTOR_PX_TV100U:
- /* FIXME guess */
- /* Turn on analog audio output */
- em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
+ /* Sets the msp34xx I2S speed */
+ dev->i2s_speed = 2048000;
break;
case EM2861_BOARD_KWORLD_PVRTV_300U:
case EM2880_BOARD_KWORLD_DVB_305U:
@@ -2216,7 +2222,20 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
void em28xx_card_setup(struct em28xx *dev)
{
- em28xx_set_model(dev);
+ /*
+ * If the device can be a webcam, look for a sensor.
+ * If a sensor is not found, then it isn't a webcam.
+ */
+ if (dev->board.is_webcam) {
+ if (em28xx_hint_sensor(dev) < 0)
+ dev->board.is_webcam = 0;
+ else
+ dev->progressive = 1;
+ } else
+ em28xx_set_model(dev);
+
+ em28xx_info("Identified as %s (card=%d)\n",
+ dev->board.name, dev->model);
dev->tuner_type = em28xx_boards[dev->model].tuner_type;
if (em28xx_boards[dev->model].tuner_addr)
@@ -2290,10 +2309,6 @@ void em28xx_card_setup(struct em28xx *dev)
em28xx_gpio_set(dev, dev->board.tuner_gpio);
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
break;
- case EM2820_BOARD_SILVERCREST_WEBCAM:
- /* FIXME: need to document the registers bellow */
- em28xx_write_reg(dev, 0x0d, 0x42);
- em28xx_write_reg(dev, 0x13, 0x08);
}
if (dev->board.has_snapshot_button)
@@ -2367,7 +2382,9 @@ void em28xx_card_setup(struct em28xx *dev)
}
em28xx_tuner_setup(dev);
- em28xx_ir_init(dev);
+
+ if (!disable_ir)
+ em28xx_ir_init(dev);
}
@@ -2433,7 +2450,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
int minor)
{
struct em28xx *dev = *devhandle;
- int retval = -ENOMEM;
+ int retval;
int errCode;
dev->udev = udev;
@@ -2450,6 +2467,58 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
dev->em28xx_read_reg_req = em28xx_read_reg_req;
dev->board.is_em2800 = em28xx_boards[dev->model].is_em2800;
+ em28xx_set_model(dev);
+
+ /* Set the default GPO/GPIO for legacy devices */
+ dev->reg_gpo_num = EM2880_R04_GPO;
+ dev->reg_gpio_num = EM28XX_R08_GPIO;
+
+ dev->wait_after_write = 5;
+
+ /* Based on the Chip ID, set the device configuration */
+ retval = em28xx_read_reg(dev, EM28XX_R0A_CHIPID);
+ if (retval > 0) {
+ dev->chip_id = retval;
+
+ switch (dev->chip_id) {
+ case CHIP_ID_EM2710:
+ em28xx_info("chip ID is em2710\n");
+ break;
+ case CHIP_ID_EM2750:
+ em28xx_info("chip ID is em2750\n");
+ break;
+ case CHIP_ID_EM2820:
+ em28xx_info("chip ID is em2820 (or em2710)\n");
+ break;
+ case CHIP_ID_EM2840:
+ em28xx_info("chip ID is em2840\n");
+ break;
+ case CHIP_ID_EM2860:
+ em28xx_info("chip ID is em2860\n");
+ break;
+ case CHIP_ID_EM2870:
+ em28xx_info("chip ID is em2870\n");
+ dev->wait_after_write = 0;
+ break;
+ case CHIP_ID_EM2874:
+ em28xx_info("chip ID is em2874\n");
+ dev->reg_gpio_num = EM2874_R80_GPIO;
+ dev->wait_after_write = 0;
+ break;
+ case CHIP_ID_EM2883:
+ em28xx_info("chip ID is em2882/em2883\n");
+ dev->wait_after_write = 0;
+ break;
+ default:
+ em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
+ }
+ }
+
+ /* Prepopulate cached GPO register content */
+ retval = em28xx_read_reg(dev, dev->reg_gpo_num);
+ if (retval >= 0)
+ dev->reg_gpo = retval;
+
em28xx_pre_card_setup(dev);
if (!dev->board.is_em2800) {
@@ -2484,14 +2553,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
dev->vinmode = 0x10;
dev->vinctl = 0x11;
- /*
- * If the device can be a webcam, seek for a sensor.
- * If sensor is not found, then it isn't a webcam.
- */
- if (dev->board.is_webcam)
- if (em28xx_hint_sensor(dev) < 0)
- dev->board.is_webcam = 0;
-
/* Do board specific init and eeprom reading */
em28xx_card_setup(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 5b78e199abd..98e140b5d95 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -632,6 +632,9 @@ int em28xx_capture_start(struct em28xx *dev, int start)
return rc;
}
+ if (dev->board.is_webcam)
+ rc = em28xx_write_reg(dev, 0x13, 0x0c);
+
/* enable video capture */
rc = em28xx_write_reg(dev, 0x48, 0x00);
@@ -720,7 +723,10 @@ int em28xx_resolution_set(struct em28xx *dev)
{
int width, height;
width = norm_maxw(dev);
- height = norm_maxh(dev) >> 1;
+ height = norm_maxh(dev);
+
+ if (!dev->progressive)
+ height >>= 1;
em28xx_set_outfmt(dev);
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index cf0ac7f2a30..d603575431b 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -478,7 +478,6 @@ static int dvb_init(struct em28xx *dev)
}
break;
case EM2880_BOARD_KWORLD_DVB_310U:
- case EM2880_BOARD_EMPIRE_DUAL_TV:
dvb->frontend = dvb_attach(zl10353_attach,
&em28xx_zl10353_with_xc3028,
&dev->i2c_adap);
@@ -488,6 +487,7 @@ static int dvb_init(struct em28xx *dev)
}
break;
case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
+ case EM2880_BOARD_EMPIRE_DUAL_TV:
dvb->frontend = dvb_attach(zl10353_attach,
&em28xx_zl10353_xc3028_no_i2c_gate,
&dev->i2c_adap);
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index a2676d63cfd..6bf84bd787d 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -176,7 +176,8 @@
/* FIXME: Need to be populated with the other chip ID's */
enum em28xx_chip_id {
- CHIP_ID_EM2820 = 18, /* Also used by em2710 */
+ CHIP_ID_EM2710 = 17,
+ CHIP_ID_EM2820 = 18, /* Also used by some em2710 */
CHIP_ID_EM2840 = 20,
CHIP_ID_EM2750 = 33,
CHIP_ID_EM2860 = 34,
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index ff37b4c15f4..ab079d9256c 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -194,15 +194,24 @@ static void em28xx_copy_video(struct em28xx *dev,
startread = p;
remain = len;
- /* Interlaces frame */
- if (buf->top_field)
+ if (dev->progressive)
fieldstart = outp;
- else
- fieldstart = outp + bytesperline;
+ else {
+ /* Interlaces two half frames */
+ if (buf->top_field)
+ fieldstart = outp;
+ else
+ fieldstart = outp + bytesperline;
+ }
linesdone = dma_q->pos / bytesperline;
currlinedone = dma_q->pos % bytesperline;
- offset = linesdone * bytesperline * 2 + currlinedone;
+
+ if (dev->progressive)
+ offset = linesdone * bytesperline + currlinedone;
+ else
+ offset = linesdone * bytesperline * 2 + currlinedone;
+
startwrite = fieldstart + offset;
lencopy = bytesperline - currlinedone;
lencopy = lencopy > remain ? remain : lencopy;
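
In interlaced capture each incoming line belongs to one field, so consecutive captured lines land two buffer lines apart (hence the factor of 2 in the offset); in progressive mode they are contiguous. A small worked example of the offset formulas above, using hypothetical numbers:

	/* e.g. a 320-pixel-wide YUYV line: bytesperline = 640 */
	unsigned int bytesperline = 640, linesdone = 3, currlinedone = 100;

	unsigned int off_progressive = linesdone * bytesperline + currlinedone;		/* 2020 */
	unsigned int off_interlaced  = linesdone * bytesperline * 2 + currlinedone;	/* 3940 */
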
@@ -376,7 +385,7 @@ static inline int em28xx_isoc_copy(struct em28xx *dev, struct urb *urb)
em28xx_isocdbg("Video frame %d, length=%i, %s\n", p[2],
len, (p[2] & 1) ? "odd" : "even");
- if (!(p[2] & 1)) {
+ if (dev->progressive || !(p[2] & 1)) {
if (buf != NULL)
buffer_filled(dev, dma_q, buf);
get_next_buf(dma_q, &buf);
@@ -689,7 +698,10 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
/* FIXME: TOP? NONE? BOTTOM? ALTERNATE? */
- f->fmt.pix.field = dev->interlaced ?
+ if (dev->progressive)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ else
+ f->fmt.pix.field = dev->interlaced ?
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
mutex_unlock(&dev->lock);
@@ -753,7 +765,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.bytesperline = (dev->width * fmt->depth + 7) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * height;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ if (dev->progressive)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ else
+ f->fmt.pix.field = dev->interlaced ?
+ V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
return 0;
}
@@ -846,6 +862,41 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
return 0;
}
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *p)
+{
+ struct em28xx_fh *fh = priv;
+ struct em28xx *dev = fh->dev;
+ int rc = 0;
+
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (dev->board.is_webcam)
+ rc = v4l2_device_call_until_err(&dev->v4l2_dev, 0,
+ video, g_parm, p);
+ else
+ v4l2_video_std_frame_period(dev->norm,
+ &p->parm.capture.timeperframe);
+
+ return rc;
+}
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *p)
+{
+ struct em28xx_fh *fh = priv;
+ struct em28xx *dev = fh->dev;
+
+ if (!dev->board.is_webcam)
+ return -EINVAL;
+
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_parm, p);
+}
+
static const char *iname[] = {
[EM28XX_VMUX_COMPOSITE1] = "Composite1",
[EM28XX_VMUX_COMPOSITE2] = "Composite2",
@@ -1624,6 +1675,7 @@ static int em28xx_v4l2_open(struct file *filp)
struct em28xx *dev;
enum v4l2_buf_type fh_type;
struct em28xx_fh *fh;
+ enum v4l2_field field;
dev = em28xx_get_device(minor, &fh_type, &radio);
@@ -1665,8 +1717,13 @@ static int em28xx_v4l2_open(struct file *filp)
dev->users++;
+ if (dev->progressive)
+ field = V4L2_FIELD_NONE;
+ else
+ field = V4L2_FIELD_INTERLACED;
+
videobuf_queue_vmalloc_init(&fh->vb_vidq, &em28xx_video_qops,
- NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED,
+ NULL, &dev->slock, fh->type, field,
sizeof(struct em28xx_buffer), fh);
mutex_unlock(&dev->lock);
@@ -1885,6 +1942,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_s_std = vidioc_s_std,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 45bd513f62d..a2add61f7d5 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -367,6 +367,7 @@ enum em28xx_sensor {
EM28XX_NOSENSOR = 0,
EM28XX_MT9V011,
EM28XX_MT9M001,
+ EM28XX_MT9M111,
};
enum em28xx_adecoder {
@@ -484,6 +485,9 @@ struct em28xx {
int sensor_xres, sensor_yres;
int sensor_xtal;
+ /* Allows progressive (i.e., non-interlaced) mode */
+ int progressive;
+
/* Vinmode/Vinctl used at the driver */
int vinmode, vinctl;
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 34f46f2bc04..e994dcac43f 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -114,7 +114,7 @@ config USB_GSPCA_SN9C20X
config USB_GSPCA_SN9C20X_EVDEV
bool "Enable evdev support"
- depends on USB_GSPCA_SN9C20X
+ depends on USB_GSPCA_SN9C20X && INPUT
---help---
Say Y here in order to enable evdev support for sn9c20x webcam button.
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index ccd47f57f42..d678765cbba 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -1220,6 +1220,8 @@ static const struct video_device hdpvr_video_template = {
V4L2_STD_PAL_G | V4L2_STD_PAL_H | V4L2_STD_PAL_I |
V4L2_STD_PAL_D | V4L2_STD_PAL_M | V4L2_STD_PAL_N |
V4L2_STD_PAL_60,
+ .current_norm = V4L2_STD_NTSC | V4L2_STD_PAL_M |
+ V4L2_STD_PAL_60,
};
int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index a3b77ed3f08..4a9c8ce0ecb 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -17,6 +17,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/kernel.h>
#include "ivtv-driver.h"
#include "ivtv-cards.h"
@@ -281,7 +282,7 @@ int ivtv_s_ext_ctrls(struct file *file, void *fh, struct v4l2_ext_controls *c)
idx = p.audio_properties & 0x03;
/* The audio clock of the digitizer must match the codec sample
rate otherwise you get some very strange effects. */
- if (idx < sizeof(freqs))
+ if (idx < ARRAY_SIZE(freqs))
ivtv_call_all(itv, audio, s_clock_freq, freqs[idx]);
return err;
}
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index b2260de645f..cc85f77a570 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -52,13 +52,34 @@ static struct v4l2_queryctrl mt9v011_qctrl[] = {
.step = 1,
.default_value = 0,
.flags = 0,
- },
+ }, {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Vflip",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ }, {
+ }
};
struct mt9v011 {
struct v4l2_subdev sd;
unsigned width, height;
unsigned xtal;
+ unsigned hflip:1;
+ unsigned vflip:1;
u16 global_gain, red_bal, blue_bal;
};
@@ -131,7 +152,6 @@ static const struct i2c_reg_value mt9v011_init_default[] = {
{ R0A_MT9V011_CLK_SPEED, 0x0000 },
{ R1E_MT9V011_DIGITAL_ZOOM, 0x0000 },
- { R20_MT9V011_READ_MODE, 0x1000 },
{ R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */
};
@@ -156,7 +176,7 @@ static void set_balance(struct v4l2_subdev *sd)
mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
}
-static void calc_fps(struct v4l2_subdev *sd)
+static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator)
{
struct mt9v011 *core = to_mt9v011(sd);
unsigned height, width, hblank, vblank, speed;
@@ -179,6 +199,51 @@ static void calc_fps(struct v4l2_subdev *sd)
v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clocks)\n",
tmp / 1000, tmp % 1000, t_time);
+
+ if (numerator && denominator) {
+ *numerator = 1000;
+ *denominator = (u32)frames_per_ms;
+ }
+}
+
+static u16 calc_speed(struct v4l2_subdev *sd, u32 numerator, u32 denominator)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ unsigned height, width, hblank, vblank;
+ unsigned row_time, line_time;
+ u64 t_time, speed;
+
+ /* Avoid bogus calculation */
+ if (!numerator || !denominator)
+ return 0;
+
+ height = mt9v011_read(sd, R03_MT9V011_HEIGHT);
+ width = mt9v011_read(sd, R04_MT9V011_WIDTH);
+ hblank = mt9v011_read(sd, R05_MT9V011_HBLANK);
+ vblank = mt9v011_read(sd, R06_MT9V011_VBLANK);
+
+ row_time = width + 113 + hblank;
+ line_time = height + vblank + 1;
+
+ t_time = core->xtal * ((u64)numerator);
+ /* round to the closest value */
+ t_time += denominator / 2;
+ do_div(t_time, denominator);
+
+ speed = t_time;
+ do_div(speed, row_time * line_time);
+
+ /* Avoid having a negative value for speed */
+ if (speed < 2)
+ speed = 0;
+ else
+ speed -= 2;
+
+ /* Avoid speed overflow */
+ if (speed > 15)
+ return 15;
+
+ return (u16)speed;
}
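
calc_speed() keeps the intermediate product in a u64 and divides with do_div() because a plain 64-by-32 division would need libgcc helpers the kernel does not provide on 32-bit targets; do_div() divides its first argument in place and returns the remainder. A minimal sketch of the rounding division used above (the concrete numbers are only illustrative):

#include <asm/div64.h>
#include <linux/types.h>

static u64 rounded_div_example(void)
{
	u64 t_time = 27000000ULL * 1000;	/* xtal * numerator, as in calc_speed() */
	u32 denominator = 30;

	t_time += denominator / 2;	/* round to nearest rather than truncating */
	do_div(t_time, denominator);	/* quotient left in t_time, remainder returned */
	return t_time;
}
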
static void set_res(struct v4l2_subdev *sd)
@@ -207,9 +272,23 @@ static void set_res(struct v4l2_subdev *sd)
mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height);
mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height);
- calc_fps(sd);
+ calc_fps(sd, NULL, NULL);
};
+static void set_read_mode(struct v4l2_subdev *sd)
+{
+ struct mt9v011 *core = to_mt9v011(sd);
+ unsigned mode = 0x1000;
+
+ if (core->hflip)
+ mode |= 0x4000;
+
+ if (core->vflip)
+ mode |= 0x8000;
+
+ mt9v011_write(sd, R20_MT9V011_READ_MODE, mode);
+}
+
static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
{
int i;
@@ -220,6 +299,7 @@ static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
set_balance(sd);
set_res(sd);
+ set_read_mode(sd);
return 0;
};
@@ -240,6 +320,12 @@ static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_BLUE_BALANCE:
ctrl->value = core->blue_bal;
return 0;
+ case V4L2_CID_HFLIP:
+ ctrl->value = core->hflip ? 1 : 0;
+ return 0;
+ case V4L2_CID_VFLIP:
+ ctrl->value = core->vflip ? 1 : 0;
+ return 0;
}
return -EINVAL;
}
@@ -288,6 +374,14 @@ static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_BLUE_BALANCE:
core->blue_bal = ctrl->value;
break;
+ case V4L2_CID_HFLIP:
+ core->hflip = ctrl->value;
+ set_read_mode(sd);
+ return 0;
+ case V4L2_CID_VFLIP:
+ core->vflip = ctrl->value;
+ set_read_mode(sd);
+ return 0;
default:
return -EINVAL;
}
@@ -322,6 +416,44 @@ static int mt9v011_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
return 0;
}
+static int mt9v011_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
+{
+ struct v4l2_captureparm *cp = &parms->parm.capture;
+
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(cp, 0, sizeof(struct v4l2_captureparm));
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+ calc_fps(sd,
+ &cp->timeperframe.numerator,
+ &cp->timeperframe.denominator);
+
+ return 0;
+}
+
+static int mt9v011_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
+{
+ struct v4l2_captureparm *cp = &parms->parm.capture;
+ struct v4l2_fract *tpf = &cp->timeperframe;
+ u16 speed;
+
+ if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (cp->extendedmode != 0)
+ return -EINVAL;
+
+ speed = calc_speed(sd, tpf->numerator, tpf->denominator);
+
+ mt9v011_write(sd, R0A_MT9V011_CLK_SPEED, speed);
+ v4l2_dbg(1, debug, sd, "Setting speed to %d\n", speed);
+
+ /* Recalculate and update fps info */
+ calc_fps(sd, &tpf->numerator, &tpf->denominator);
+
+ return 0;
+}
+
static int mt9v011_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
{
struct v4l2_pix_format *pix = &fmt->fmt.pix;
@@ -393,10 +525,13 @@ static int mt9v011_s_register(struct v4l2_subdev *sd,
static int mt9v011_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
+ u16 version;
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
+
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011,
- MT9V011_VERSION);
+ version);
}
static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
@@ -416,6 +551,8 @@ static const struct v4l2_subdev_video_ops mt9v011_video_ops = {
.enum_fmt = mt9v011_enum_fmt,
.try_fmt = mt9v011_try_fmt,
.s_fmt = mt9v011_s_fmt,
+ .g_parm = mt9v011_g_parm,
+ .s_parm = mt9v011_s_parm,
};
static const struct v4l2_subdev_ops mt9v011_ops = {
@@ -449,8 +586,9 @@ static int mt9v011_probe(struct i2c_client *c,
/* Check if the sensor is really a MT9V011 */
version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
- if (version != MT9V011_VERSION) {
- v4l2_info(sd, "*** unknown micron chip detected (0x%04x.\n",
+ if ((version != MT9V011_VERSION) &&
+ (version != MT9V011_REV_B_VERSION)) {
+ v4l2_info(sd, "*** unknown micron chip detected (0x%04x).\n",
version);
kfree(core);
return -EINVAL;
@@ -461,8 +599,8 @@ static int mt9v011_probe(struct i2c_client *c,
core->height = 480;
core->xtal = 27000000; /* Hz */
- v4l_info(c, "chip found @ 0x%02x (%s)\n",
- c->addr << 1, c->adapter->name);
+ v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
+ c->addr << 1, c->adapter->name, version);
return 0;
}
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
index 9e443ee3055..3350fd6083c 100644
--- a/drivers/media/video/mt9v011.h
+++ b/drivers/media/video/mt9v011.h
@@ -30,6 +30,7 @@
#define R35_MT9V011_GLOBAL_GAIN 0x35
#define RF1_MT9V011_CHIP_ENABLE 0xf1
-#define MT9V011_VERSION 0x8243
+#define MT9V011_VERSION 0x8232
+#define MT9V011_REV_B_VERSION 0x8243
#endif
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 2d075205bdf..736c31d2319 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -234,6 +234,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
return ret;
}
+/* Called under spinlock_irqsave(&pcdev->lock, ...) */
static void mx1_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
@@ -241,13 +242,10 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq,
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct mx1_camera_dev *pcdev = ici->priv;
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
- unsigned long flags;
dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
- spin_lock_irqsave(&pcdev->lock, flags);
-
list_add_tail(&vb->queue, &pcdev->capture);
vb->state = VIDEOBUF_ACTIVE;
@@ -264,8 +262,6 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq,
__raw_writel(temp, pcdev->base + CSICR1);
}
}
-
- spin_unlock_irqrestore(&pcdev->lock, flags);
}
static void mx1_videobuf_release(struct videobuf_queue *vq,
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index e605c076ed8..9770cb7932c 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -332,7 +332,10 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
}
}
-/* Called with .vb_lock held */
+/*
+ * Called with .vb_lock mutex held and
+ * under spinlock_irqsave(&mx3_cam->lock, ...)
+ */
static void mx3_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
@@ -346,7 +349,8 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq,
struct idmac_video_param *video = &ichan->params.video;
const struct soc_camera_data_format *data_fmt = icd->current_fmt;
dma_cookie_t cookie;
- unsigned long flags;
+
+ BUG_ON(!irqs_disabled());
/* This is the configuration of one sg-element */
video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc);
@@ -359,8 +363,6 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq,
memset((void *)vb->baddr, 0xaa, vb->bsize);
#endif
- spin_lock_irqsave(&mx3_cam->lock, flags);
-
list_add_tail(&vb->queue, &mx3_cam->capture);
if (!mx3_cam->active) {
@@ -370,24 +372,23 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq,
vb->state = VIDEOBUF_QUEUED;
}
- spin_unlock_irqrestore(&mx3_cam->lock, flags);
+ spin_unlock_irq(&mx3_cam->lock);
cookie = txd->tx_submit(txd);
dev_dbg(&icd->dev, "Submitted cookie %d DMA 0x%08x\n", cookie, sg_dma_address(&buf->sg));
+
+ spin_lock_irq(&mx3_cam->lock);
+
if (cookie >= 0)
return;
/* Submit error */
vb->state = VIDEOBUF_PREPARED;
- spin_lock_irqsave(&mx3_cam->lock, flags);
-
list_del_init(&vb->queue);
if (mx3_cam->active == buf)
mx3_cam->active = NULL;
-
- spin_unlock_irqrestore(&mx3_cam->lock, flags);
}
/* Called with .vb_lock held */
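
With the queue callback now entered under the driver spinlock (see the "Called under spinlock" comments added throughout this series), mx3_videobuf_queue has to drop that lock around txd->tx_submit(), which may call back into the driver and must not run with it held, and then retake it before rolling back the queueing on failure. Condensed, the sequence is:

	/* caller already holds mx3_cam->lock with interrupts disabled */
	spin_unlock_irq(&mx3_cam->lock);
	cookie = txd->tx_submit(txd);		/* may call back into the driver */
	spin_lock_irq(&mx3_cam->lock);

	if (cookie < 0) {			/* submit failed: undo the queueing */
		vb->state = VIDEOBUF_PREPARED;
		list_del_init(&vb->queue);
		if (mx3_cam->active == buf)
			mx3_cam->active = NULL;
	}
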
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 46e0d8ad880..016bb45ba0c 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -612,6 +612,7 @@ static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
dev_dbg(pcdev->soc_host.dev, "%s\n", __func__);
}
+/* Called under spinlock_irqsave(&pcdev->lock, ...) */
static void pxa_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
@@ -619,13 +620,10 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
- unsigned long flags;
dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d active=%p\n", __func__,
vb, vb->baddr, vb->bsize, pcdev->active);
- spin_lock_irqsave(&pcdev->lock, flags);
-
list_add_tail(&vb->queue, &pcdev->capture);
vb->state = VIDEOBUF_ACTIVE;
@@ -633,8 +631,6 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
if (!pcdev->active)
pxa_camera_start_capture(pcdev);
-
- spin_unlock_irqrestore(&pcdev->lock, flags);
}
static void pxa_videobuf_release(struct videobuf_queue *vq,
@@ -1579,6 +1575,7 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev)
pcdev->mclk = 20000000;
}
+ pcdev->soc_host.dev = &pdev->dev;
pcdev->mclk_divisor = mclk_get_divisor(pcdev);
INIT_LIST_HEAD(&pcdev->capture);
@@ -1644,7 +1641,6 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev)
pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME;
pcdev->soc_host.ops = &pxa_soc_camera_host_ops;
pcdev->soc_host.priv = pcdev;
- pcdev->soc_host.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
err = soc_camera_host_register(&pcdev->soc_host);
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 06861b782b9..6eebe3ef97d 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -3331,8 +3331,8 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0200100,
},
},
- [SAA7134_BOARD_HAUPPAUGE_HVR1120] = {
- .name = "Hauppauge WinTV-HVR1120 ATSC/QAM-Hybrid",
+ [SAA7134_BOARD_HAUPPAUGE_HVR1150] = {
+ .name = "Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_TDA8290,
.radio_type = UNSET,
@@ -3363,8 +3363,8 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0800100, /* GPIO 23 HI for FM */
},
},
- [SAA7134_BOARD_HAUPPAUGE_HVR1110R3] = {
- .name = "Hauppauge WinTV-HVR1110r3 DVB-T/Hybrid",
+ [SAA7134_BOARD_HAUPPAUGE_HVR1120] = {
+ .name = "Hauppauge WinTV-HVR1120 DVB-T/Hybrid",
.audio_clock = 0x00187de7,
.tuner_type = TUNER_PHILIPS_TDA8290,
.radio_type = UNSET,
@@ -5862,31 +5862,31 @@ struct pci_device_id saa7134_pci_tbl[] = {
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x0070,
.subdevice = 0x6706,
- .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120,
+ .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x0070,
.subdevice = 0x6707,
- .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3,
+ .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x0070,
.subdevice = 0x6708,
- .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120,
+ .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x0070,
.subdevice = 0x6709,
- .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3,
+ .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x0070,
.subdevice = 0x670a,
- .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110R3,
+ .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120,
},{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
@@ -6363,8 +6363,8 @@ static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
switch (command) {
case TDA18271_CALLBACK_CMD_AGC_ENABLE: /* 0 */
switch (dev->board) {
+ case SAA7134_BOARD_HAUPPAUGE_HVR1150:
case SAA7134_BOARD_HAUPPAUGE_HVR1120:
- case SAA7134_BOARD_HAUPPAUGE_HVR1110R3:
ret = saa7134_tda18271_hvr11x0_toggle_agc(dev, arg);
break;
default:
@@ -6384,8 +6384,8 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev,
int ret;
switch (dev->board) {
+ case SAA7134_BOARD_HAUPPAUGE_HVR1150:
case SAA7134_BOARD_HAUPPAUGE_HVR1120:
- case SAA7134_BOARD_HAUPPAUGE_HVR1110R3:
/* tda8290 + tda18271 */
ret = saa7134_tda8290_18271_callback(dev, command, arg);
break;
@@ -6427,7 +6427,7 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data)
switch (tv.model) {
case 67019: /* WinTV-HVR1110 (Retail, IR Blaster, hybrid, FM, SVid/Comp, 3.5mm audio in) */
case 67109: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */
- case 67201: /* WinTV-HVR1120 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */
+ case 67201: /* WinTV-HVR1150 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */
case 67301: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */
case 67209: /* WinTV-HVR1110 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */
case 67559: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
@@ -6435,7 +6435,7 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data)
case 67579: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM) */
case 67589: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */
case 67599: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */
- case 67651: /* WinTV-HVR1120 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
+ case 67651: /* WinTV-HVR1150 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
break;
default:
@@ -6625,8 +6625,8 @@ int saa7134_board_init1(struct saa7134_dev *dev)
saa_writeb (SAA7134_PRODUCTION_TEST_MODE, 0x00);
break;
+ case SAA7134_BOARD_HAUPPAUGE_HVR1150:
case SAA7134_BOARD_HAUPPAUGE_HVR1120:
- case SAA7134_BOARD_HAUPPAUGE_HVR1110R3:
/* GPIO 26 high for digital, low for analog */
saa7134_set_gpio(dev, 26, 0);
msleep(1);
@@ -6891,8 +6891,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
dev->name, saa7134_boards[dev->board].name);
}
break;
+ case SAA7134_BOARD_HAUPPAUGE_HVR1150:
case SAA7134_BOARD_HAUPPAUGE_HVR1120:
- case SAA7134_BOARD_HAUPPAUGE_HVR1110R3:
hauppauge_eeprom(dev, dev->eedata+0x80);
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 31930f26ffc..98f3efd1e94 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1119,7 +1119,7 @@ static int dvb_init(struct saa7134_dev *dev)
&tda827x_cfg_2) < 0)
goto dettach_frontend;
break;
- case SAA7134_BOARD_HAUPPAUGE_HVR1110R3:
+ case SAA7134_BOARD_HAUPPAUGE_HVR1120:
fe0->dvb.frontend = dvb_attach(tda10048_attach,
&hcw_tda10048_config,
&dev->i2c_adap);
@@ -1147,7 +1147,7 @@ static int dvb_init(struct saa7134_dev *dev)
&tda827x_cfg_1) < 0)
goto dettach_frontend;
break;
- case SAA7134_BOARD_HAUPPAUGE_HVR1120:
+ case SAA7134_BOARD_HAUPPAUGE_HVR1150:
fe0->dvb.frontend = dvb_attach(lgdt3305_attach,
&hcw_lgdt3305_config,
&dev->i2c_adap);
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 82268848f26..fb564f14887 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -278,8 +278,8 @@ struct saa7134_format {
#define SAA7134_BOARD_ASUSTeK_TIGER 152
#define SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG 153
#define SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS 154
-#define SAA7134_BOARD_HAUPPAUGE_HVR1120 155
-#define SAA7134_BOARD_HAUPPAUGE_HVR1110R3 156
+#define SAA7134_BOARD_HAUPPAUGE_HVR1150 155
+#define SAA7134_BOARD_HAUPPAUGE_HVR1120 156
#define SAA7134_BOARD_AVERMEDIA_STUDIO_507UA 157
#define SAA7134_BOARD_AVERMEDIA_CARDBUS_501 158
#define SAA7134_BOARD_BEHOLD_505RDS 159
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 0db88a53d92..e86878deea7 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -282,27 +282,24 @@ out:
return ret;
}
+/* Called under spinlock_irqsave(&pcdev->lock, ...) */
static void sh_mobile_ceu_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- unsigned long flags;
dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %zd\n", __func__,
vb, vb->baddr, vb->bsize);
vb->state = VIDEOBUF_QUEUED;
- spin_lock_irqsave(&pcdev->lock, flags);
list_add_tail(&vb->queue, &pcdev->capture);
if (!pcdev->active) {
pcdev->active = vb;
sh_mobile_ceu_capture(pcdev);
}
-
- spin_unlock_irqrestore(&pcdev->lock, flags);
}
static void sh_mobile_ceu_videobuf_release(struct videobuf_queue *vq,
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 4d6785e6345..b154bd961e3 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1050,8 +1050,8 @@ static int stk_setup_format(struct stk_camera *dev)
depth = 1;
else
depth = 2;
- while (stk_sizes[i].m != dev->vsettings.mode
- && i < ARRAY_SIZE(stk_sizes))
+ while (i < ARRAY_SIZE(stk_sizes) &&
+ stk_sizes[i].m != dev->vsettings.mode)
i++;
if (i == ARRAY_SIZE(stk_sizes)) {
STK_ERROR("Something is broken in %s\n", __func__);
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 89927b7aec2..04b47832fa0 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1845,11 +1845,29 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_STREAM_NO_FID },
- /* ViMicro */
- { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ /* ViMicro Vega */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0ac8,
+ .idProduct = 0x332d,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FIX_BANDWIDTH },
+ /* ViMicro - Minoru3D */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x0ac8,
+ .idProduct = 0x3410,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FIX_BANDWIDTH },
+ /* ViMicro Venus - Minoru3D */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0ac8,
- .idProduct = 0x0000,
+ .idProduct = 0x3420,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c
index f152a990386..1ca6dff7361 100644
--- a/drivers/media/video/uvc/uvc_status.c
+++ b/drivers/media/video/uvc/uvc_status.c
@@ -145,8 +145,8 @@ static void uvc_status_complete(struct urb *urb)
break;
default:
- uvc_printk(KERN_INFO, "unknown event type %u.\n",
- dev->status[0]);
+ uvc_trace(UVC_TRACE_STATUS, "Unknown status event "
+ "type %u.\n", dev->status[0]);
break;
}
}
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index be64a502ea2..f2afc4e0837 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1081,8 +1081,10 @@ static long __video_do_ioctl(struct file *file,
/* Calls the specific handler */
if (ops->vidioc_g_std)
ret = ops->vidioc_g_std(file, fh, id);
- else
+ else if (vfd->current_norm)
*id = vfd->current_norm;
+ else
+ ret = -EINVAL;
if (!ret)
dbgarg(cmd, "std=0x%08Lx\n", (long long unsigned)*id);
@@ -1553,12 +1555,19 @@ static long __video_do_ioctl(struct file *file,
break;
ret = ops->vidioc_g_parm(file, fh, p);
} else {
+ v4l2_std_id std = vfd->current_norm;
+
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- v4l2_video_std_frame_period(vfd->current_norm,
- &p->parm.capture.timeperframe);
ret = 0;
+ if (ops->vidioc_g_std)
+ ret = ops->vidioc_g_std(file, fh, &std);
+ else if (std == 0)
+ ret = -EINVAL;
+ if (ret == 0)
+ v4l2_video_std_frame_period(std,
+ &p->parm.capture.timeperframe);
}
dbgarg(cmd, "type=%d\n", p->type);
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index fc976f42f43..2622a6e63da 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -695,7 +695,7 @@ static int zr364xx_release(struct file *file)
for (i = 0; i < 2; i++) {
err =
send_control_msg(udev, 1, init[cam->method][i].value,
- 0, init[i][cam->method].bytes,
+ 0, init[cam->method][i].bytes,
init[cam->method][i].size);
if (err < 0) {
dev_err(&udev->dev, "error during release sequence\n");
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ae5fe91867e..10ed195c0c1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -736,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->partitioned = 1;
return add_mtd_partitions(&flash->mtd, parts, nr_parts);
}
- } else if (data->nr_parts)
+ } else if (data && data->nr_parts)
dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
data->nr_parts, data->name);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 7ad972229db..0d9d4bc9c76 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -61,7 +61,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
buf64 = (uint64_t *)buf;
while (i < len/8) {
uint64_t x;
- asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base));
+ asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
buf64[i++] = x;
}
i *= 8;
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index fb86cacd5bd..1002e188299 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
+ loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OOB_PLACE;
- ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd->read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
+ loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OOB_PLACE;
- ops.ooboffs = offs & (mtd->writesize - 1);
+ ops.ooboffs = offs & mask;
ops.ooblen = len;
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd->write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
+ loff_t mask = mtd->writesize - 1;
struct mtd_oob_ops ops;
int res;
ops.mode = MTD_OOB_PLACE;
- ops.ooboffs = offs;
+ ops.ooboffs = offs & mask;
ops.ooblen = mtd->oobsize;
ops.oobbuf = oob;
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd->write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.retlen;
return res;
}
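Illustrative sketch (not part of the patch above): the nftlcore hunks hoist the page mask into a loff_t and apply it consistently, so ooboffs becomes the offset within the page and the read/write address is page-aligned. Keeping the mask 64-bit also avoids ~mask being computed in a 32-bit type and silently clearing the upper half of a large offset. A stand-alone illustration with made-up values:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint32_t writesize = 2048;		/* hypothetical page size */
	uint64_t offs = 0x123456789AULL;	/* hypothetical flash offset */
	uint64_t mask = writesize - 1;		/* 64-bit, like the loff_t mask */

	uint64_t page_addr = offs & ~mask;	/* where the page starts */
	uint64_t ooboffs   = offs & mask;	/* offset inside the page */

	printf("page 0x%" PRIx64 ", in-page offset 0x%" PRIx64 "\n",
	       page_addr, ooboffs);
	return 0;
}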
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c2041685094..45675889850 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -235,6 +235,7 @@ enum vortex_chips {
CH_3C900B_FL,
CH_3C905_1,
CH_3C905_2,
+ CH_3C905B_TX,
CH_3C905B_1,
CH_3C905B_2,
@@ -307,6 +308,8 @@ static struct vortex_chip_info {
PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
{"3c905 Boomerang 100baseT4",
PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
+ {"3C905B-TX Fast Etherlink XL PCI",
+ PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
{"3c905B Cyclone 100baseTx",
PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
@@ -389,6 +392,7 @@ static struct pci_device_id vortex_pci_tbl[] = {
{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
+ { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 50efde11ea6..d0dbbf39349 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -515,7 +515,7 @@ rx_status_loop:
dma_addr_t mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
- unsigned buflen;
+ const unsigned buflen = cp->rx_buf_sz;
skb = cp->rx_skb[rx_tail];
BUG_ON(!skb);
@@ -549,8 +549,7 @@ rx_status_loop:
pr_debug("%s: rx slot %d status 0x%x len %d\n",
dev->name, rx_tail, status, len);
- buflen = cp->rx_buf_sz + NET_IP_ALIGN;
- new_skb = netdev_alloc_skb(dev, buflen);
+ new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
if (!new_skb) {
dev->stats.rx_dropped++;
goto rx_next;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5f6509a5f64..5ce7cbabd7a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1727,12 +1727,14 @@ config KS8842
tristate "Micrel KSZ8842"
depends on HAS_IOMEM
help
- This platform driver is for Micrel KSZ8842 chip.
+ This platform driver is for Micrel KSZ8842 / KS8842
+ 2-port ethernet switch chip (managed, VLAN, QoS).
config KS8851
tristate "Micrel KS8851 SPI"
depends on SPI
select MII
+ select CRC32
help
SPI driver for Micrel KS8851 SPI attached network chip.
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 616fb7985a3..ddd231cb54b 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -1080,7 +1080,7 @@ static struct platform_driver w90p910_ether_driver = {
.probe = w90p910_ether_probe,
.remove = __devexit_p(w90p910_ether_remove),
.driver = {
- .name = "w90p910-emc",
+ .name = "nuc900-emc",
.owner = THIS_MODULE,
},
};
@@ -1101,5 +1101,5 @@ module_exit(w90p910_ether_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-emc");
+MODULE_ALIAS("platform:nuc900-emc");
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 607007d75b6..00d11b480af 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -232,11 +232,11 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strncpy(drvinfo->version, atl1c_driver_version,
+ strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl1c_driver_version,
sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 94d7325caf4..8bca12f7139 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3378,11 +3378,11 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
+ strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}
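Illustrative sketch (not part of the patch above): the atl1c and atl1 hunks replace strncpy() with strlcpy() so the ethtool strings are always NUL-terminated even when the source is longer than the destination. A stand-alone version of the same idea; my_strlcpy() below is a local stand-in mirroring the kernel helper's semantics:

#include <stdio.h>
#include <string.h>

/* Copy at most size-1 bytes, always NUL-terminate, return strlen(src). */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char driver[8];

	/* strncpy() would have left 'driver' unterminated here;
	 * the strlcpy()-style copy truncates and terminates. */
	my_strlcpy(driver, "atl1c-very-long-name", sizeof(driver));
	printf("driver = \"%s\"\n", driver);
	return 0;
}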
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 36d4d377ec2..bafca672ea7 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -952,9 +952,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
int rc = NETDEV_TX_OK;
dma_addr_t mapping;
u32 len, entry, ctrl;
+ unsigned long flags;
len = skb->len;
- spin_lock_irq(&bp->lock);
+ spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@@ -1027,7 +1028,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
out_unlock:
- spin_unlock_irq(&bp->lock);
+ spin_unlock_irqrestore(&bp->lock, flags);
return rc;
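Illustrative sketch (not part of the patch above): the b44 hunk (and the macb, fec_mpc52xx and ixp2000 hunks further down) switch the TX path from spin_lock_irq()/local_irq_disable() to the irqsave variants, because ndo_start_xmit can be entered with interrupts already disabled (netpoll is the usual case) and an unconditional irq-enable on unlock would be wrong. A sketch of the pattern; example_priv and the ring handling are placeholders, not any driver's real code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_priv {			/* hypothetical driver state */
	spinlock_t lock;
};

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* Save the caller's interrupt state instead of assuming
	 * interrupts were enabled on entry. */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... place the skb on the hardware TX ring here ... */
	spin_unlock_irqrestore(&priv->lock, flags);

	dev_kfree_skb_any(skb);		/* sketch only: a real driver frees
					 * the skb from its completion path */
	return NETDEV_TX_OK;
}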
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b70cc99962f..06b901152d4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -399,9 +399,11 @@ static int bnx2_unregister_cnic(struct net_device *dev)
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ mutex_lock(&bp->cnic_lock);
cp->drv_state = 0;
bnapi->cnic_present = 0;
rcu_assign_pointer(bp->cnic_ops, NULL);
+ mutex_unlock(&bp->cnic_lock);
synchronize_rcu();
return 0;
}
@@ -429,13 +431,13 @@ bnx2_cnic_stop(struct bnx2 *bp)
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
+ mutex_lock(&bp->cnic_lock);
+ c_ops = bp->cnic_ops;
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
- rcu_read_unlock();
+ mutex_unlock(&bp->cnic_lock);
}
static void
@@ -444,8 +446,8 @@ bnx2_cnic_start(struct bnx2 *bp)
struct cnic_ops *c_ops;
struct cnic_ctl_info info;
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
+ mutex_lock(&bp->cnic_lock);
+ c_ops = bp->cnic_ops;
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -455,7 +457,7 @@ bnx2_cnic_start(struct bnx2 *bp)
info.cmd = CNIC_CTL_START_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
}
- rcu_read_unlock();
+ mutex_unlock(&bp->cnic_lock);
}
#else
@@ -7663,6 +7665,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
spin_lock_init(&bp->phy_lock);
spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+ mutex_init(&bp->cnic_lock);
+#endif
INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index f1edfaa9e56..a4f12fd0ecd 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6902,6 +6902,7 @@ struct bnx2 {
u32 idle_chk_status_idx;
#ifdef BCM_CNIC
+ struct mutex cnic_lock;
struct cnic_eth_dev cnic_eth_dev;
#endif
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 9e4283aff82..e1a4f821423 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -611,11 +611,18 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int can_newlink(struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ return -EOPNOTSUPP;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
.setup = can_setup,
+ .newlink = can_newlink,
.changelink = can_changelink,
.fill_info = can_fill_info,
.fill_xstats = can_fill_xstats,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4869d77cbe9..74c342959b7 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
return NULL;
}
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+ atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+ atomic_dec(&ulp_ops->ref_count);
+}
+
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
}
read_unlock(&cnic_dev_lock);
+ atomic_set(&ulp_ops->ref_count, 0);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
mutex_unlock(&cnic_lock);
@@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
int cnic_unregister_driver(int ulp_type)
{
struct cnic_dev *dev;
+ struct cnic_ulp_ops *ulp_ops;
+ int i = 0;
if (ulp_type >= MAX_CNIC_ULP_TYPE) {
printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (!cnic_ulp_tbl[ulp_type]) {
+ ulp_ops = cnic_ulp_tbl[ulp_type];
+ if (!ulp_ops) {
printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
"been registered\n", ulp_type);
goto out_unlock;
@@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
mutex_unlock(&cnic_lock);
synchronize_rcu();
+ while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+ msleep(100);
+ i++;
+ }
+
+ if (atomic_read(&ulp_ops->ref_count) != 0)
+ printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
+ " to zero.\n", dev->netdev->name);
return 0;
out_unlock:
@@ -466,6 +488,7 @@ EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
struct cnic_local *cp = dev->cnic_priv;
+ int i = 0;
if (ulp_type >= MAX_CNIC_ULP_TYPE) {
printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
@@ -486,6 +509,15 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
synchronize_rcu();
+ while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+ i < 20) {
+ msleep(100);
+ i++;
+ }
+ if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+ printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
+ " to complete.\n", dev->netdev->name);
+
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1076,18 +1108,23 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
if (cp->cnic_uinfo)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
- rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
- ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
- if (!ulp_ops)
+ mutex_lock(&cnic_lock);
+ ulp_ops = cp->ulp_ops[if_type];
+ if (!ulp_ops) {
+ mutex_unlock(&cnic_lock);
continue;
+ }
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
- rcu_read_unlock();
}
static void cnic_ulp_start(struct cnic_dev *dev)
@@ -1095,18 +1132,23 @@ static void cnic_ulp_start(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int if_type;
- rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
- ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
- if (!ulp_ops || !ulp_ops->cnic_start)
+ mutex_lock(&cnic_lock);
+ ulp_ops = cp->ulp_ops[if_type];
+ if (!ulp_ops || !ulp_ops->cnic_start) {
+ mutex_unlock(&cnic_lock);
continue;
+ }
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
- rcu_read_unlock();
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
@@ -1116,22 +1158,18 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
cnic_hold(dev);
- mutex_lock(&cnic_lock);
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
- mutex_unlock(&cnic_lock);
cnic_put(dev);
break;
case CNIC_CTL_START_CMD:
cnic_hold(dev);
- mutex_lock(&cnic_lock);
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
- mutex_unlock(&cnic_lock);
cnic_put(dev);
break;
default:
@@ -1145,19 +1183,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
int i;
struct cnic_local *cp = dev->cnic_priv;
- rcu_read_lock();
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
- ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
- if (!ulp_ops || !ulp_ops->cnic_init)
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl[i];
+ if (!ulp_ops || !ulp_ops->cnic_init) {
+ mutex_unlock(&cnic_lock);
continue;
+ }
+ ulp_get(ulp_ops);
+ mutex_unlock(&cnic_lock);
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_init(dev);
+ ulp_put(ulp_ops);
}
- rcu_read_unlock();
}
static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1165,19 +1207,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
int i;
struct cnic_local *cp = dev->cnic_priv;
- rcu_read_lock();
for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
struct cnic_ulp_ops *ulp_ops;
- ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
- if (!ulp_ops || !ulp_ops->cnic_exit)
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl[i];
+ if (!ulp_ops || !ulp_ops->cnic_exit) {
+ mutex_unlock(&cnic_lock);
continue;
+ }
+ ulp_get(ulp_ops);
+ mutex_unlock(&cnic_lock);
if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
ulp_ops->cnic_exit(dev);
+ ulp_put(ulp_ops);
}
- rcu_read_unlock();
}
static int cnic_cm_offload_pg(struct cnic_sock *csk)
@@ -2393,21 +2439,45 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return 0;
}
-static int cnic_start_hw(struct cnic_dev *dev)
+static int cnic_register_netdev(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
int err;
- if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
- return -EALREADY;
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+ return 0;
err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
- if (err) {
+ if (err)
printk(KERN_ERR PFX "%s: register_cnic failed\n",
dev->netdev->name);
- goto err2;
- }
+
+ return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+
+ if (!ethdev)
+ return;
+
+ ethdev->drv_unregister_cnic(dev->netdev);
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ int err;
+
+ if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ return -EALREADY;
dev->regview = ethdev->io_base;
cp->chip_id = ethdev->chip_id;
@@ -2438,18 +2508,13 @@ static int cnic_start_hw(struct cnic_dev *dev)
return 0;
err1:
- ethdev->drv_unregister_cnic(dev->netdev);
cp->free_resc(dev);
pci_dev_put(dev->pcidev);
-err2:
return err;
}
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
- struct cnic_local *cp = dev->cnic_priv;
- struct cnic_eth_dev *ethdev = cp->ethdev;
-
cnic_disable_bnx2_int_sync(dev);
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
@@ -2461,8 +2526,6 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
cnic_setup_5709_context(dev, 0);
cnic_free_irq(dev);
- ethdev->drv_unregister_cnic(dev->netdev);
-
cnic_free_resc(dev);
}
@@ -2543,7 +2606,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
probe = symbol_get(bnx2_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
- symbol_put_addr(probe);
+ symbol_put(bnx2_cnic_probe);
}
if (!ethdev)
return NULL;
@@ -2646,10 +2709,12 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
else if (event == NETDEV_UP) {
- mutex_lock(&cnic_lock);
+ if (cnic_register_netdev(dev) != 0) {
+ cnic_put(dev);
+ goto done;
+ }
if (!cnic_start_hw(dev))
cnic_ulp_start(dev);
- mutex_unlock(&cnic_lock);
}
rcu_read_lock();
@@ -2668,10 +2733,9 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
rcu_read_unlock();
if (event == NETDEV_GOING_DOWN) {
- mutex_lock(&cnic_lock);
cnic_ulp_stop(dev);
cnic_stop_hw(dev);
- mutex_unlock(&cnic_lock);
+ cnic_unregister_netdev(dev);
} else if (event == NETDEV_UNREGISTER) {
write_lock(&cnic_dev_lock);
list_del_init(&dev->list);
@@ -2703,6 +2767,7 @@ static void cnic_release(void)
}
cnic_ulp_exit(dev);
+ cnic_unregister_netdev(dev);
list_del_init(&dev->list);
cnic_free_dev(dev);
}
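Illustrative sketch (not part of the patch above): the cnic changes add an atomic reference count to cnic_ulp_ops, taken around every upcall via ulp_get()/ulp_put(), and make the unregister paths poll for up to roughly two seconds before warning that callers are still inside. A stand-alone, userspace rendering of that wait loop (usleep() stands in for msleep(), and the struct is a placeholder):

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct ulp_ops_example {
	atomic_int ref_count;		/* mirrors cnic_ulp_ops::ref_count */
};

static void ulp_get(struct ulp_ops_example *ops)
{
	atomic_fetch_add(&ops->ref_count, 1);
}

static void ulp_put(struct ulp_ops_example *ops)
{
	atomic_fetch_sub(&ops->ref_count, 1);
}

/* Poll for up to 20 * 100 ms before giving up, like the loop added
 * to cnic_unregister_driver(). */
static void wait_for_refs(struct ulp_ops_example *ops)
{
	int i = 0;

	while (atomic_load(&ops->ref_count) != 0 && i < 20) {
		usleep(100 * 1000);
		i++;
	}
	if (atomic_load(&ops->ref_count) != 0)
		fprintf(stderr, "timed out waiting for ref count to reach zero\n");
}

int main(void)
{
	struct ulp_ops_example ops;

	atomic_init(&ops.ref_count, 0);
	ulp_get(&ops);			/* caller enters an upcall */
	ulp_put(&ops);			/* ... and leaves it */
	wait_for_refs(&ops);		/* returns immediately: count is 0 */
	return 0;
}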
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 5192d4a9df5..a94b302bb46 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -176,6 +176,7 @@ struct cnic_local {
unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
#define ULP_F_INIT 0
#define ULP_F_START 1
+#define ULP_F_CALL_PENDING 2
struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
/* protected by ulp_lock */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d1bce27ee99..a49235739ee 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -290,6 +290,7 @@ struct cnic_ulp_ops {
void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
char *data, u16 data_size);
struct module *owner;
+ atomic_t ref_count;
};
extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index fb5df5c6203..c97ab82ec74 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,6 +1286,7 @@ static int cxgb_open(struct net_device *dev)
if (!other_ports)
schedule_chk_task(adapter);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
return 0;
}
@@ -1318,6 +1319,7 @@ static int cxgb_close(struct net_device *dev)
if (!adapter->open_device_map)
cxgb_down(adapter);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
return 0;
}
@@ -2717,7 +2719,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
if (is_offload(adapter) &&
test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
- cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
offload_close(&adapter->tdev);
}
@@ -2782,7 +2784,7 @@ static void t3_resume_ports(struct adapter *adapter)
}
if (is_offload(adapter) && !ofld_disable)
- cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}
/*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index f9f54b57b28..75064eea1d8 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
mutex_unlock(&cxgb3_db_lock);
}
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error)
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
struct cxgb3_client *client;
mutex_lock(&cxgb3_db_lock);
list_for_each_entry(client, &client_list, client_list) {
- if (client->err_handler)
- client->err_handler(tdev, status, error);
+ if (client->event_handler)
+ client->event_handler(tdev, event, port);
}
mutex_unlock(&cxgb3_db_lock);
}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 55945f422ae..670aa62042d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -64,14 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
void cxgb3_unregister_client(struct cxgb3_client *client);
void cxgb3_add_clients(struct t3cdev *tdev);
void cxgb3_remove_clients(struct t3cdev *tdev);
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error);
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
struct sk_buff *skb, void *ctx);
enum {
OFFLOAD_STATUS_UP,
- OFFLOAD_STATUS_DOWN
+ OFFLOAD_STATUS_DOWN,
+ OFFLOAD_PORT_DOWN,
+ OFFLOAD_PORT_UP
};
struct cxgb3_client {
@@ -82,7 +84,7 @@ struct cxgb3_client {
int (*redirect)(void *ctx, struct dst_entry *old,
struct dst_entry *new, struct l2t_entry *l2t);
struct list_head client_list;
- void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error);
+ void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
};
/*
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 41b648a67fe..3a6735dc9f6 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1899,7 +1899,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
nic->ru_running = RU_SUSPENDED;
pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ PCI_DMA_FROMDEVICE);
return -ENODATA;
}
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d56c7473144..99df2abf82a 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,10 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
- union ich8_hws_flash_status hsfsts;
- u32 gfpreg;
- u32 sector_base_addr;
- u32 sector_end_addr;
+ u32 gfpreg, sector_base_addr, sector_end_addr;
u16 i;
/* Can't read flash registers if the register set isn't mapped. */
@@ -375,20 +372,6 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
- /*
- * Make sure the flash bank size does not overwrite the 4k
- * sector ranges. We may have 64k allotted to us but we only care
- * about the first 2 4k sectors. Therefore, if we have anything less
- * than 64k set in the HSFSTS register, we will reduce the bank size
- * down to 4k and let the rest remain unused. If berasesz == 3, then
- * we are working in 64k mode. Otherwise we are not.
- */
- if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
- hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.berasesz != 3)
- nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
- }
-
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
/* Clear shadow ram */
@@ -594,8 +577,8 @@ static DEFINE_MUTEX(nvm_mutex);
**/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
- u32 extcnf_ctrl;
- u32 timeout = PHY_CFG_TIMEOUT;
+ u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+ s32 ret_val = 0;
might_sleep();
@@ -603,28 +586,46 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+ break;
- if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) {
- extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
+ mdelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ timeout = PHY_CFG_TIMEOUT * 2;
+
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ while (timeout) {
+ extcnf_ctrl = er32(EXTCNF_CTRL);
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+ break;
- extcnf_ctrl = er32(EXTCNF_CTRL);
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
- break;
- }
mdelay(1);
timeout--;
}
if (!timeout) {
- hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
+ hw_dbg(hw, "Failed to acquire the semaphore.\n");
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
- mutex_unlock(&nvm_mutex);
- return -E1000_ERR_CONFIG;
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
}
- return 0;
+out:
+ if (ret_val)
+ mutex_unlock(&nvm_mutex);
+
+ return ret_val;
}
/**
@@ -1306,7 +1307,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 act_offset;
- s32 ret_val;
+ s32 ret_val = 0;
u32 bank = 0;
u16 i, word;
@@ -1321,12 +1322,15 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
goto out;
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
- if (ret_val)
- goto release;
+ if (ret_val) {
+ hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
act_offset = (bank) ? nvm->flash_bank_size : 0;
act_offset += offset;
+ ret_val = 0;
for (i = 0; i < words; i++) {
if ((dev_spec->shadow_ram) &&
(dev_spec->shadow_ram[offset+i].modified)) {
@@ -1341,7 +1345,6 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
}
-release:
e1000_release_swflag_ich8lan(hw);
out:
@@ -1592,7 +1595,6 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
- s32 ret_val;
u16 i;
if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
@@ -1601,17 +1603,11 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_NVM;
}
- ret_val = e1000_acquire_swflag_ich8lan(hw);
- if (ret_val)
- return ret_val;
-
for (i = 0; i < words; i++) {
dev_spec->shadow_ram[offset+i].modified = 1;
dev_spec->shadow_ram[offset+i].value = data[i];
}
- e1000_release_swflag_ich8lan(hw);
-
return 0;
}
@@ -1652,8 +1648,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
if (ret_val) {
- e1000_release_swflag_ich8lan(hw);
- goto out;
+ hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
}
if (bank == 0) {
@@ -2039,12 +2035,8 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
iteration = 1;
break;
case 2:
- if (hw->mac.type == e1000_ich9lan) {
- sector_size = ICH_FLASH_SEG_SIZE_8K;
- iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K;
- } else {
- return -E1000_ERR_NVM;
- }
+ sector_size = ICH_FLASH_SEG_SIZE_8K;
+ iteration = 1;
break;
case 3:
sector_size = ICH_FLASH_SEG_SIZE_64K;
@@ -2056,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Start with the base address, then add the sector offset. */
flash_linear_addr = hw->nvm.flash_base_addr;
- flash_linear_addr += (bank) ? (sector_size * iteration) : 0;
+ flash_linear_addr += (bank) ? flash_bank_size : 0;
for (j = 0; j < iteration ; j++) {
do {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 63415bb6f48..fa92a683aef 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4538,8 +4538,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
- if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
- !(hw->mac.ops.check_mng_mode(hw))) {
+ if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
if (retval)
@@ -4557,7 +4556,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = !!wufc;
/* make sure adapter isn't asleep if manageability is enabled */
- if (adapter->flags & FLAG_MNG_PT_ENABLED)
+ if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+ (hw->mac.ops.check_mng_mode(hw)))
*enable_wake = true;
if (adapter->hw.phy.type == e1000_phy_igp_3)
@@ -4670,14 +4670,6 @@ static int e1000_resume(struct pci_dev *pdev)
return err;
}
- /* AER (Advanced Error Reporting) hooks */
- err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
- "0x%x\n", err);
- /* non-fatal, continue */
- }
-
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4990,6 +4982,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
goto err_pci_reg;
+ /* AER (Advanced Error Reporting) hooks */
+ err = pci_enable_pcie_error_reporting(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+ "0x%x\n", err);
+ /* non-fatal, continue */
+ }
+
pci_set_master(pdev);
/* PCI config space info */
err = pci_save_state(pdev);
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index d4b98074b1b..c9fd82d3a80 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -285,6 +285,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *bdp;
+ void *bufaddr;
unsigned short status;
unsigned long flags;
@@ -312,7 +313,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
- bdp->cbd_bufaddr = __pa(skb->data);
+ bufaddr = skb->data;
bdp->cbd_datlen = skb->len;
/*
@@ -320,11 +321,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
- if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
- bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
+ bufaddr = fep->tx_bounce[index];
}
/* Save skb pointer */
@@ -336,7 +337,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+ bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc786333d95..c40113f5896 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct bcom_fec_bd *bd;
+ unsigned long flags;
if (bcom_queue_full(priv->tx_dmatsk)) {
if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- spin_lock_irq(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
dev->trans_start = jiffies;
bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- spin_unlock_irq(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f8ffcbf0bc3..a00ec639c38 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -491,6 +491,7 @@ static int gfar_remove(struct of_device *ofdev)
dev_set_drvdata(&ofdev->dev, NULL);
+ unregister_netdev(priv->ndev);
iounmap(priv->regs);
free_netdev(priv->ndev);
@@ -936,6 +937,7 @@ int startup_gfar(struct net_device *dev)
struct gfar __iomem *regs = priv->regs;
int err = 0;
u32 rctrl = 0;
+ u32 tctrl = 0;
u32 attrs = 0;
gfar_write(&regs->imask, IMASK_INIT_CLEAR);
@@ -1111,11 +1113,19 @@ int startup_gfar(struct net_device *dev)
rctrl |= RCTRL_PADDING(priv->padding);
}
+ /* keep vlan related bits if it's enabled */
+ if (priv->vlgrp) {
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ tctrl |= TCTRL_VLINS;
+ }
+
/* Init rctrl based on our settings */
gfar_write(&priv->regs->rctrl, rctrl);
if (dev->features & NETIF_F_IP_CSUM)
- gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);
+ tctrl |= TCTRL_INIT_CSUM;
+
+ gfar_write(&priv->regs->tctrl, tctrl);
/* Set the extraction length and index */
attrs = ATTRELI_EL(priv->rx_stash_size) |
@@ -1450,7 +1460,6 @@ static void gfar_vlan_rx_register(struct net_device *dev,
/* Enable VLAN tag extraction */
tempval = gfar_read(&priv->regs->rctrl);
- tempval |= RCTRL_VLEX;
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
gfar_write(&priv->regs->rctrl, tempval);
} else {
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index beb84213b67..f0f89080371 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev)
free_irq(dev->emac_irq, dev);
+ netif_carrier_off(ndev);
+
return 0;
}
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index c4361d46659..ee1cff5c9b2 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -23,7 +23,6 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
@@ -205,9 +204,6 @@ static const struct net_device_ops au1k_irda_netdev_ops = {
.ndo_start_xmit = au1k_irda_hard_xmit,
.ndo_tx_timeout = au1k_tx_timeout,
.ndo_do_ioctl = au1k_irda_ioctl,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
};
static int au1k_irda_net_init(struct net_device *dev)
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 3376a4f39e0..77d10edefd2 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = {
.ndo_stop = pxa_irda_stop,
.ndo_start_xmit = pxa_irda_hard_xmit,
.ndo_do_ioctl = pxa_irda_ioctl,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
};
static int pxa_irda_probe(struct platform_device *pdev)
@@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
if (!dev)
goto err_mem_3;
+ SET_NETDEV_DEV(dev, &pdev->dev);
si = netdev_priv(dev);
si->dev = &pdev->dev;
si->pdata = pdev->dev.platform_data;
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 2aeb2e6aec1..b039cb081e9 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -24,7 +24,6 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
@@ -881,9 +880,6 @@ static const struct net_device_ops sa1100_irda_netdev_ops = {
.ndo_stop = sa1100_irda_stop,
.ndo_start_xmit = sa1100_irda_hard_xmit,
.ndo_do_ioctl = sa1100_irda_ioctl,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
};
static int sa1100_irda_probe(struct platform_device *pdev)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index d0883835b0c..fe4f2b2bff9 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -115,7 +115,7 @@ static int __init w83977af_init(void)
IRDA_DEBUG(0, "%s()\n", __func__ );
- for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
+ for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e11d83d5852..2c4dc8221dc 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -136,6 +136,8 @@ struct ixgbe_ring {
u8 queue_index; /* needed for multiqueue queue management */
+#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
+ u8 flags; /* per ring feature flags */
u16 head;
u16 tail;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 79144e950a3..dff8dfac7ed 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1948,6 +1948,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_q_vector *q_vector;
int i;
if (ec->tx_max_coalesced_frames_irq)
@@ -1982,14 +1983,24 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
adapter->itr_setting = 0;
}
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- if (q_vector->txr_count && !q_vector->rxr_count)
- /* tx vector gets half the rate */
- q_vector->eitr = (adapter->eitr_param >> 1);
- else
- /* rx only or mixed */
- q_vector->eitr = adapter->eitr_param;
+ /* MSI/MSIx Interrupt Mode */
+ if (adapter->flags &
+ (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
+ int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ /* tx vector gets half the rate */
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else
+ /* rx only or mixed */
+ q_vector->eitr = adapter->eitr_param;
+ ixgbe_write_eitr(q_vector);
+ }
+ /* Legacy Interrupt Mode */
+ } else {
+ q_vector = adapter->q_vector[0];
+ q_vector->eitr = adapter->eitr_param;
ixgbe_write_eitr(q_vector);
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index fa9f24e2368..28cf104e36c 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -336,7 +336,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
/* return 0 to bypass going to ULD for DDPed data */
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
rc = 0;
- else
+ else if (ddp->len)
rc = ddp->len;
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 110c65ab5cb..77b0381a2b5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -492,12 +492,12 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
skb_record_rx_queue(skb, ring->queue_index);
if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
- if (adapter->vlgrp && is_vlan && (tag != 0))
+ if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
else
napi_gro_receive(napi, skb);
} else {
- if (adapter->vlgrp && is_vlan && (tag != 0))
+ if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
else
netif_rx(skb);
@@ -585,7 +585,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
if (!bi->page_dma &&
- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
@@ -629,7 +629,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
@@ -726,7 +726,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
(*work_done)++;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@@ -798,7 +798,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
} else {
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
@@ -1898,46 +1898,19 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
+static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring)
{
- struct ixgbe_ring *rx_ring;
u32 srrctl;
- int queue0 = 0;
- unsigned long mask;
+ int index;
struct ixgbe_ring_feature *feature = adapter->ring_feature;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- int dcb_i = feature[RING_F_DCB].indices;
- if (dcb_i == 8)
- queue0 = index >> 4;
- else if (dcb_i == 4)
- queue0 = index >> 5;
- else
- dev_err(&adapter->pdev->dev, "Invalid DCB "
- "configuration\n");
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- struct ixgbe_ring_feature *f;
-
- rx_ring = &adapter->rx_ring[queue0];
- f = &adapter->ring_feature[RING_F_FCOE];
- if ((queue0 == 0) && (index > rx_ring->reg_idx))
- queue0 = f->mask + index -
- rx_ring->reg_idx - 1;
- }
-#endif /* IXGBE_FCOE */
- } else {
- queue0 = index;
- }
- } else {
+ index = rx_ring->reg_idx;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ unsigned long mask;
mask = (unsigned long) feature[RING_F_RSS].mask;
- queue0 = index & mask;
index = index & mask;
}
-
- rx_ring = &adapter->rx_ring[queue0];
-
srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1946,7 +1919,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
@@ -2002,6 +1975,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
u64 rdba;
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring *rx_ring;
struct net_device *netdev = adapter->netdev;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, j;
@@ -2018,11 +1992,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
/* Decide whether to use packet split mode or not */
adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-#endif /* IXGBE_FCOE */
-
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
rx_buf_len = IXGBE_RX_HDR_SIZE;
@@ -2070,29 +2039,35 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rdba = adapter->rx_ring[i].dma;
- j = adapter->rx_ring[i].reg_idx;
+ rx_ring = &adapter->rx_ring[i];
+ rdba = rx_ring->dma;
+ j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
- adapter->rx_ring[i].head = IXGBE_RDH(j);
- adapter->rx_ring[i].tail = IXGBE_RDT(j);
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ rx_ring->head = IXGBE_RDH(j);
+ rx_ring->tail = IXGBE_RDT(j);
+ rx_ring->rx_buf_len = rx_buf_len;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
+ rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
- if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
- (i >= f->mask) && (i < f->mask + f->indices))
- adapter->rx_ring[i].rx_buf_len =
- IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ if ((i >= f->mask) && (i < f->mask + f->indices)) {
+ rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ }
}
#endif /* IXGBE_FCOE */
- ixgbe_configure_srrctl(adapter, j);
+ ixgbe_configure_srrctl(adapter, rx_ring);
}
if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -2168,7 +2143,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ rx_ring = &adapter->rx_ring[i];
+ j = rx_ring->reg_idx;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
rscctrl |= IXGBE_RSCCTL_RSCEN;
/*
@@ -2176,7 +2152,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 2a0174b62e9..92fb8235c76 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
struct ixpdev_priv *ip = netdev_priv(dev);
struct ixpdev_tx_desc *desc;
int entry;
+ unsigned long flags;
if (unlikely(skb->len > PAGE_SIZE)) {
/* @@@ Count drops. */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
- local_irq_disable();
+ local_irq_save(flags);
ip->tx_queue_entries++;
if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
netif_stop_queue(dev);
- local_irq_enable();
+ local_irq_restore(flags);
return 0;
}
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 5b5c25368d1..e3601cf3f93 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping;
unsigned int len, entry;
u32 ctrl;
+ unsigned long flags;
#ifdef DEBUG
int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
len = skb->len;
- spin_lock_irq(&bp->lock);
+ spin_lock_irqsave(&bp->lock, flags);
/* This is a hard error, log it. */
if (TX_BUFFS_AVAIL(bp) < 1) {
netif_stop_queue(dev);
- spin_unlock_irq(&bp->lock);
+ spin_unlock_irqrestore(&bp->lock, flags);
dev_err(&bp->pdev->dev,
"BUG! Tx Ring full when queue awake!\n");
dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1)
netif_stop_queue(dev);
- spin_unlock_irq(&bp->lock);
+ spin_unlock_irqrestore(&bp->lock, flags);
dev->trans_start = jiffies;
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index ac57b6a42c6..ccfe276943f 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -34,7 +34,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 91bdfdfd431..3ac0404d0d1 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -506,8 +506,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
- skb_frags_rx[nr - 1].size = length -
- priv->frag_info[nr - 1].frag_prefix_size;
+ if (nr > 0)
+ skb_frags_rx[nr - 1].size = length -
+ priv->frag_info[nr - 1].frag_prefix_size;
return nr;
fail:
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5a88b3f5769..62208401c4d 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -437,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+ unsigned long flags;
/* If we don't have a pending timer, set one up to catch our recent
post in case the interface becomes idle */
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
- if (spin_trylock_irq(&ring->comp_lock)) {
+ if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
mlx4_en_process_tx_cq(priv->dev, cq);
- spin_unlock_irq(&ring->comp_lock);
+ spin_unlock_irqrestore(&ring->comp_lock, flags);
}
}
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index b9ceddde46c..bffb7995cb7 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
@@ -42,6 +41,10 @@
#include "fw.h"
enum {
+ MLX4_IRQNAME_SIZE = 64
+};
+
+enum {
MLX4_NUM_ASYNC_EQE = 0x100,
MLX4_NUM_SPARE_EQE = 0x80,
MLX4_EQ_ENTRY_SIZE = 0x20
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int ret;
-
- /*
- * We assume that mapping one page is enough for the whole EQ
- * context table. This is fine with all current HCAs, because
- * we only use 32 EQs and each EQ uses 64 bytes of context
- * memory, or 1 KB total.
- */
- priv->eq_table.icm_virt = icm_virt;
- priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
- if (!priv->eq_table.icm_page)
- return -ENOMEM;
- priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
- __free_page(priv->eq_table.icm_page);
- return -ENOMEM;
- }
-
- ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
- if (ret) {
- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->eq_table.icm_page);
- }
-
- return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
-
- mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->eq_table.icm_page);
-}
-
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
priv->eq_table.clr_int = priv->clr_base +
(priv->eq_table.inta_pin < 32 ? 4 : 0);
- priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+ priv->eq_table.irq_names =
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+ GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
goto err_out_bitmap;
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
goto err_out_comp;
if (dev->flags & MLX4_FLAG_MSI_X) {
- static const char async_eq_name[] = "mlx4-async";
const char *eq_name;
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
if (i < dev->caps.num_comp_vectors) {
- snprintf(priv->eq_table.irq_names + i * 16, 16,
- "mlx4-comp-%d", i);
- eq_name = priv->eq_table.irq_names + i * 16;
- } else
- eq_name = async_eq_name;
+ snprintf(priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-comp-%d@pci:%s", i,
+ pci_name(dev->pdev));
+ } else {
+ snprintf(priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->pdev));
+ }
+ eq_name = priv->eq_table.irq_names +
+ i * MLX4_IRQNAME_SIZE;
err = request_irq(priv->eq_table.eq[i].irq,
mlx4_msi_x_interrupt, 0, eq_name,
priv->eq_table.eq + i);
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
priv->eq_table.eq[i].have_irq = 1;
}
} else {
+ snprintf(priv->eq_table.irq_names,
+ MLX4_IRQNAME_SIZE,
+ DRV_NAME "@pci:%s",
+ pci_name(dev->pdev));
err = request_irq(dev->pdev->irq, mlx4_interrupt,
- IRQF_SHARED, DRV_NAME, dev);
+ IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
goto err_out_async;
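Illustrative sketch (not part of the patch above): the mlx4 eq.c hunk sizes one flat buffer at MLX4_IRQNAME_SIZE * (num_comp_vectors + 1) and carves it into fixed-width slots, so each request_irq() gets a persistent, device-qualified name such as "mlx4-comp-0@pci:0000:06:00.0". A stand-alone illustration of the slot arithmetic; the vector count and PCI name below are made up:

#include <stdio.h>

#define IRQNAME_SIZE 64			/* stands in for MLX4_IRQNAME_SIZE */
#define NUM_COMP_VECTORS 4		/* hypothetical */

int main(void)
{
	static char names[IRQNAME_SIZE * (NUM_COMP_VECTORS + 1)];
	const char *pci = "0000:06:00.0";	/* stands in for pci_name(pdev) */
	int i;

	for (i = 0; i < NUM_COMP_VECTORS + 1; i++) {
		char *slot = names + i * IRQNAME_SIZE;	/* one slot per vector */

		if (i < NUM_COMP_VECTORS)
			snprintf(slot, IRQNAME_SIZE, "mlx4-comp-%d@pci:%s", i, pci);
		else
			snprintf(slot, IRQNAME_SIZE, "mlx4-async@pci:%s", pci);
		printf("irq name %d: %s\n", i, slot);
	}
	return 0;
}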
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index baf4bf66062..04b382fcb8c 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index dac621b1e9f..3dd481e77f9 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
goto err_unmap_aux;
}
- err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+ err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+ init_hca->eqc_base, dev_cap->eqc_entry_sz,
+ dev->caps.num_eqs, dev->caps.num_eqs,
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
err_unmap_eq:
- mlx4_unmap_eq_icm(dev);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
err_unmap_cmpt:
mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+ mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
- mlx4_unmap_eq_icm(dev);
mlx4_UNMAP_ICM_AUX(dev);
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
err_close:
- mlx4_close_hca(dev);
+ mlx4_CLOSE_HCA(dev, 0);
err_free_icm:
mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_pdev;
}
- err = pci_request_region(pdev, 0, DRV_NAME);
+ err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
+ dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
goto err_disable_pdev;
}
- err = pci_request_region(pdev, 2, DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
- goto err_release_bar0;
- }
-
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
- goto err_release_bar2;
+ goto err_release_regions;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err) {
dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
"aborting.\n");
- goto err_release_bar2;
+ goto err_release_regions;
}
}
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "Device struct alloc failed, "
"aborting.\n");
err = -ENOMEM;
- goto err_release_bar2;
+ goto err_release_regions;
}
dev = &priv->dev;
@@ -1205,11 +1202,8 @@ err_cmd:
err_free_dev:
kfree(priv);
-err_release_bar2:
- pci_release_region(pdev, 2);
-
-err_release_bar0:
- pci_release_region(pdev, 0);
+err_release_regions:
+ pci_release_regions(pdev);
err_disable_pdev:
pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_disable_msix(pdev);
kfree(priv);
- pci_release_region(pdev, 2);
- pci_release_region(pdev, 0);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
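
[Editor's sketch, not part of the patch] The main.c hunks above collapse the per-BAR pci_request_region() calls into a single pci_request_regions()/pci_release_regions() pair, which also simplifies the error unwinding. A minimal illustrative probe path using that pattern; "example_probe" and the label names are hypothetical:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* One call claims every BAR; pci_release_regions() undoes it. */
	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable;

	pci_set_master(pdev);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}
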
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 6053c357a47..5ccbce9866f 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5bd79c2b184..bc72d6e4919 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
void __iomem **uar_map;
u32 clr_mask;
struct mlx4_eq *eq;
- u64 icm_virt;
- struct page *icm_page;
- dma_addr_t icm_dma;
+ struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
int have_irq;
u8 inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index f96948be0a4..ca7ab8e7b4c 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -32,7 +32,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 26d1a7a9e37..c4988d6bd5b 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <asm/page.h>
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index bd22df95adf..ca25b9dc837 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -32,8 +32,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
-
#include "mlx4.h"
#include "fw.h"
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 1c565ef8d17..42ab9fc01d3 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -33,8 +33,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
-
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index 3951b884c0f..e5741dab382 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index fe9f218691f..1377d0dc8f1 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -31,8 +31,6 @@
* SOFTWARE.
*/
-#include <linux/init.h>
-
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index f86e05047d1..a9c1fcca5e7 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1254,7 +1254,7 @@ struct netxen_adapter {
u8 mc_enabled;
u8 max_mc_count;
u8 rss_supported;
- u8 resv2;
+ u8 link_changed;
u32 resv3;
u8 has_link_events;
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7acf204e38c..5d3343ef3d8 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -184,13 +184,6 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
kfree(recv_ctx->rds_rings);
skip_rds:
- if (recv_ctx->sds_rings == NULL)
- goto skip_sds;
-
- for(ring = 0; ring < adapter->max_sds_rings; ring++)
- recv_ctx->sds_rings[ring].consumer = 0;
-
-skip_sds:
if (adapter->tx_ring == NULL)
return;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 3cd8cfcf627..28f270f5ac7 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -94,10 +94,6 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
-static struct workqueue_struct *netxen_workq;
-#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
-#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
-
static void netxen_watchdog(unsigned long);
static uint32_t crb_cmd_producer[4] = {
@@ -171,6 +167,8 @@ netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
{
if (recv_ctx->sds_rings != NULL)
kfree(recv_ctx->sds_rings);
+
+ recv_ctx->sds_rings = NULL;
}
static int
@@ -193,6 +191,21 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
}
static void
+netxen_napi_del(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ netxen_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
netxen_napi_enable(struct netxen_adapter *adapter)
{
int ring;
@@ -260,7 +273,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
change = 0;
shift = NXRD32(adapter, CRB_DMA_SHIFT);
- if (shift >= 32)
+ if (shift > 32)
return 0;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
@@ -272,7 +285,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
old_mask = pdev->dma_mask;
old_cmask = pdev->dev.coherent_dma_mask;
- mask = (1ULL<<(32+shift)) - 1;
+ mask = DMA_BIT_MASK(32+shift);
err = pci_set_dma_mask(pdev, mask);
if (err)
@@ -880,7 +893,6 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
spin_unlock(&adapter->tx_clean_lock);
del_timer_sync(&adapter->watchdog_timer);
- FLUSH_SCHEDULED_WORK();
}
@@ -894,10 +906,12 @@ netxen_nic_attach(struct netxen_adapter *adapter)
struct nx_host_tx_ring *tx_ring;
err = netxen_init_firmware(adapter);
- if (err != 0) {
- printk(KERN_ERR "Failed to init firmware\n");
- return -EIO;
- }
+ if (err)
+ return err;
+
+ err = netxen_napi_add(adapter, netdev);
+ if (err)
+ return err;
if (adapter->fw_major < 4)
adapter->max_rds_rings = 3;
@@ -961,6 +975,7 @@ netxen_nic_detach(struct netxen_adapter *adapter)
netxen_free_hw_resources(adapter);
netxen_release_rx_buffers(adapter);
netxen_nic_free_irq(adapter);
+ netxen_napi_del(adapter);
netxen_free_sw_resources(adapter);
adapter->is_up = 0;
@@ -1105,9 +1120,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->irq = adapter->msix_entries[0].vector;
- if (netxen_napi_add(adapter, netdev))
- goto err_out_disable_msi;
-
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = &netxen_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -1177,6 +1189,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ cancel_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->tx_timeout_task);
+
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
netxen_nic_detach(adapter);
}
@@ -1185,7 +1200,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_free_adapter_offload(adapter);
netxen_teardown_intr(adapter);
- netxen_free_sds_rings(&adapter->recv_ctx);
netxen_cleanup_pci_map(adapter);
@@ -1211,6 +1225,9 @@ netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev))
netxen_nic_down(adapter, netdev);
+ cancel_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->tx_timeout_task);
+
if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
netxen_nic_detach(adapter);
@@ -1549,11 +1566,6 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
"%s: Device temperature %d degrees C exceeds"
" maximum allowed. Hardware has been shut down.\n",
netdev->name, temp_val);
-
- netif_device_detach(netdev);
- netxen_nic_down(adapter, netdev);
- netxen_nic_detach(adapter);
-
rv = 1;
} else if (temp_state == NX_TEMP_WARN) {
if (adapter->temp == NX_TEMP_NORMAL) {
@@ -1587,10 +1599,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
-
- if (!adapter->has_link_events)
- netxen_nic_set_link_parameters(adapter);
-
+ adapter->link_changed = !adapter->has_link_events;
} else if (!adapter->ahw.linkup && linkup) {
printk(KERN_INFO "%s: %s NIC Link is up\n",
netxen_nic_driver_name, netdev->name);
@@ -1599,9 +1608,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
-
- if (!adapter->has_link_events)
- netxen_nic_set_link_parameters(adapter);
+ adapter->link_changed = !adapter->has_link_events;
}
}
@@ -1628,11 +1635,36 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
netxen_advert_link_change(adapter, linkup);
}
+static void netxen_nic_thermal_shutdown(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ netxen_nic_down(adapter, netdev);
+ netxen_nic_detach(adapter);
+}
+
static void netxen_watchdog(unsigned long v)
{
struct netxen_adapter *adapter = (struct netxen_adapter *)v;
- SCHEDULE_WORK(&adapter->watchdog_task);
+ if (netxen_nic_check_temp(adapter))
+ goto do_sched;
+
+ if (!adapter->has_link_events) {
+ netxen_nic_handle_phy_intr(adapter);
+
+ if (adapter->link_changed)
+ goto do_sched;
+ }
+
+ if (netif_running(adapter->netdev))
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+
+ return;
+
+do_sched:
+ schedule_work(&adapter->watchdog_task);
}
void netxen_watchdog_task(struct work_struct *work)
@@ -1640,11 +1672,13 @@ void netxen_watchdog_task(struct work_struct *work)
struct netxen_adapter *adapter =
container_of(work, struct netxen_adapter, watchdog_task);
- if (netxen_nic_check_temp(adapter))
+ if (adapter->temp == NX_TEMP_PANIC) {
+ netxen_nic_thermal_shutdown(adapter);
return;
+ }
- if (!adapter->has_link_events)
- netxen_nic_handle_phy_intr(adapter);
+ if (adapter->link_changed)
+ netxen_nic_set_link_parameters(adapter);
if (netif_running(adapter->netdev))
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
@@ -1652,9 +1686,8 @@ void netxen_watchdog_task(struct work_struct *work)
static void netxen_tx_timeout(struct net_device *netdev)
{
- struct netxen_adapter *adapter = (struct netxen_adapter *)
- netdev_priv(netdev);
- SCHEDULE_WORK(&adapter->tx_timeout_task);
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ schedule_work(&adapter->tx_timeout_task);
}
static void netxen_tx_timeout_task(struct work_struct *work)
@@ -1811,9 +1844,6 @@ static int __init netxen_init_module(void)
{
printk(KERN_INFO "%s\n", netxen_nic_driver_string);
- if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
- return -ENOMEM;
-
return pci_register_driver(&netxen_driver);
}
@@ -1822,7 +1852,6 @@ module_init(netxen_init_module);
static void __exit netxen_exit_module(void)
{
pci_unregister_driver(&netxen_driver);
- destroy_workqueue(netxen_workq);
}
module_exit(netxen_exit_module);
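
[Editor's sketch, not part of the patch] The netxen changes above drop the driver-private singlethread workqueue in favour of the shared kernel workqueue, queuing with schedule_work() and tearing down with cancel_work_sync() before the adapter is freed. A small illustrative version of that pattern; all "example_*" names are hypothetical:

#include <linux/workqueue.h>

struct example_adapter {
	struct work_struct tx_timeout_task;
};

static void example_tx_timeout_task(struct work_struct *work)
{
	/* reset/recovery logic would live here */
}

static void example_init(struct example_adapter *ad)
{
	INIT_WORK(&ad->tx_timeout_task, example_tx_timeout_task);
}

static void example_tx_timeout(struct example_adapter *ad)
{
	/* queue on the shared kernel workqueue, no private queue needed */
	schedule_work(&ad->tx_timeout_task);
}

static void example_remove(struct example_adapter *ad)
{
	/* guarantee the handler is not running before 'ad' goes away */
	cancel_work_sync(&ad->tx_timeout_task);
}
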
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index a646a445fda..23e1a0750fe 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1839,7 +1839,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->chip_version = chip_version;
lp->msg_enable = pcnet32_debug;
if ((cards_found >= MAX_UNITS)
- || (options[cards_found] > sizeof(options_mapping)))
+ || (options[cards_found] >= sizeof(options_mapping)))
lp->options = PCNET32_PORT_ASEL;
else
lp->options = options_mapping[options[cards_found]];
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1c70e999cc5..7567f510eff 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
/* this enables an interrupt in the interrupt mask register */
#define SMC_ENABLE_INT(lp, x) do { \
unsigned char mask; \
- spin_lock_irq(&lp->lock); \
+ unsigned long smc_enable_flags; \
+ spin_lock_irqsave(&lp->lock, smc_enable_flags); \
mask = SMC_GET_INT_MASK(lp); \
mask |= (x); \
SMC_SET_INT_MASK(lp, mask); \
- spin_unlock_irq(&lp->lock); \
+ spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \
} while (0)
/* this disables an interrupt from the interrupt mask register */
#define SMC_DISABLE_INT(lp, x) do { \
unsigned char mask; \
- spin_lock_irq(&lp->lock); \
+ unsigned long smc_disable_flags; \
+ spin_lock_irqsave(&lp->lock, smc_disable_flags); \
mask = SMC_GET_INT_MASK(lp); \
mask &= ~(x); \
SMC_SET_INT_MASK(lp, mask); \
- spin_unlock_irq(&lp->lock); \
+ spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \
} while (0)
/*
@@ -520,21 +522,21 @@ static inline void smc_rcv(struct net_device *dev)
* any other concurrent access and C would always interrupt B. But life
* isn't that easy in a SMP world...
*/
-#define smc_special_trylock(lock) \
+#define smc_special_trylock(lock, flags) \
({ \
int __ret; \
- local_irq_disable(); \
+ local_irq_save(flags); \
__ret = spin_trylock(lock); \
if (!__ret) \
- local_irq_enable(); \
+ local_irq_restore(flags); \
__ret; \
})
-#define smc_special_lock(lock) spin_lock_irq(lock)
-#define smc_special_unlock(lock) spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
#else
-#define smc_special_trylock(lock) (1)
-#define smc_special_lock(lock) do { } while (0)
-#define smc_special_unlock(lock) do { } while (0)
+#define smc_special_trylock(lock, flags) (1)
+#define smc_special_lock(lock, flags) do { } while (0)
+#define smc_special_unlock(lock, flags) do { } while (0)
#endif
/*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
struct sk_buff *skb;
unsigned int packet_no, len;
unsigned char *buf;
+ unsigned long flags;
DBG(3, "%s: %s\n", dev->name, __func__);
- if (!smc_special_trylock(&lp->lock)) {
+ if (!smc_special_trylock(&lp->lock, flags)) {
netif_stop_queue(dev);
tasklet_schedule(&lp->tx_task);
return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
skb = lp->pending_tx_skb;
if (unlikely(!skb)) {
- smc_special_unlock(&lp->lock);
+ smc_special_unlock(&lp->lock, flags);
return;
}
lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
printk("%s: Memory allocation failed.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
- smc_special_unlock(&lp->lock);
+ smc_special_unlock(&lp->lock, flags);
goto done;
}
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
/* queue the packet for TX */
SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
- smc_special_unlock(&lp->lock);
+ smc_special_unlock(&lp->lock, flags);
dev->trans_start = jiffies;
dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
unsigned int numPages, poll_count, status;
+ unsigned long flags;
DBG(3, "%s: %s\n", dev->name, __func__);
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
- smc_special_lock(&lp->lock);
+ smc_special_lock(&lp->lock, flags);
/* now, try to allocate the memory */
SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
} while (--poll_count);
- smc_special_unlock(&lp->lock);
+ smc_special_unlock(&lp->lock, flags);
lp->pending_tx_skb = skb;
if (!poll_count) {
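
[Editor's sketch, not part of the patch] The smc91x macros above switch from spin_lock_irq() to spin_lock_irqsave(): the irqsave form records the caller's interrupt state and restores it on unlock, rather than unconditionally re-enabling interrupts, so the helpers stay safe when invoked with interrupts already disabled. A minimal sketch of that locking pattern, assuming a hypothetical "example_lock":

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_update(unsigned int *counter)
{
	unsigned long flags;

	/* saves the current IRQ state in 'flags' ... */
	spin_lock_irqsave(&example_lock, flags);
	(*counter)++;
	/* ... and restores exactly that state, enabled or not */
	spin_unlock_irqrestore(&example_lock, flags);
}
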
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 99a63649f4f..4cf9a658875 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -652,8 +652,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
int entry;
u32 flag;
dma_addr_t mapping;
+ unsigned long flags;
- spin_lock_irq(&tp->lock);
+ spin_lock_irqsave(&tp->lock, flags);
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % TX_RING_SIZE;
@@ -688,7 +689,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Trigger an immediate transmit demand. */
iowrite32(0, tp->base_addr + CSR1);
- spin_unlock_irq(&tp->lock);
+ spin_unlock_irqrestore(&tp->lock, flags);
dev->trans_start = jiffies;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 027f7aba26a..87214a257d2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -130,17 +130,10 @@ static inline struct tun_sock *tun_sk(struct sock *sk)
static int tun_attach(struct tun_struct *tun, struct file *file)
{
struct tun_file *tfile = file->private_data;
- const struct cred *cred = current_cred();
int err;
ASSERT_RTNL();
- /* Check permissions */
- if (((tun->owner != -1 && cred->euid != tun->owner) ||
- (tun->group != -1 && !in_egroup_p(tun->group))) &&
- !capable(CAP_NET_ADMIN))
- return -EPERM;
-
netif_tx_lock_bh(tun->dev);
err = -EINVAL;
@@ -926,6 +919,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
+ const struct cred *cred = current_cred();
+
if (ifr->ifr_flags & IFF_TUN_EXCL)
return -EBUSY;
if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -935,6 +930,14 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
else
return -EINVAL;
+ if (((tun->owner != -1 && cred->euid != tun->owner) ||
+ (tun->group != -1 && !in_egroup_p(tun->group))) &&
+ !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ err = security_tun_dev_attach(tun->sk);
+ if (err < 0)
+ return err;
+
err = tun_attach(tun, file);
if (err < 0)
return err;
@@ -947,6 +950,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ err = security_tun_dev_create();
+ if (err < 0)
+ return err;
/* Set dev type */
if (ifr->ifr_flags & IFF_TUN) {
@@ -989,6 +995,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->sk = sk;
container_of(sk, struct tun_sock, sk)->tun = tun;
+ security_tun_dev_post_create(sk);
+
tun_net_init(dev);
if (strchr(dev->name, '%')) {
@@ -1048,20 +1056,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
}
-static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
+static int tun_get_iff(struct net *net, struct tun_struct *tun,
+ struct ifreq *ifr)
{
- struct tun_struct *tun = tun_get(file);
-
- if (!tun)
- return -EBADFD;
-
DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
strcpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
- tun_put(tun);
return 0;
}
@@ -1105,8 +1108,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
return 0;
}
-static int tun_chr_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long tun_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
@@ -1128,34 +1131,32 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
(unsigned int __user*)argp);
}
+ rtnl_lock();
+
tun = __tun_get(tfile);
if (cmd == TUNSETIFF && !tun) {
- int err;
-
ifr.ifr_name[IFNAMSIZ-1] = '\0';
- rtnl_lock();
- err = tun_set_iff(tfile->net, file, &ifr);
- rtnl_unlock();
+ ret = tun_set_iff(tfile->net, file, &ifr);
- if (err)
- return err;
+ if (ret)
+ goto unlock;
if (copy_to_user(argp, &ifr, sizeof(ifr)))
- return -EFAULT;
- return 0;
+ ret = -EFAULT;
+ goto unlock;
}
-
+ ret = -EBADFD;
if (!tun)
- return -EBADFD;
+ goto unlock;
DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
ret = 0;
switch (cmd) {
case TUNGETIFF:
- ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
+ ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
if (ret)
break;
@@ -1201,7 +1202,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
- rtnl_lock();
if (tun->dev->flags & IFF_UP) {
DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
tun->dev->name);
@@ -1211,7 +1211,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
ret = 0;
}
- rtnl_unlock();
break;
#ifdef TUN_DEBUG
@@ -1220,9 +1219,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
break;
#endif
case TUNSETOFFLOAD:
- rtnl_lock();
ret = set_offload(tun->dev, arg);
- rtnl_unlock();
break;
case TUNSETTXFILTER:
@@ -1230,9 +1227,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
break;
- rtnl_lock();
ret = update_filter(&tun->txflt, (void __user *)arg);
- rtnl_unlock();
break;
case SIOCGIFHWADDR:
@@ -1248,9 +1243,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_DEBUG "%s: set hw address: %pM\n",
tun->dev->name, ifr.ifr_hwaddr.sa_data);
- rtnl_lock();
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
- rtnl_unlock();
break;
case TUNGETSNDBUF:
@@ -1273,7 +1266,10 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
break;
};
- tun_put(tun);
+unlock:
+ rtnl_unlock();
+ if (tun)
+ tun_put(tun);
return ret;
}
@@ -1361,7 +1357,7 @@ static const struct file_operations tun_fops = {
.write = do_sync_write,
.aio_write = tun_chr_aio_write,
.poll = tun_chr_poll,
- .ioctl = tun_chr_ioctl,
+ .unlocked_ioctl = tun_chr_ioctl,
.open = tun_chr_open,
.release = tun_chr_close,
.fasync = tun_chr_fasync
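
[Editor's sketch, not part of the patch] The tun diff above converts the BKL-protected .ioctl entry point to .unlocked_ioctl, taking the RTNL once around the whole handler instead of locking per command. A bare-bones illustration of that file_operations shape; "example_ioctl" and "example_fops" are hypothetical:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>

static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	long ret = 0;

	rtnl_lock();
	/* ... dispatch on cmd under the RTNL ... */
	rtnl_unlock();
	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
};
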
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 3b957e6412e..8a7b8c7bd78 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3111,10 +3111,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
u8 __iomem *bd; /* BD pointer */
u32 bd_status;
u8 txQ = 0;
+ unsigned long flags;
ugeth_vdbg("%s: IN", __func__);
- spin_lock_irq(&ugeth->lock);
+ spin_lock_irqsave(&ugeth->lock, flags);
dev->stats.tx_bytes += skb->len;
@@ -3171,7 +3172,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
uccf = ugeth->uccf;
out_be16(uccf->p_utodr, UCC_FAST_TOD);
#endif
- spin_unlock_irq(&ugeth->lock);
+ spin_unlock_irqrestore(&ugeth->lock, flags);
return 0;
}
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index c7467823cd1..f968c834ff6 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -250,6 +250,8 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
DEFAULT_GPIO_RESET | PEGASUS_II )
+PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
+ DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
DEFAULT_GPIO_RESET)
PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 88c30a58b4b..934f7671650 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1218,6 +1218,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
unsigned entry;
+ unsigned long flags;
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
@@ -1261,7 +1262,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
/* lock eth irq */
- spin_lock_irq(&rp->lock);
+ spin_lock_irqsave(&rp->lock, flags);
wmb();
rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
wmb();
@@ -1280,7 +1281,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
- spin_unlock_irq(&rp->lock);
+ spin_unlock_irqrestore(&rp->lock, flags);
if (debug > 4) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 3ba35956327..cee08a1e497 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1778,7 +1778,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
* mode
*/
if (vptr->rev_id < REV_ID_VT3216_A0) {
- if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
+ if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
else
BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2a6e81d5b57..bbedf03a212 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,6 +70,9 @@ struct virtnet_info
struct sk_buff_head recv;
struct sk_buff_head send;
+ /* Work struct for refilling if we run low on memory. */
+ struct delayed_work refill;
+
/* Chain pages by the private ptr. */
struct page *pages;
};
@@ -273,19 +276,22 @@ drop:
dev_kfree_skb(skb);
}
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct scatterlist sg[2+MAX_SKB_FRAGS];
int num, err, i;
+ bool oom = false;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
for (;;) {
struct virtio_net_hdr *hdr;
skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ oom = true;
break;
+ }
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- f->page = get_a_page(vi, GFP_ATOMIC);
+ f->page = get_a_page(vi, gfp);
if (!f->page)
break;
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
+ return !oom;
}
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct scatterlist sg[1];
int err;
+ bool oom = false;
- if (!vi->mergeable_rx_bufs) {
- try_fill_recv_maxbufs(vi);
- return;
- }
+ if (!vi->mergeable_rx_bufs)
+ return try_fill_recv_maxbufs(vi, gfp);
for (;;) {
skb_frag_t *f;
skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ oom = true;
break;
+ }
skb_reserve(skb, NET_IP_ALIGN);
f = &skb_shinfo(skb)->frags[0];
- f->page = get_a_page(vi, GFP_ATOMIC);
+ f->page = get_a_page(vi, gfp);
if (!f->page) {
+ oom = true;
kfree_skb(skb);
break;
}
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
+ return !oom;
}
static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
}
}
+static void refill_work(struct work_struct *work)
+{
+ struct virtnet_info *vi;
+ bool still_empty;
+
+ vi = container_of(work, struct virtnet_info, refill.work);
+ napi_disable(&vi->napi);
+ try_fill_recv(vi, GFP_KERNEL);
+ still_empty = (vi->num == 0);
+ napi_enable(&vi->napi);
+
+ /* In theory, this can happen: if we don't get any buffers in
+ * we will *never* try to fill again. */
+ if (still_empty)
+ schedule_delayed_work(&vi->refill, HZ/2);
+}
+
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
received++;
}
- /* FIXME: If we oom and completely run out of inbufs, we need
- * to start a timer trying to fill more. */
- if (vi->num < vi->max / 2)
- try_fill_recv(vi);
+ if (vi->num < vi->max / 2) {
+ if (!try_fill_recv(vi, GFP_ATOMIC))
+ schedule_delayed_work(&vi->refill, 0);
+ }
/* Out of packets? */
if (received < budget) {
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->vdev = vdev;
vdev->priv = vi;
vi->pages = NULL;
+ INIT_DELAYED_WORK(&vi->refill, refill_work);
/* If they give us a callback when all buffers are done, we don't need
* the timer. */
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
}
/* Last of all, set up some receive buffers. */
- try_fill_recv(vi);
+ try_fill_recv(vi, GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
if (vi->num == 0) {
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
+ cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
free:
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
BUG_ON(vi->num != 0);
unregister_netdev(vi->dev);
+ cancel_delayed_work_sync(&vi->refill);
vdev->config->del_vqs(vi->vdev);
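
[Editor's sketch, not part of the patch] The virtio_net hunks above replace the old "FIXME" with a delayed-work refill: the receive path tries GFP_ATOMIC and, on failure, punts to a worker that may sleep and use GFP_KERNEL, rescheduling itself if memory is still unavailable. A compact illustration of the pattern; "example_*" names and the buffer size are hypothetical:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#define EXAMPLE_BUF_SIZE 4096

struct example_dev {
	struct delayed_work refill;
	void *buf;
};

static void example_refill_work(struct work_struct *work)
{
	struct example_dev *d = container_of(work, struct example_dev,
					     refill.work);

	d->buf = kmalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL);
	if (!d->buf)		/* still nothing: try again in half a second */
		schedule_delayed_work(&d->refill, HZ / 2);
}

static void example_init(struct example_dev *d)
{
	INIT_DELAYED_WORK(&d->refill, example_refill_work);
}

static void example_rx_refill(struct example_dev *d)
{
	d->buf = kmalloc(EXAMPLE_BUF_SIZE, GFP_ATOMIC);
	if (!d->buf)		/* cannot sleep in the rx path, defer to the worker */
		schedule_delayed_work(&d->refill, 0);
}
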
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 9d38cf60a0d..88c3d857386 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1967,13 +1967,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
int ret;
mutex_lock(&ar->mutex);
- if ((param) && !(queue > __AR9170_NUM_TXQ)) {
+ if (queue < __AR9170_NUM_TXQ) {
memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
param, sizeof(*param));
ret = ar9170_set_qos(ar);
- } else
+ } else {
ret = -EINVAL;
+ }
mutex_unlock(&ar->mutex);
return ret;
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 754b1f8d8da..007eb85fc67 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -598,11 +598,15 @@ static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
err = request_firmware(&aru->init_values, "ar9170-1.fw",
&aru->udev->dev);
+ if (err) {
+ dev_err(&aru->udev->dev, "file with init values not found.\n");
+ return err;
+ }
err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
if (err) {
release_firmware(aru->init_values);
- dev_err(&aru->udev->dev, "file with init values not found.\n");
+ dev_err(&aru->udev->dev, "firmware file not found.\n");
return err;
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 44c29b3f672..f593fbbb4e5 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2874,45 +2874,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
return 0;
}
-static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
- u32 src_phys, u32 dest_address, u32 length)
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
+ int nr, u32 dest_address, u32 len)
{
- u32 bytes_left = length;
- u32 src_offset = 0;
- u32 dest_offset = 0;
- int status = 0;
+ int ret, i;
+ u32 size;
+
IPW_DEBUG_FW(">> \n");
- IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
- src_phys, dest_address, length);
- while (bytes_left > CB_MAX_LENGTH) {
- status = ipw_fw_dma_add_command_block(priv,
- src_phys + src_offset,
- dest_address +
- dest_offset,
- CB_MAX_LENGTH, 0, 0);
- if (status) {
+ IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
+ nr, dest_address, len);
+
+ for (i = 0; i < nr; i++) {
+ size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
+ ret = ipw_fw_dma_add_command_block(priv, src_address[i],
+ dest_address +
+ i * CB_MAX_LENGTH, size,
+ 0, 0);
+ if (ret) {
IPW_DEBUG_FW_INFO(": Failed\n");
return -1;
} else
IPW_DEBUG_FW_INFO(": Added new cb\n");
-
- src_offset += CB_MAX_LENGTH;
- dest_offset += CB_MAX_LENGTH;
- bytes_left -= CB_MAX_LENGTH;
- }
-
- /* add the buffer tail */
- if (bytes_left > 0) {
- status =
- ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
- dest_address + dest_offset,
- bytes_left, 0, 0);
- if (status) {
- IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
- return -1;
- } else
- IPW_DEBUG_FW_INFO
- (": Adding new cb - the buffer tail\n");
}
IPW_DEBUG_FW("<< \n");
@@ -3160,59 +3142,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
{
- int rc = -1;
+ int ret = -1;
int offset = 0;
struct fw_chunk *chunk;
- dma_addr_t shared_phys;
- u8 *shared_virt;
+ int total_nr = 0;
+ int i;
+ struct pci_pool *pool;
+ u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
+ dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
IPW_DEBUG_TRACE("<< : \n");
- shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
- if (!shared_virt)
+ pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
+ if (!pool) {
+ IPW_ERROR("pci_pool_create failed\n");
return -ENOMEM;
-
- memmove(shared_virt, data, len);
+ }
/* Start the Dma */
- rc = ipw_fw_dma_enable(priv);
+ ret = ipw_fw_dma_enable(priv);
/* the DMA is already ready this would be a bug. */
BUG_ON(priv->sram_desc.last_cb_index > 0);
do {
+ u32 chunk_len;
+ u8 *start;
+ int size;
+ int nr = 0;
+
chunk = (struct fw_chunk *)(data + offset);
offset += sizeof(struct fw_chunk);
+ chunk_len = le32_to_cpu(chunk->length);
+ start = data + offset;
+
+ nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
+ for (i = 0; i < nr; i++) {
+ virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
+ &phys[total_nr]);
+ if (!virts[total_nr]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
+ CB_MAX_LENGTH);
+ memcpy(virts[total_nr], start, size);
+ start += size;
+ total_nr++;
+ /* We don't support fw chunk larger than 64*8K */
+ BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
+ }
+
/* build DMA packet and queue up for sending */
/* dma to chunk->address, the chunk->length bytes from data +
* offeset*/
/* Dma loading */
- rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
- le32_to_cpu(chunk->address),
- le32_to_cpu(chunk->length));
- if (rc) {
+ ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
+ nr, le32_to_cpu(chunk->address),
+ chunk_len);
+ if (ret) {
IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
goto out;
}
- offset += le32_to_cpu(chunk->length);
+ offset += chunk_len;
} while (offset < len);
/* Run the DMA and wait for the answer */
- rc = ipw_fw_dma_kick(priv);
- if (rc) {
+ ret = ipw_fw_dma_kick(priv);
+ if (ret) {
IPW_ERROR("dmaKick Failed\n");
goto out;
}
- rc = ipw_fw_dma_wait(priv);
- if (rc) {
+ ret = ipw_fw_dma_wait(priv);
+ if (ret) {
IPW_ERROR("dmaWaitSync Failed\n");
goto out;
}
- out:
- pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
- return rc;
+ out:
+ for (i = 0; i < total_nr; i++)
+ pci_pool_free(pool, virts[i], phys[i]);
+
+ pci_pool_destroy(pool);
+
+ return ret;
}
/* stop nic */
@@ -6226,7 +6240,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
};
u8 channel;
- while (channel_index < IPW_SCAN_CHANNELS) {
+ while (channel_index < IPW_SCAN_CHANNELS - 1) {
channel =
priv->speed_scan[priv->speed_scan_pos];
if (channel == 0) {
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index d6997371c27..b9b37411903 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -1,7 +1,6 @@
/* Copyright (C) 2006, Red Hat, Inc. */
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/if_arp.h>
@@ -44,21 +43,21 @@ static int get_common_rates(struct lbs_private *priv,
u16 *rates_size)
{
u8 *card_rates = lbs_bg_rates;
+ size_t num_card_rates = sizeof(lbs_bg_rates);
int ret = 0, i, j;
- u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)];
+ u8 tmp[30];
size_t tmp_size = 0;
/* For each rate in card_rates that exists in rate1, copy to tmp */
- for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) {
- for (j = 0; j < *rates_size && rates[j]; j++) {
+ for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
+ for (j = 0; rates[j] && (j < *rates_size); j++) {
if (rates[j] == card_rates[i])
tmp[tmp_size++] = card_rates[i];
}
}
lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
- lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates,
- ARRAY_SIZE(lbs_bg_rates));
+ lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
@@ -70,7 +69,10 @@ static int get_common_rates(struct lbs_private *priv,
lbs_pr_alert("Previously set fixed data rate %#x isn't "
"compatible with the network.\n", priv->cur_rate);
ret = -1;
+ goto done;
}
+ ret = 0;
+
done:
memset(rates, 0, *rates_size);
*rates_size = min_t(int, tmp_size, *rates_size);
@@ -320,7 +322,7 @@ static int lbs_associate(struct lbs_private *priv,
rates = (struct mrvl_ie_rates_param_set *) pos;
rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
memcpy(&rates->rates, &bss->rates, MAX_RATES);
- tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES);
+ tmplen = MAX_RATES;
if (get_common_rates(priv, rates->rates, &tmplen)) {
ret = -1;
goto done;
@@ -596,7 +598,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
/* Copy Data rates from the rates recorded in scan response */
memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
- ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES);
+ ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES);
memcpy(cmd.bss.rates, bss->rates, ratesize);
if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index 0a2e29140ad..c8a1998d474 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -56,8 +56,8 @@ struct rxpd {
u8 bss_type;
/* BSS number */
u8 bss_num;
- } bss;
- } u;
+ } __attribute__ ((packed)) bss;
+ } __attribute__ ((packed)) u;
/* SNR */
u8 snr;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a263d5c84c0..83967afe082 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -261,7 +261,7 @@ struct mwl8k_vif {
*/
};
-#define MWL8K_VIF(_vif) (struct mwl8k_vif *)(&((_vif)->drv_priv))
+#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
static const struct ieee80211_channel mwl8k_channels[] = {
{ .center_freq = 2412, .hw_value = 1, },
@@ -1012,6 +1012,8 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rmb();
skb = rxq->rx_skb[rxq->rx_head];
+ if (skb == NULL)
+ break;
rxq->rx_skb[rxq->rx_head] = NULL;
rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS;
@@ -1591,6 +1593,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
timeout = wait_for_completion_timeout(&cmd_wait,
msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
+ pci_unmap_single(priv->pdev, dma_addr, dma_size,
+ PCI_DMA_BIDIRECTIONAL);
+
result = &cmd->result;
if (!timeout) {
spin_lock_irq(&priv->fw_lock);
@@ -1610,8 +1615,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
*result);
}
- pci_unmap_single(priv->pdev, dma_addr, dma_size,
- PCI_DMA_BIDIRECTIONAL);
return rc;
}
@@ -1654,18 +1657,18 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
- cmd->num_tx_queues = MWL8K_TX_QUEUES;
+ cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
- cmd->num_tx_desc_per_queue = MWL8K_TX_DESCS;
- cmd->total_rx_desc = MWL8K_RX_DESCS;
+ cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
+ cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);
rc = mwl8k_post_cmd(hw, &cmd->header);
if (!rc) {
SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
- priv->fw_rev = cmd->fw_rev;
+ priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
priv->region_code = le16_to_cpu(cmd->region_code);
}
@@ -3216,15 +3219,19 @@ static int mwl8k_configure_filter_wt(struct work_struct *wt)
struct dev_addr_list *mclist = worker->mclist;
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_vif *mv_vif;
int rc = 0;
if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
rc = mwl8k_cmd_set_pre_scan(hw);
else {
- mv_vif = MWL8K_VIF(priv->vif);
- rc = mwl8k_cmd_set_post_scan(hw, mv_vif->bssid);
+ u8 *bssid;
+
+ bssid = "\x00\x00\x00\x00\x00\x00";
+ if (priv->vif != NULL)
+ bssid = MWL8K_VIF(priv->vif)->bssid;
+
+ rc = mwl8k_cmd_set_post_scan(hw, bssid);
}
}
@@ -3726,6 +3733,8 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
ieee80211_stop_queues(hw);
+ ieee80211_unregister_hw(hw);
+
/* Remove tx reclaim tasklet */
tasklet_kill(&priv->tx_reclaim_task);
@@ -3739,8 +3748,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_reclaim(hw, i, 1);
- ieee80211_unregister_hw(hw);
-
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 632fac86a30..b3946272c72 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -70,7 +70,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
int err = 0;
u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
- if ((key < 0) || (key > 4))
+ if ((key < 0) || (key >= 4))
return -EINVAL;
err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index a498dde024e..49c9e2c1433 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -849,13 +849,15 @@ struct rt2x00_dev {
static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev,
const unsigned int word, u32 *data)
{
- *data = rt2x00dev->rf[word];
+ BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
+ *data = rt2x00dev->rf[word - 1];
}
static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
const unsigned int word, u32 data)
{
- rt2x00dev->rf[word] = data;
+ BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
+ rt2x00dev->rf[word - 1] = data;
}
/*
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 294250e294d..87a95588a8e 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -869,6 +869,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
+ /* ENEDCA flag must always be set, transmit issues? */
+ rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+
return 0;
}
@@ -1173,13 +1176,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
rtl818x_iowrite8(priv, &priv->map->BSSID[i],
info->bssid[i]);
+ if (priv->is_rtl8187b)
+ reg = RTL818X_MSR_ENEDCA;
+ else
+ reg = 0;
+
if (is_valid_ether_addr(info->bssid)) {
- reg = RTL818X_MSR_INFRA;
- if (priv->is_rtl8187b)
- reg |= RTL818X_MSR_ENEDCA;
+ reg |= RTL818X_MSR_INFRA;
rtl818x_iowrite8(priv, &priv->map->MSR, reg);
} else {
- reg = RTL818X_MSR_NO_LINK;
+ reg |= RTL818X_MSR_NO_LINK;
rtl818x_iowrite8(priv, &priv->map->MSR, reg);
}
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index a07580138e8..c2fd6187773 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
-static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
@@ -573,19 +573,24 @@ static int yellowfin_open(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
- int i;
+ int i, ret;
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
- i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
- if (i) return i;
+ ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+ if (ret)
+ return ret;
if (yellowfin_debug > 1)
printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
dev->name, dev->irq);
- yellowfin_init_ring(dev);
+ ret = yellowfin_init_ring(dev);
+ if (ret) {
+ free_irq(dev->irq, dev);
+ return ret;
+ }
iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -725,10 +730,10 @@ static void yellowfin_tx_timeout(struct net_device *dev)
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void yellowfin_init_ring(struct net_device *dev)
+static int yellowfin_init_ring(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
- int i;
+ int i, j;
yp->tx_full = 0;
yp->cur_rx = yp->cur_tx = 0;
@@ -753,6 +758,11 @@ static void yellowfin_init_ring(struct net_device *dev)
yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
+ if (i != RX_RING_SIZE) {
+ for (j = 0; j < i; j++)
+ dev_kfree_skb(yp->rx_skbuff[j]);
+ return -ENOMEM;
+ }
yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -769,8 +779,6 @@ static void yellowfin_init_ring(struct net_device *dev)
yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
- int j;
-
/* Tx ring needs a pair of descriptors, the second for the status. */
for (i = 0; i < TX_RING_SIZE; i++) {
j = 2*i;
@@ -805,7 +813,7 @@ static void yellowfin_init_ring(struct net_device *dev)
}
#endif
yp->tx_tail_desc = &yp->tx_status[0];
- return;
+ return 0;
}
static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 37c84e3b8be..81c753a617a 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -120,6 +120,9 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
for (i = ARRAY_SIZE(cards)-1; i >= 0; i--)
if (z->id == cards[i].id)
break;
+ if (i < 0)
+ return -ENODEV;
+
board = z->resource.start;
ioaddr = board+cards[i].offset;
dev = alloc_ei_netdev();
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 242257b1944..a7aae24f288 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/oprofile.h>
-#include <linux/vmalloc.h>
#include <linux/errno.h>
#include "event_buffer.h"
@@ -407,6 +406,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val)
return op_cpu_buffer_add_data(entry, val);
}
+int oprofile_add_data64(struct op_entry *entry, u64 val)
+{
+ if (!entry->event)
+ return 0;
+ if (op_cpu_buffer_get_size(entry) < 2)
+ /*
+ * the function returns 0 to indicate a too small
+ * buffer, even if there is some space left
+ */
+ return 0;
+ if (!op_cpu_buffer_add_data(entry, (u32)val))
+ return 0;
+ return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
+}
+
int oprofile_write_commit(struct op_entry *entry)
{
if (!entry->event)
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 3cffce90f82..dc8a0428260 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -12,6 +12,8 @@
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
#include <asm/mutex.h>
#include "oprof.h"
@@ -87,6 +89,69 @@ out:
return err;
}
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static void switch_worker(struct work_struct *work);
+static DECLARE_DELAYED_WORK(switch_work, switch_worker);
+
+static void start_switch_worker(void)
+{
+ if (oprofile_ops.switch_events)
+ schedule_delayed_work(&switch_work, oprofile_time_slice);
+}
+
+static void stop_switch_worker(void)
+{
+ cancel_delayed_work_sync(&switch_work);
+}
+
+static void switch_worker(struct work_struct *work)
+{
+ if (oprofile_ops.switch_events())
+ return;
+
+ atomic_inc(&oprofile_stats.multiplex_counter);
+ start_switch_worker();
+}
+
+/* User inputs in ms, converts to jiffies */
+int oprofile_set_timeout(unsigned long val_msec)
+{
+ int err = 0;
+ unsigned long time_slice;
+
+ mutex_lock(&start_mutex);
+
+ if (oprofile_started) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!oprofile_ops.switch_events) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ time_slice = msecs_to_jiffies(val_msec);
+ if (time_slice == MAX_JIFFY_OFFSET) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ oprofile_time_slice = time_slice;
+
+out:
+ mutex_unlock(&start_mutex);
+ return err;
+
+}
+
+#else
+
+static inline void start_switch_worker(void) { }
+static inline void stop_switch_worker(void) { }
+
+#endif
/* Actually start profiling (echo 1>/dev/oprofile/enable) */
int oprofile_start(void)
@@ -108,6 +173,8 @@ int oprofile_start(void)
if ((err = oprofile_ops.start()))
goto out;
+ start_switch_worker();
+
oprofile_started = 1;
out:
mutex_unlock(&start_mutex);
@@ -123,6 +190,9 @@ void oprofile_stop(void)
goto out;
oprofile_ops.stop();
oprofile_started = 0;
+
+ stop_switch_worker();
+
/* wake up the daemon to read what remains */
wake_up_buffer_waiter();
out:
@@ -155,7 +225,6 @@ post_sync:
mutex_unlock(&start_mutex);
}
-
int oprofile_set_backtrace(unsigned long val)
{
int err = 0;
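
[Editor's sketch, not part of the patch] oprofile_set_timeout() above validates the user's millisecond value by checking for the MAX_JIFFY_OFFSET sentinel that msecs_to_jiffies() returns on overflow. A tiny illustrative helper using the same check; "example_set_period" and "example_period" are hypothetical:

#include <linux/jiffies.h>
#include <linux/errno.h>

static unsigned long example_period;	/* stored in jiffies */

static int example_set_period(unsigned long msec)
{
	unsigned long t = msecs_to_jiffies(msec);

	if (t == MAX_JIFFY_OFFSET)	/* value does not fit in jiffies */
		return -EINVAL;

	example_period = t;
	return 0;
}
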
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index c288d3c24b5..cb92f5c98c1 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -24,6 +24,8 @@ struct oprofile_operations;
extern unsigned long oprofile_buffer_size;
extern unsigned long oprofile_cpu_buffer_size;
extern unsigned long oprofile_buffer_watershed;
+extern unsigned long oprofile_time_slice;
+
extern struct oprofile_operations oprofile_ops;
extern unsigned long oprofile_started;
extern unsigned long oprofile_backtrace_depth;
@@ -35,5 +37,6 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root);
void oprofile_timer_init(struct oprofile_operations *ops);
int oprofile_set_backtrace(unsigned long depth);
+int oprofile_set_timeout(unsigned long time);
#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 5d36ffc30dd..bbd7516e086 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -9,6 +9,7 @@
#include <linux/fs.h>
#include <linux/oprofile.h>
+#include <linux/jiffies.h>
#include "event_buffer.h"
#include "oprofile_stats.h"
@@ -17,10 +18,51 @@
#define BUFFER_SIZE_DEFAULT 131072
#define CPU_BUFFER_SIZE_DEFAULT 8192
#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
+#define TIME_SLICE_DEFAULT 1
unsigned long oprofile_buffer_size;
unsigned long oprofile_cpu_buffer_size;
unsigned long oprofile_buffer_watershed;
+unsigned long oprofile_time_slice;
+
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+
+static ssize_t timeout_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offset)
+{
+ return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
+ buf, count, offset);
+}
+
+
+static ssize_t timeout_write(struct file *file, char const __user *buf,
+ size_t count, loff_t *offset)
+{
+ unsigned long val;
+ int retval;
+
+ if (*offset)
+ return -EINVAL;
+
+ retval = oprofilefs_ulong_from_user(&val, buf, count);
+ if (retval)
+ return retval;
+
+ retval = oprofile_set_timeout(val);
+
+ if (retval)
+ return retval;
+ return count;
+}
+
+
+static const struct file_operations timeout_fops = {
+ .read = timeout_read,
+ .write = timeout_write,
+};
+
+#endif
+
static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
@@ -129,6 +171,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
+ oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
oprofilefs_create_file(sb, root, "enable", &enable_fops);
oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -139,6 +182,9 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+ oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
+#endif
oprofile_create_stats_files(sb, root);
if (oprofile_ops.create_files)
oprofile_ops.create_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 3c2270a8300..61689e814d4 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -34,6 +34,7 @@ void oprofile_reset_stats(void)
atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
atomic_set(&oprofile_stats.event_lost_overflow, 0);
atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
+ atomic_set(&oprofile_stats.multiplex_counter, 0);
}
@@ -76,4 +77,8 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
&oprofile_stats.event_lost_overflow);
oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
&oprofile_stats.bt_lost_no_mapping);
+#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
+ oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
+ &oprofile_stats.multiplex_counter);
+#endif
}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 3da0d08dc1f..0b54e46c3c1 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -17,6 +17,7 @@ struct oprofile_stat_struct {
atomic_t sample_lost_no_mapping;
atomic_t bt_lost_no_mapping;
atomic_t event_lost_overflow;
+ atomic_t multiplex_counter;
};
extern struct oprofile_stat_struct oprofile_stats;
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 4f5b8712931..44803644ca0 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -55,15 +55,12 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
- /*
- * alloc irq desc if not allocated already.
- */
- desc = irq_to_desc_alloc_node(irq, node);
+ desc = irq_to_desc(irq);
if (!desc) {
printk(KERN_INFO "can not get irq_desc for %d\n", irq);
return NULL;
@@ -72,16 +69,11 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(node);
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
- return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
-}
-
#else /* !CONFIG_SPARSE_IRQ */
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e3a87210e94..e03fe98f061 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -598,6 +598,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
}
/**
+ * pci_sriov_resource_alignment - get resource alignment for VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ *
+ * Returns the alignment of the VF BAR found in the SR-IOV capability.
+ * This is not the same as the resource size, which is defined as
+ * the VF BAR size multiplied by the number of VFs; the alignment
+ * is just the VF BAR size.
+ */
+int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+{
+ struct resource tmp;
+ enum pci_bar_type type;
+ int reg = pci_iov_resource_bar(dev, resno, &type);
+
+ if (!reg)
+ return 0;
+
+ __pci_read_base(dev, type, &tmp, reg);
+ return resource_alignment(&tmp);
+}
+
+/**
* pci_restore_iov_state - restore the state of the IOV capability
* @dev: the PCI device
*/
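
An SR-IOV BAR backs all virtual functions at once, so the size reported for the resource is the per-VF BAR size multiplied by the number of VFs, while the BAR itself only needs to be aligned to the per-VF size. A minimal arithmetic sketch with illustrative numbers (not taken from any particular device):

	/* sketch: a 1 MiB VF BAR with 64 VFs */
	#include <stdio.h>

	int main(void)
	{
		unsigned long vf_bar_size = 1UL << 20;  /* per-VF BAR size */
		unsigned int total_vfs = 64;            /* TotalVFs from the SR-IOV capability */
		unsigned long size = vf_bar_size * total_vfs;   /* what resource_alignment() would see */
		unsigned long align = vf_bar_size;               /* what pci_sriov_resource_alignment() returns */

		printf("resource size: %lu MiB, required alignment: %lu MiB\n",
		       size >> 20, align >> 20);
		return 0;
	}
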
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d76c4c85367..f99bc7f089f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -508,7 +508,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
return error;
}
- return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0;
+ return pci_restore_state(pci_dev);
}
static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index dbd0f947f49..7b70312181d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -846,6 +846,8 @@ pci_restore_state(struct pci_dev *dev)
int i;
u32 val;
+ if (!dev->state_saved)
+ return 0;
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f73bcbedf37..5ff4d25bf0e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
+extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
}
#endif /* CONFIG_PCI_IOV */
+static inline int pci_resource_alignment(struct pci_dev *dev,
+ struct resource *res)
+{
+#ifdef CONFIG_PCI_IOV
+ int resno = res - dev->resource;
+
+ if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ return pci_sriov_resource_alignment(dev, resno);
+#endif
+ return resource_alignment(res);
+}
+
#endif /* DRIVERS_PCI_H */
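
The helper recovers the BAR index from the offset of the resource pointer inside dev->resource[], so IOV resources (indices from PCI_IOV_RESOURCES to PCI_IOV_RESOURCE_END) take the SR-IOV path and all other BARs keep using resource_alignment(). A stand-alone sketch of that pointer arithmetic, using a stand-in structure instead of the real struct pci_dev:

	/* sketch: recover a resource index from its address, as the helper does */
	#include <stdio.h>

	struct fake_resource { unsigned long start, end; };
	struct fake_dev { struct fake_resource resource[11]; };

	int main(void)
	{
		struct fake_dev dev = { { { 0, 0 } } };
		struct fake_resource *res = &dev.resource[7];
		long resno = res - dev.resource;  /* pointer subtraction yields the index */

		printf("resno = %ld\n", resno);   /* prints 7 */
		return 0;
	}
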
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 06b96562396..85ce23997be 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -992,7 +992,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX,
static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
{
- /* set sb600/sb700/sb800 sata to ahci mode */
+ /* set SBX00 SATA in IDE mode to AHCI mode */
u8 tmp;
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
@@ -1011,6 +1011,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
/*
* Serverworks CSB5 IDE does not fully support native mode
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index b636e245445..7c443b4583a 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,7 +25,7 @@
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
-
+#include "pci.h"
static void pbus_assign_resources_sorted(const struct pci_bus *bus)
{
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
continue;
r_size = resource_size(r);
/* For bridges size != alignment */
- align = resource_alignment(r);
+ align = pci_resource_alignment(dev, r);
order = __ffs(align) - 20;
if (order > 11) {
dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1898c7b4790..88cdd1a937d 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
- align = resource_alignment(res);
+ align = pci_resource_alignment(dev, res);
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
struct pci_bus *bus;
int ret;
- align = resource_alignment(res);
+ align = pci_resource_alignment(dev, res);
if (!align) {
dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
"alignment) %pR flags %#lx\n",
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
if (!(r->flags) || r->parent)
continue;
- r_align = resource_alignment(r);
+ r_align = pci_resource_alignment(dev, r);
if (!r_align) {
dev_warn(&dev->dev, "BAR %d: bogus alignment "
"%pR flags %#lx\n",
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
struct resource_list *ln = list->next;
if (ln)
- align = resource_alignment(ln->res);
+ align = pci_resource_alignment(ln->dev, ln->res);
if (r_align > align) {
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 81d31ea507d..51c0a8bee41 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -335,6 +335,7 @@ static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
if (hci_result != HCI_SUCCESS) {
/* Can't do anything useful */
mutex_unlock(&dev->mutex);
+ return;
}
new_rfk_state = value;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 043b208d971..f215a591919 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
acpi_status status;
struct acpi_object_list input;
union acpi_object params[3];
- char method[4] = "WM";
+ char method[5] = "WM";
if (!find_guid(guid_string, &wblock))
return AE_ERROR;
@@ -328,8 +328,8 @@ struct acpi_buffer *out)
acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input, wc_input;
union acpi_object wc_params[1], wq_params[1];
- char method[4];
- char wc_method[4] = "WC";
+ char method[5];
+ char wc_method[5] = "WC";
if (!guid_string || !out)
return AE_BAD_PARAMETER;
@@ -410,7 +410,7 @@ const struct acpi_buffer *in)
acpi_handle handle;
struct acpi_object_list input;
union acpi_object params[2];
- char method[4] = "WS";
+ char method[5] = "WS";
if (!guid_string || !in)
return AE_BAD_DATA;
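
The method buffers grow from 4 to 5 bytes because each name is a two-letter prefix ("WM", "WC" or "WS") followed by a two-character object ID, and the terminating NUL needs a fifth byte. A small sketch of the sizing, assuming the object ID is appended with strncat() (the ID shown is hypothetical):

	/* sketch: "WM" + two-character object id + NUL needs 5 bytes */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char method[5] = "WM";          /* 4 bytes would leave no room for the NUL */
		const char object_id[] = "AB";  /* hypothetical two-character id */

		strncat(method, object_id, 2);  /* method is now "WMAB" plus the terminator */
		printf("%s needs %zu bytes\n", method, strlen(method) + 1);
		return 0;
	}
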
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index ac8cc8cea1e..fea17e7805e 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -244,7 +244,7 @@ int pps_register_cdev(struct pps_device *pps)
}
pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
"pps%d", pps->id);
- if (err)
+ if (IS_ERR(pps->dev))
goto del_cdev;
dev_set_drvdata(pps->dev, pps);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 74983666865..e109da4583a 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -669,14 +669,14 @@ static void dasd_profile_end(struct dasd_block *block,
* memory and 2) dasd_smalloc_request uses the static ccw memory
* that gets allocated for each device.
*/
-struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
+struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
int datasize,
struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
/* Sanity checks */
- BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
+ BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
@@ -700,14 +700,13 @@ struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
return ERR_PTR(-ENOMEM);
}
}
- strncpy((char *) &cqr->magic, magic, 4);
- ASCEBC((char *) &cqr->magic, 4);
+ cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
-struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
int datasize,
struct dasd_device *device)
{
@@ -717,7 +716,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
int size;
/* Sanity checks */
- BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
+ BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
@@ -744,8 +743,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
cqr->data = data;
memset(cqr->data, 0, datasize);
}
- strncpy((char *) &cqr->magic, magic, 4);
- ASCEBC((char *) &cqr->magic, 4);
+ cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
@@ -899,9 +897,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
switch (rc) {
case 0:
cqr->status = DASD_CQR_IN_IO;
- DBF_DEV_EVENT(DBF_DEBUG, device,
- "start_IO: request %p started successful",
- cqr);
break;
case -EBUSY:
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
@@ -1699,8 +1694,11 @@ static void __dasd_process_request_queue(struct dasd_block *block)
* for that. State DASD_STATE_ONLINE is normal block device
* operation.
*/
- if (basedev->state < DASD_STATE_READY)
+ if (basedev->state < DASD_STATE_READY) {
+ while ((req = blk_fetch_request(block->request_queue)))
+ __blk_end_request_all(req, -EIO);
return;
+ }
/* Now we try to fetch requests from the request queue */
while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
@@ -2135,9 +2133,9 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct dasd_device *base;
block = bdev->bd_disk->private_data;
- base = block->base;
if (!block)
return -ENODEV;
+ base = block->base;
if (!base->discipline ||
!base->discipline->fill_geometry)
@@ -2530,7 +2528,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
void *rdc_buffer,
int rdc_buffer_size,
- char *magic)
+ int magic)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -2561,7 +2559,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
}
-int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
+int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
void *rdc_buffer, int rdc_buffer_size)
{
int ret;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 27991b69205..e8ff7b0c961 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,7 +7,7 @@
*
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/timer.h>
#include <linux/slab.h>
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 5b7bbc87593..70a008c0052 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -5,7 +5,7 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/list.h>
#include <asm/ebcdic.h>
@@ -379,8 +379,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
int rc;
unsigned long flags;
- cqr = dasd_kmalloc_request("ECKD",
- 1 /* PSF */ + 1 /* RSSD */ ,
+ cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data)),
device);
if (IS_ERR(cqr))
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 644086ba2ed..4e49b4a6c88 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
*
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-diag"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -523,8 +523,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
/* Build the request */
datasize = sizeof(struct dasd_diag_req) +
count*sizeof(struct dasd_diag_bio);
- cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
- datasize, memdev);
+ cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c11770f5b36..a1ce573648a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -10,7 +10,7 @@
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -730,7 +730,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
+ device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -934,8 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
struct dasd_eckd_private *private;
private = (struct dasd_eckd_private *) device->private;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1 /* PSF */ + 1 /* RSSD */ ,
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_features)),
device);
@@ -998,7 +998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
struct dasd_psf_ssc_data *psf_ssc_data;
struct ccw1 *ccw;
- cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_ssc_data),
device);
@@ -1149,8 +1149,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
goto out_err3;
/* Read Device Characteristics */
- rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data,
- 64);
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ &private->rdc_data, 64);
if (rc) {
DBF_EVENT(DBF_WARNING,
"Read device characteristics failed, rc=%d for "
@@ -1217,8 +1217,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cplength = 8;
datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -1499,8 +1498,7 @@ dasd_eckd_format_device(struct dasd_device * device,
return ERR_PTR(-EINVAL);
}
/* Allocate the format ccw request. */
- fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, device);
+ fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
if (IS_ERR(fcp))
return fcp;
@@ -1783,8 +1781,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
datasize += count*sizeof(struct LO_eckd_data);
}
/* Allocate the ccw request. */
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+ startdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -1948,8 +1946,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
cidaw * sizeof(unsigned long long);
/* Allocate the ccw request. */
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- cplength, datasize, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+ startdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -2249,8 +2247,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
/* Allocate the ccw request. */
itcw_size = itcw_calc_size(0, ctidaw, 0);
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 0, itcw_size, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
if (IS_ERR(cqr))
return cqr;
@@ -2557,8 +2554,7 @@ dasd_eckd_release(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -2600,8 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -2642,8 +2637,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -2681,8 +2675,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
struct ccw1 *ccw;
int rc;
- cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
- 1 /* PSF */ + 1 /* RSSD */ ,
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_perf_stats_t)),
device);
@@ -2828,7 +2821,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
}
/* setup CCWs for PSF + RSSD */
- cqr = dasd_smalloc_request("ECKD", 2 , 0, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -3254,7 +3247,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
/* Read Device Characteristics */
memset(&private->rdc_data, 0, sizeof(private->rdc_data));
- rc = dasd_generic_read_dev_chars(device, "ECKD",
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
DBF_EVENT(DBF_WARNING,
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c24c8c30380..d96039eae59 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -6,7 +6,7 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-eckd"
#include <linux/init.h>
#include <linux/fs.h>
@@ -464,7 +464,7 @@ int dasd_eer_enable(struct dasd_device *device)
if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
- cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
+ cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device);
if (IS_ERR(cqr))
return -ENOMEM;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index cb8f9cef742..7656384a811 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -99,8 +99,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
cqr->lpm = LPM_ANYPATH;
cqr->status = DASD_CQR_FILLED;
} else {
- dev_err(&device->cdev->dev,
- "default ERP has run out of retries and failed\n");
+ pr_err("%s: default ERP has run out of retries and failed\n",
+ dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
}
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 31849ad5e59..f245377e8e2 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -5,7 +5,7 @@
* Copyright IBM Corp. 1999, 2009
*/
-#define KMSG_COMPONENT "dasd"
+#define KMSG_COMPONENT "dasd-fba"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -152,8 +152,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
block->base = device;
/* Read Device Characteristics */
- rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data,
- 32);
+ rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
+ &private->rdc_data, 32);
if (rc) {
DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
"error %d for device: %s",
@@ -305,8 +305,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
datasize += (count - 1)*sizeof(struct LO_fba_data);
}
/* Allocate the ccw request. */
- cqr = dasd_smalloc_request(dasd_fba_discipline.name,
- cplength, datasize, memdev);
+ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index b699ca356ac..5e47a1ee52b 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -59,6 +59,11 @@
#include <asm/dasd.h>
#include <asm/idals.h>
+/* DASD discipline magic */
+#define DASD_ECKD_MAGIC 0xC5C3D2C4
+#define DASD_DIAG_MAGIC 0xC4C9C1C7
+#define DASD_FBA_MAGIC 0xC6C2C140
+
/*
* SECTION: Type definitions
*/
@@ -540,9 +545,9 @@ extern struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
-dasd_kmalloc_request(char *, int, int, struct dasd_device *);
+dasd_kmalloc_request(int, int, int, struct dasd_device *);
struct dasd_ccw_req *
-dasd_smalloc_request(char *, int, int, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *);
void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
@@ -587,7 +592,7 @@ void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
int dasd_generic_restore_device(struct ccw_device *);
-int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int);
+int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
/* externals in dasd_devmap.c */
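
Passing the magic as an int drops the per-request strncpy()/ASCEBC() conversion: the constants above are the EBCDIC encodings of the old discipline strings, precomputed at compile time. A quick stand-alone check for "ECKD" ('E'=0xC5, 'C'=0xC3, 'K'=0xD2, 'D'=0xC4), done by hand rather than with the kernel's ASCEBC() helper:

	/* sketch: the magic is just the EBCDIC bytes of the discipline name */
	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* EBCDIC codes: 'E'=0xC5 'C'=0xC3 'K'=0xD2 'D'=0xC4 */
		unsigned int magic = (0xC5u << 24) | (0xC3u << 16) | (0xD2u << 8) | 0xC4u;

		assert(magic == 0xC5C3D2C4);  /* DASD_ECKD_MAGIC */
		printf("ECKD magic: 0x%08X\n", magic);
		return 0;
	}
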
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index df918ef2796..f756a1b0c57 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -98,8 +98,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
- dev_info(&base->cdev->dev, "The DASD has been put in the quiesce "
- "state\n");
+ pr_info("%s: The DASD has been put in the quiesce "
+ "state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped |= DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -119,8 +119,8 @@ static int dasd_ioctl_resume(struct dasd_block *block)
if (!capable (CAP_SYS_ADMIN))
return -EACCES;
- dev_info(&base->cdev->dev, "I/O operations have been resumed "
- "on the DASD\n");
+ pr_info("%s: I/O operations have been resumed "
+ "on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
base->stopped &= ~DASD_STOPPED_QUIESCE;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -146,8 +146,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
- dev_warn(&base->cdev->dev,
- "The DASD cannot be formatted while it is enabled\n");
+ pr_warning("%s: The DASD cannot be formatted while it is "
+ "enabled\n", dev_name(&base->cdev->dev));
return -EBUSY;
}
@@ -175,9 +175,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
dasd_sfree_request(cqr, cqr->memdev);
if (rc) {
if (rc != -ERESTARTSYS)
- dev_err(&base->cdev->dev,
- "Formatting unit %d failed with "
- "rc=%d\n", fdata->start_unit, rc);
+ pr_err("%s: Formatting unit %d failed with "
+ "rc=%d\n", dev_name(&base->cdev->dev),
+ fdata->start_unit, rc);
return rc;
}
fdata->start_unit++;
@@ -204,9 +204,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
return -EFAULT;
if (bdev != bdev->bd_contains) {
- dev_warn(&block->base->cdev->dev,
- "The specified DASD is a partition and cannot be "
- "formatted\n");
+ pr_warning("%s: The specified DASD is a partition and cannot "
+ "be formatted\n",
+ dev_name(&block->base->cdev->dev));
return -EINVAL;
}
return dasd_format(block, &fdata);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index db442cd6621..ee604e92a5f 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -42,7 +42,6 @@
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
-#include <asm/checksum.h>
#define XPRAM_NAME "xpram"
#define XPRAM_DEVS 1 /* one partition */
@@ -51,7 +50,6 @@
typedef struct {
unsigned int size; /* size of xpram segment in pages */
unsigned int offset; /* start page of xpram segment */
- unsigned int csum; /* partition checksum for suspend */
} xpram_device_t;
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
@@ -387,58 +385,6 @@ out:
}
/*
- * Save checksums for all partitions.
- */
-static int xpram_save_checksums(void)
-{
- unsigned long mem_page;
- int rc, i;
-
- rc = 0;
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return -ENOMEM;
- for (i = 0; i < xpram_devs; i++) {
- rc = xpram_page_in(mem_page, xpram_devices[i].offset);
- if (rc)
- goto fail;
- xpram_devices[i].csum = csum_partial((const void *) mem_page,
- PAGE_SIZE, 0);
- }
-fail:
- free_page(mem_page);
- return rc ? -ENXIO : 0;
-}
-
-/*
- * Verify checksums for all partitions.
- */
-static int xpram_validate_checksums(void)
-{
- unsigned long mem_page;
- unsigned int csum;
- int rc, i;
-
- rc = 0;
- mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
- if (!mem_page)
- return -ENOMEM;
- for (i = 0; i < xpram_devs; i++) {
- rc = xpram_page_in(mem_page, xpram_devices[i].offset);
- if (rc)
- goto fail;
- csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0);
- if (xpram_devices[i].csum != csum) {
- rc = -EINVAL;
- goto fail;
- }
- }
-fail:
- free_page(mem_page);
- return rc ? -ENXIO : 0;
-}
-
-/*
* Resume failed: Print error message and call panic.
*/
static void xpram_resume_error(const char *message)
@@ -458,21 +404,10 @@ static int xpram_restore(struct device *dev)
xpram_resume_error("xpram disappeared");
if (xpram_pages != xpram_highest_page_index() + 1)
xpram_resume_error("Size of xpram changed");
- if (xpram_validate_checksums())
- xpram_resume_error("Data of xpram changed");
return 0;
}
-/*
- * Save necessary state in suspend.
- */
-static int xpram_freeze(struct device *dev)
-{
- return xpram_save_checksums();
-}
-
static struct dev_pm_ops xpram_pm_ops = {
- .freeze = xpram_freeze,
.restore = xpram_restore,
};
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 0769ced52db..4e34d3686c2 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -82,6 +82,16 @@ config SCLP_CPI
You should only select this option if you know what you are doing,
need this feature and intend to run your kernel in LPAR.
+config SCLP_ASYNC
+ tristate "Support for Call Home via Asynchronous SCLP Records"
+ depends on S390
+ help
+ This option enables the call home function, which is able to inform
+ the service element and connected organisations about a kernel panic.
+ You should only select this option if you know what you are doing,
+ want to inform other people about your kernel panics,
+ need this feature and intend to run your kernel in LPAR.
+
config S390_TAPE
tristate "S/390 tape device support"
depends on CCW
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 7e73e39a174..efb500ab66c 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
+obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 3234e90bd7f..89ece1c235a 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -581,7 +581,7 @@ static int __init mon_init(void)
monreader_device->release = (void (*)(struct device *))kfree;
rc = device_register(monreader_device);
if (rc) {
- kfree(monreader_device);
+ put_device(monreader_device);
goto out_driver;
}
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 60e7cb07095..6bb5a6bdfab 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -27,6 +27,7 @@
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
+#define EVTYP_ASYNC 0x0A
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
@@ -38,6 +39,7 @@
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
+#define EVTYP_ASYNC_MASK 0x00400000
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
@@ -85,12 +87,12 @@ struct sccb_header {
} __attribute__((packed));
extern u64 sclp_facilities;
-
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
+
struct gds_subvector {
u8 length;
u8 key;
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
new file mode 100644
index 00000000000..daaec185ed3
--- /dev/null
+++ b/drivers/s390/char/sclp_async.c
@@ -0,0 +1,224 @@
+/*
+ * Enable Asynchronous Notification via SCLP.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+#include <linux/utsname.h>
+#include "sclp.h"
+
+static int callhome_enabled;
+static struct sclp_req *request;
+static struct sclp_async_sccb *sccb;
+static int sclp_async_send_wait(char *message);
+static struct ctl_table_header *callhome_sysctl_header;
+static DEFINE_SPINLOCK(sclp_async_lock);
+static char nodename[64];
+#define SCLP_NORMAL_WRITE 0x00
+
+struct async_evbuf {
+ struct evbuf_header header;
+ u64 reserved;
+ u8 rflags;
+ u8 empty;
+ u8 rtype;
+ u8 otype;
+ char comp_id[12];
+ char data[3000]; /* there is still some space left */
+} __attribute__((packed));
+
+struct sclp_async_sccb {
+ struct sccb_header header;
+ struct async_evbuf evbuf;
+} __attribute__((packed));
+
+static struct sclp_register sclp_async_register = {
+ .send_mask = EVTYP_ASYNC_MASK,
+};
+
+static int call_home_on_panic(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ strncat(data, nodename, strlen(nodename));
+ sclp_async_send_wait(data);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block call_home_panic_nb = {
+ .notifier_call = call_home_on_panic,
+ .priority = INT_MAX,
+};
+
+static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp,
+ void __user *buffer, size_t *count,
+ loff_t *ppos)
+{
+ unsigned long val;
+ int len, rc;
+ char buf[2];
+
+ if (!*count || (*ppos && !write)) {
+ *count = 0;
+ return 0;
+ }
+ if (!write) {
+ len = sprintf(buf, "%d\n", callhome_enabled);
+ buf[len] = '\0';
+ rc = copy_to_user(buffer, buf, sizeof(buf));
+ if (rc != 0)
+ return -EFAULT;
+ } else {
+ len = *count;
+ rc = copy_from_user(buf, buffer, sizeof(buf));
+ if (rc != 0)
+ return -EFAULT;
+ if (strict_strtoul(buf, 0, &val) != 0)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ callhome_enabled = val;
+ }
+ *count = len;
+ *ppos += len;
+ return 0;
+}
+
+static struct ctl_table callhome_table[] = {
+ {
+ .procname = "callhome",
+ .mode = 0644,
+ .proc_handler = &proc_handler_callhome,
+ },
+ { .ctl_name = 0 }
+};
+
+static struct ctl_table kern_dir_table[] = {
+ {
+ .ctl_name = CTL_KERN,
+ .procname = "kernel",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = callhome_table,
+ },
+ { .ctl_name = 0 }
+};
+
+/*
+ * Send an asynchronous notification record and wait
+ * for send completion.
+ */
+static int sclp_async_send_wait(char *message)
+{
+ struct async_evbuf *evb;
+ int rc;
+ unsigned long flags;
+
+ if (!callhome_enabled)
+ return 0;
+ sccb->evbuf.header.type = EVTYP_ASYNC;
+ sccb->evbuf.rtype = 0xA5;
+ sccb->evbuf.otype = 0x00;
+ evb = &sccb->evbuf;
+ request->command = SCLP_CMDW_WRITE_EVENT_DATA;
+ request->sccb = sccb;
+ request->status = SCLP_REQ_FILLED;
+ strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
+ /*
+ * Retain Queue
+ * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
+ */
+ strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
+ sccb->evbuf.header.length = sizeof(sccb->evbuf);
+ sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
+ sccb->header.function_code = SCLP_NORMAL_WRITE;
+ rc = sclp_add_request(request);
+ if (rc)
+ return rc;
+ spin_lock_irqsave(&sclp_async_lock, flags);
+ while (request->status != SCLP_REQ_DONE &&
+ request->status != SCLP_REQ_FAILED) {
+ sclp_sync_wait();
+ }
+ spin_unlock_irqrestore(&sclp_async_lock, flags);
+ if (request->status != SCLP_REQ_DONE)
+ return -EIO;
+ rc = ((struct sclp_async_sccb *)
+ request->sccb)->header.response_code;
+ if (rc != 0x0020)
+ return -EIO;
+ if (evb->header.flags != 0x80)
+ return -EIO;
+ return rc;
+}
+
+static int __init sclp_async_init(void)
+{
+ int rc;
+
+ rc = sclp_register(&sclp_async_register);
+ if (rc)
+ return rc;
+ callhome_sysctl_header = register_sysctl_table(kern_dir_table);
+ if (!callhome_sysctl_header) {
+ rc = -ENOMEM;
+ goto out_sclp;
+ }
+ if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
+ rc = -EOPNOTSUPP;
+ goto out_sys;
+ }
+ rc = -ENOMEM;
+ request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
+ if (!request)
+ goto out_sys;
+ sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ goto out_mem;
+ rc = atomic_notifier_chain_register(&panic_notifier_list,
+ &call_home_panic_nb);
+ if (rc)
+ goto out_mem;
+
+ strlcpy(nodename, init_utsname()->nodename, sizeof(nodename));
+ return 0;
+
+out_mem:
+ kfree(request);
+ free_page((unsigned long) sccb);
+out_sys:
+ unregister_sysctl_table(callhome_sysctl_header);
+out_sclp:
+ sclp_unregister(&sclp_async_register);
+ return rc;
+
+}
+module_init(sclp_async_init);
+
+static void __exit sclp_async_exit(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &call_home_panic_nb);
+ unregister_sysctl_table(callhome_sysctl_header);
+ sclp_unregister(&sclp_async_register);
+ free_page((unsigned long) sccb);
+ kfree(request);
+}
+module_exit(sclp_async_exit);
+
+MODULE_AUTHOR("Copyright IBM Corp. 2009");
+MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
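
The sysctl table hangs "callhome" under the "kernel" directory, so the switch appears as /proc/sys/kernel/callhome and accepts only 0 or 1. A minimal user-space sketch that enables it (assuming the sclp_async module is loaded):

	/* sketch: enable the call home function from user space */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sys/kernel/callhome", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1\n", 2) != 2) /* the handler accepts only 0 or 1 */
			perror("write");
		close(fd);
		return 0;
	}
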
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5a519fac37b..2fe45ff77b7 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -8,7 +8,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#define KMSG_COMPONENT "tape"
+#define KMSG_COMPONENT "tape_34xx"
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 418f72dd39b..e4cc3aae916 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -8,7 +8,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#define KMSG_COMPONENT "tape"
+#define KMSG_COMPONENT "tape_3590"
#include <linux/module.h>
#include <linux/init.h>
@@ -39,8 +39,6 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
* - Read Alternate: implemented
*******************************************************************/
-#define KMSG_COMPONENT "tape"
-
static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
[0x00] = "",
[0x10] = "Lost Sense",
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 47ff695255e..4cb9e70507a 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -302,8 +302,6 @@ tapeblock_revalidate_disk(struct gendisk *disk)
if (!device->blk_data.medium_changed)
return 0;
- dev_info(&device->cdev->dev, "Determining the size of the recorded "
- "area...\n");
rc = tape_mtop(device, MTFSFM, 1);
if (rc)
return rc;
@@ -312,6 +310,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
if (rc < 0)
return rc;
+ pr_info("%s: Determining the size of the recorded area...\n",
+ dev_name(&device->cdev->dev));
DBF_LH(3, "Image file ends at %d\n", rc);
nr_of_blks = rc;
@@ -330,8 +330,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
device->bof = rc;
nr_of_blks -= rc;
- dev_info(&device->cdev->dev, "The size of the recorded area is %i "
- "blocks\n", nr_of_blks);
+ pr_info("%s: The size of the recorded area is %i blocks\n",
+ dev_name(&device->cdev->dev), nr_of_blks);
set_capacity(device->blk_data.disk,
nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
@@ -366,8 +366,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
if (device->required_tapemarks) {
DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
- dev_warn(&device->cdev->dev, "Opening the tape failed because"
- " of missing end-of-file marks\n");
+ pr_warning("%s: Opening the tape failed because of missing "
+ "end-of-file marks\n", dev_name(&device->cdev->dev));
rc = -EPERM;
goto put_device;
}
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 1d420d94759..5cd31e07164 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -214,13 +214,15 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
- dev_info(&device->cdev->dev, "The tape cartridge has been "
- "successfully unloaded\n");
+ if (device->medium_state == MS_LOADED)
+ pr_info("%s: The tape cartridge has been successfully "
+ "unloaded\n", dev_name(&device->cdev->dev));
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
- dev_info(&device->cdev->dev, "A tape cartridge has been "
- "mounted\n");
+ if (device->medium_state == MS_UNLOADED)
+ pr_info("%s: A tape cartridge has been mounted\n",
+ dev_name(&device->cdev->dev));
break;
default:
// print nothing
@@ -358,11 +360,11 @@ tape_generic_online(struct tape_device *device,
out_char:
tapechar_cleanup_device(device);
+out_minor:
+ tape_remove_minor(device);
out_discipline:
device->discipline->cleanup_device(device);
device->discipline = NULL;
-out_minor:
- tape_remove_minor(device);
out:
module_put(discipline->owner);
return rc;
@@ -654,8 +656,8 @@ tape_generic_remove(struct ccw_device *cdev)
*/
DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
device->cdev_id);
- dev_warn(&device->cdev->dev, "A tape unit was detached"
- " while in use\n");
+ pr_warning("%s: A tape unit was detached while in "
+ "use\n", dev_name(&device->cdev->dev));
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 1a9420ba518..750354ad16e 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -68,7 +68,7 @@ tape_std_assign(struct tape_device *device)
* to another host (actually this shouldn't happen but it does).
* So we set up a timeout for this call.
*/
- init_timer(&timeout);
+ init_timer_on_stack(&timeout);
timeout.function = tape_std_assign_timeout;
timeout.data = (unsigned long) request;
timeout.expires = jiffies + 2 * HZ;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c20a4fe6da5..d1a142fa3eb 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -765,8 +765,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
} else
return -ENOMEM;
ret = device_register(dev);
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
+ }
ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
if (ret) {
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 31b902e94f7..77571b68539 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -1026,9 +1026,15 @@ static int __init ur_init(void)
debug_set_level(vmur_dbf, 6);
+ vmur_class = class_create(THIS_MODULE, "vmur");
+ if (IS_ERR(vmur_class)) {
+ rc = PTR_ERR(vmur_class);
+ goto fail_free_dbf;
+ }
+
rc = ccw_driver_register(&ur_driver);
if (rc)
- goto fail_free_dbf;
+ goto fail_class_destroy;
rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
if (rc) {
@@ -1038,18 +1044,13 @@ static int __init ur_init(void)
}
ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
- vmur_class = class_create(THIS_MODULE, "vmur");
- if (IS_ERR(vmur_class)) {
- rc = PTR_ERR(vmur_class);
- goto fail_unregister_region;
- }
pr_info("%s loaded.\n", ur_banner);
return 0;
-fail_unregister_region:
- unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
fail_unregister_driver:
ccw_driver_unregister(&ur_driver);
+fail_class_destroy:
+ class_destroy(vmur_class);
fail_free_dbf:
debug_unregister(vmur_dbf);
return rc;
@@ -1057,9 +1058,9 @@ fail_free_dbf:
static void __exit ur_exit(void)
{
- class_destroy(vmur_class);
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
+ class_destroy(vmur_class);
debug_unregister(vmur_dbf);
pr_info("%s unloaded.\n", ur_banner);
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1bbae433fbd..c431198bdbc 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -275,7 +275,7 @@ struct zcore_header {
u32 num_pages;
u32 pad1;
u64 tod;
- cpuid_t cpu_id;
+ struct cpuid cpu_id;
u32 arch_id;
u32 volnr;
u32 build_arch;
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index adb3dd30152..fa4c9662f65 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 common i/o drivers
#
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 3e5f304ad88..40002830d48 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -417,7 +417,8 @@ int chp_new(struct chp_id chpid)
if (ret) {
CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
chpid.cssid, chpid.id, ret);
- goto out_free;
+ put_device(&chp->dev);
+ goto out;
}
ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
if (ret) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 425e8f89a6c..37aa611d4ac 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -37,29 +37,6 @@ struct channel_path_desc {
struct channel_path;
-struct css_general_char {
- u64 : 12;
- u32 dynio : 1; /* bit 12 */
- u32 : 28;
- u32 aif : 1; /* bit 41 */
- u32 : 3;
- u32 mcss : 1; /* bit 45 */
- u32 fcs : 1; /* bit 46 */
- u32 : 1;
- u32 ext_mb : 1; /* bit 48 */
- u32 : 7;
- u32 aif_tdd : 1; /* bit 56 */
- u32 : 1;
- u32 qebsm : 1; /* bit 58 */
- u32 : 8;
- u32 aif_osa : 1; /* bit 67 */
- u32 : 14;
- u32 cib : 1; /* bit 82 */
- u32 : 5;
- u32 fcx : 1; /* bit 88 */
- u32 : 7;
-}__attribute__((packed));
-
struct css_chsc_char {
u64 res;
u64 : 20;
@@ -72,7 +49,6 @@ struct css_chsc_char {
u32 : 19;
}__attribute__((packed));
-extern struct css_general_char css_general_characteristics;
extern struct css_chsc_char css_chsc_characteristics;
struct chsc_ssd_info {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5ec7789bd9d..138124fcfca 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -139,12 +139,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
- char dbf_txt[15];
int ccode;
union orb *orb;
- CIO_TRACE_EVENT(4, "stIO");
- CIO_TRACE_EVENT(4, dev_name(&sch->dev));
+ CIO_TRACE_EVENT(5, "stIO");
+ CIO_TRACE_EVENT(5, dev_name(&sch->dev));
orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
@@ -169,8 +168,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
ccode = ssch(sch->schid, orb);
/* process condition code */
- sprintf(dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT(4, dbf_txt);
+ CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -201,16 +199,14 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
int
cio_resume (struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
- CIO_TRACE_EVENT (4, "resIO");
+ CIO_TRACE_EVENT(4, "resIO");
CIO_TRACE_EVENT(4, dev_name(&sch->dev));
ccode = rsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (4, dbf_txt);
+ CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -235,13 +231,12 @@ cio_resume (struct subchannel *sch)
int
cio_halt(struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "haltIO");
+ CIO_TRACE_EVENT(2, "haltIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@@ -249,8 +244,7 @@ cio_halt(struct subchannel *sch)
*/
ccode = hsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -270,13 +264,12 @@ cio_halt(struct subchannel *sch)
int
cio_clear(struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "clearIO");
+ CIO_TRACE_EVENT(2, "clearIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
@@ -284,8 +277,7 @@ cio_clear(struct subchannel *sch)
*/
ccode = csch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
@@ -306,19 +298,17 @@ cio_clear(struct subchannel *sch)
int
cio_cancel (struct subchannel *sch)
{
- char dbf_txt[15];
int ccode;
if (!sch)
return -ENODEV;
- CIO_TRACE_EVENT (2, "cancelIO");
+ CIO_TRACE_EVENT(2, "cancelIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccode = xsch (sch->schid);
- sprintf (dbf_txt, "ccode:%d", ccode);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0: /* success */
@@ -429,11 +419,10 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- char dbf_txt[15];
int retry;
int ret;
- CIO_TRACE_EVENT (2, "ensch");
+ CIO_TRACE_EVENT(2, "ensch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@@ -460,8 +449,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
} else
break;
}
- sprintf (dbf_txt, "ret:%d", ret);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
@@ -472,11 +460,10 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- char dbf_txt[15];
int retry;
int ret;
- CIO_TRACE_EVENT (2, "dissch");
+ CIO_TRACE_EVENT(2, "dissch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
@@ -495,8 +482,7 @@ int cio_disable_subchannel(struct subchannel *sch)
} else
break;
}
- sprintf (dbf_txt, "ret:%d", ret);
- CIO_TRACE_EVENT (2, dbf_txt);
+ CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
@@ -578,11 +564,6 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
goto out;
}
mutex_init(&sch->reg_mutex);
- /* Set a name for the subchannel */
- if (cio_is_console(schid))
- sch->dev.init_name = cio_get_console_sch_name(schid);
- else
- dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
/*
* The first subchannel that is not-operational (ccode==3)
@@ -686,7 +667,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
-static char console_sch_name[10] = "0.x.xxxx";
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
@@ -873,12 +853,6 @@ cio_get_console_subchannel(void)
return &console_subchannel;
}
-const char *cio_get_console_sch_name(struct subchannel_id schid)
-{
- snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
- return (const char *)console_sch_name;
-}
-
#endif
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 5150fba742a..2e43558c704 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -133,15 +133,11 @@ extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
extern void *cio_get_console_priv(void);
-extern const char *cio_get_console_sch_name(struct subchannel_id schid);
-extern const char *cio_get_console_cdev_name(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL
#define cio_get_console_priv() NULL
-#define cio_get_console_sch_name(schid) NULL
-#define cio_get_console_cdev_name(sch) NULL
#endif
#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 85d43c6bcb6..e995123fd80 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -152,24 +152,15 @@ css_alloc_subchannel(struct subchannel_id schid)
}
static void
-css_free_subchannel(struct subchannel *sch)
-{
- if (sch) {
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
- kfree(sch->lock);
- kfree(sch);
- }
-}
-
-static void
css_subchannel_release(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (!cio_is_console(sch->schid)) {
+ /* Reset intparm to zeroes. */
+ sch->config.intparm = 0;
+ cio_commit_config(sch);
kfree(sch->lock);
kfree(sch);
}
@@ -180,6 +171,8 @@ static int css_sch_device_register(struct subchannel *sch)
int ret;
mutex_lock(&sch->reg_mutex);
+ dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
+ sch->schid.sch_no);
ret = device_register(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
@@ -327,7 +320,7 @@ int css_probe_device(struct subchannel_id schid)
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
if (ret)
- css_free_subchannel(sch);
+ put_device(&sch->dev);
return ret;
}
@@ -644,7 +637,10 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
* not working) so we do it now. This is true e.g. for the
* console subchannel.
*/
- css_register_subchannel(sch);
+ if (css_register_subchannel(sch)) {
+ if (!cio_is_console(schid))
+ put_device(&sch->dev);
+ }
return 0;
}
@@ -661,8 +657,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
- css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+ css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
+ css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
@@ -920,8 +916,10 @@ init_channel_subsystem (void)
goto out_device;
}
ret = device_register(&css->pseudo_subchannel->dev);
- if (ret)
+ if (ret) {
+ put_device(&css->pseudo_subchannel->dev);
goto out_file;
+ }
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 3c57c1a18bb..0f95405c2c5 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -307,8 +307,11 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
- if (test_and_clear_bit(1, &cdev->private->registered))
+ if (test_and_clear_bit(1, &cdev->private->registered)) {
device_del(&cdev->dev);
+ /* Release reference from device_initialize(). */
+ put_device(&cdev->dev);
+ }
}
static void ccw_device_remove_orphan_cb(struct work_struct *work)
@@ -319,7 +322,6 @@ static void ccw_device_remove_orphan_cb(struct work_struct *work)
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
ccw_device_unregister(cdev);
- put_device(&cdev->dev);
/* Release cdev reference for workqueue processing. */
put_device(&cdev->dev);
}
@@ -333,15 +335,15 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
* Forced offline in disconnected state means
* 'throw away device'.
*/
- /* Get cdev reference for workqueue processing. */
- if (!get_device(&cdev->dev))
- return;
if (ccw_device_is_orphan(cdev)) {
/*
* Deregister ccw device.
* Unfortunately, we cannot do this directly from the
* attribute method.
*/
+ /* Get cdev reference for workqueue processing. */
+ if (!get_device(&cdev->dev))
+ return;
spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
@@ -380,30 +382,34 @@ int ccw_device_set_offline(struct ccw_device *cdev)
}
cdev->online = 0;
spin_lock_irq(cdev->ccwlock);
- ret = ccw_device_offline(cdev);
- if (ret == -ENODEV) {
- if (cdev->private->state != DEV_STATE_NOT_OPER) {
- cdev->private->state = DEV_STATE_OFFLINE;
- dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
- }
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
- /* Give up reference from ccw_device_set_online(). */
- put_device(&cdev->dev);
- return ret;
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
}
+ ret = ccw_device_offline(cdev);
+ if (ret)
+ goto error;
spin_unlock_irq(cdev->ccwlock);
- if (ret == 0) {
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- /* Give up reference from ccw_device_set_online(). */
- put_device(&cdev->dev);
- } else {
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
- "device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- cdev->online = 1;
- }
- return ret;
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return 0;
+
+error:
+ CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
+ ret, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ cdev->private->state = DEV_STATE_OFFLINE;
+ dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+ spin_unlock_irq(cdev->ccwlock);
+ /* Give up reference from ccw_device_set_online(). */
+ put_device(&cdev->dev);
+ return -ENODEV;
}
/**
@@ -421,6 +427,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
int ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
+ int ret2;
if (!cdev)
return -ENODEV;
@@ -444,28 +451,53 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- if (cdev->private->state != DEV_STATE_ONLINE) {
+ spin_lock_irq(cdev->ccwlock);
+ /* Check if online processing was successful */
+ if ((cdev->private->state != DEV_STATE_ONLINE) &&
+ (cdev->private->state != DEV_STATE_W4SENSE)) {
+ spin_unlock_irq(cdev->ccwlock);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return -ENODEV;
}
- if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
- cdev->online = 1;
- return 0;
- }
+ spin_unlock_irq(cdev->ccwlock);
+ if (cdev->drv->set_online)
+ ret = cdev->drv->set_online(cdev);
+ if (ret)
+ goto rollback;
+ cdev->online = 1;
+ return 0;
+
+rollback:
spin_lock_irq(cdev->ccwlock);
- ret = ccw_device_offline(cdev);
+ /* Wait until a final state or DISCONNECTED is reached */
+ while (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ spin_lock_irq(cdev->ccwlock);
+ }
+ ret2 = ccw_device_offline(cdev);
+ if (ret2)
+ goto error;
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
+ cdev->private->state == DEV_STATE_DISCONNECTED));
+ /* Give up online reference since onlining failed. */
+ put_device(&cdev->dev);
+ return ret;
+
+error:
+ CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
+ "device 0.%x.%04x\n",
+ ret2, cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ cdev->private->state = DEV_STATE_OFFLINE;
spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else
- CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
- "device 0.%x.%04x\n",
- ret, cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
- return (ret == 0) ? -ENODEV : ret;
+ return ret;
}
static int online_store_handle_offline(struct ccw_device *cdev)
@@ -637,8 +669,12 @@ static int ccw_device_register(struct ccw_device *cdev)
int ret;
dev->bus = &ccw_bus_type;
-
- if ((ret = device_add(dev)))
+ ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno);
+ if (ret)
+ return ret;
+ ret = device_add(dev);
+ if (ret)
return ret;
set_bit(1, &cdev->private->registered);
@@ -772,10 +808,8 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
cdev = io_subchannel_allocate_dev(sch);
if (!IS_ERR(cdev)) {
ret = io_subchannel_initialize_dev(sch, cdev);
- if (ret) {
- kfree(cdev);
+ if (ret)
cdev = ERR_PTR(ret);
- }
}
return cdev;
}
@@ -1026,9 +1060,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
/* Release cdev reference for workqueue processing.*/
put_device(&cdev->dev);
/* Release subchannel reference for local processing. */
@@ -1037,6 +1068,9 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
{
+ /* Get cdev reference for workqueue processing. */
+ if (!get_device(&cdev->dev))
+ return;
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
@@ -1057,9 +1091,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
/* Device did not respond in time. */
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
- /* Remove device found not operational. */
- if (!get_device(&cdev->dev))
- break;
ccw_device_schedule_sch_unregister(cdev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@@ -1097,13 +1128,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
init_waitqueue_head(&priv->wait_q);
init_timer(&priv->timer);
- /* Set an initial name for the device. */
- if (cio_is_console(sch->schid))
- cdev->dev.init_name = cio_get_console_cdev_name(sch);
- else
- dev_set_name(&cdev->dev, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
-
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
@@ -1173,8 +1197,8 @@ static void io_subchannel_irq(struct subchannel *sch)
cdev = sch_get_cdev(sch);
- CIO_TRACE_EVENT(3, "IRQ");
- CIO_TRACE_EVENT(3, dev_name(&sch->dev));
+ CIO_TRACE_EVENT(6, "IRQ");
+ CIO_TRACE_EVENT(6, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
@@ -1212,9 +1236,6 @@ static void io_subchannel_do_unreg(struct work_struct *work)
sch = container_of(work, struct subchannel, work);
css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
put_device(&sch->dev);
}
@@ -1336,7 +1357,6 @@ io_subchannel_remove (struct subchannel *sch)
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
ccw_device_unregister(cdev);
- put_device(&cdev->dev);
kfree(sch->private);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
@@ -1573,8 +1593,6 @@ static int purge_fn(struct device *dev, void *data)
spin_unlock_irq(cdev->ccwlock);
if (!unreg)
goto out;
- if (!get_device(&cdev->dev))
- goto out;
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
priv->dev_id.devno);
ccw_device_schedule_sch_unregister(cdev);
@@ -1690,10 +1708,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
spin_unlock_irqrestore(sch->lock, flags);
css_sch_device_unregister(sch);
spin_lock_irqsave(sch->lock, flags);
-
- /* Reset intparm to zeroes. */
- sch->config.intparm = 0;
- cio_commit_config(sch);
break;
case REPROBE:
ccw_device_trigger_reprobe(cdev);
@@ -1714,7 +1728,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
-static char console_cdev_name[10] = "0.x.xxxx";
static struct ccw_device_private console_private;
static int console_cdev_in_use;
@@ -1798,13 +1811,6 @@ int ccw_device_force_console(void)
return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
-
-const char *cio_get_console_cdev_name(struct subchannel *sch)
-{
- snprintf(console_cdev_name, 10, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
- return (const char *)console_cdev_name;
-}
#endif
/*
@@ -2022,7 +2028,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
spin_unlock_irq(sch->lock);
if (ret) {
CIO_MSG_EVENT(0, "Couldn't start recognition for device "
- "%s (ret=%d)\n", dev_name(&cdev->dev), ret);
+ "0.%x.%04x (ret=%d)\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
spin_lock_irq(sch->lock);
cdev->private->state = DEV_STATE_DISCONNECTED;
spin_unlock_irq(sch->lock);
@@ -2085,8 +2093,9 @@ static int ccw_device_pm_restore(struct device *dev)
}
/* check if the device id has changed */
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
- CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from "
- "%04x to %04x)\n", dev_name(&sch->dev),
+ CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
+ "changed from %04x to %04x)\n",
+ sch->schid.ssid, sch->schid.sch_no,
cdev->private->dev_id.devno,
sch->schib.pmcw.dev);
goto out_unreg_unlock;
@@ -2119,8 +2128,9 @@ static int ccw_device_pm_restore(struct device *dev)
if (cm_enabled) {
ret = ccw_set_cmf(cdev, 1);
if (ret) {
- CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed "
- "(rc=%d)\n", dev_name(&cdev->dev), ret);
+ CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
+ "(rc=%d)\n", cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, ret);
ret = 0;
}
}
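
The device.c hunks above converge on a single reference-counting rule: the code that queues a ccw_device for asynchronous unregistration takes the reference (get_device() before the work is queued, now centralized in ccw_device_schedule_sch_unregister()), the worker drops it when done, and ccw_device_unregister() gives up the reference left over from device_initialize(). A minimal user-space sketch of that discipline follows; all names are invented and it is an analogue, not kernel code (build with cc -pthread).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int ref;			/* plays the role of the kobject refcount */
	char id[16];
};

static int obj_get(struct obj *o)
{
	/* get_device() analogue: fails once the object is already dying */
	int old = atomic_load(&o->ref);

	do {
		if (old == 0)
			return 0;
	} while (!atomic_compare_exchange_weak(&o->ref, &old, old + 1));
	return 1;
}

static void obj_put(struct obj *o)
{
	/* put_device() analogue: the last reference releases the object */
	if (atomic_fetch_sub(&o->ref, 1) == 1) {
		printf("releasing %s\n", o->id);
		free(o);
	}
}

static void *unregister_worker(void *arg)
{
	struct obj *o = arg;

	printf("worker unregisters %s\n", o->id);
	obj_put(o);		/* drop the reference taken when the work was queued */
	return NULL;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	pthread_t worker;

	atomic_store(&o->ref, 1);	/* initial reference, as after device_initialize() */
	snprintf(o->id, sizeof(o->id), "0.0.4711");

	/* take the reference for the worker before queueing the teardown */
	if (obj_get(o)) {
		pthread_create(&worker, NULL, unregister_worker, o);
		pthread_join(worker, NULL);
	}
	obj_put(o);		/* give up the initial reference; frees the object */
	return 0;
}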
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 3db88c52d28..e728ce447f6 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -394,6 +394,13 @@ ccw_device_done(struct ccw_device *cdev, int state)
ccw_device_schedule_sch_unregister(cdev);
cdev->private->flags.donotify = 0;
}
+ if (state == DEV_STATE_NOT_OPER) {
+ CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
+ cdev->private->dev_id.devno, sch->schid.sch_no);
+ if (!ccw_device_notify(cdev, CIO_GONE))
+ ccw_device_schedule_sch_unregister(cdev);
+ cdev->private->flags.donotify = 0;
+ }
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
@@ -731,6 +738,17 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
}
/*
+ * Handle path verification event in offline state.
+ */
+static void ccw_device_offline_verify(struct ccw_device *cdev,
+ enum dev_event dev_event)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+ css_schedule_eval(sch->schid);
+}
+
+/*
* Handle path verification event.
*/
static void
@@ -887,6 +905,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
}
call_handler:
cdev->private->state = DEV_STATE_ONLINE;
+ /* In case sensing interfered with setting the device online */
+ wake_up(&cdev->private->wait_q);
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
@@ -1149,7 +1169,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b1241f8fae8..ff7748a9199 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,7 @@
/*
* linux/drivers/s390/cio/qdio.h
*
- * Copyright 2000,2008 IBM Corp.
+ * Copyright 2000,2009 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
@@ -246,6 +246,7 @@ struct qdio_q {
atomic_t nr_buf_used;
struct qdio_irq *irq_ptr;
+ struct dentry *debugfs_q;
struct tasklet_struct tasklet;
/* error condition during a data transfer */
@@ -267,6 +268,7 @@ struct qdio_irq {
struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
+ struct dentry *debugfs_dev;
unsigned long int_parm;
struct subchannel_id schid;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index b8626d4df11..1b78f639ead 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,14 +1,12 @@
/*
* drivers/s390/cio/qdio_debug.c
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008,2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <asm/qdio.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
@@ -17,10 +15,7 @@ debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
-#define MAX_DEBUGFS_QUEUES 32
-static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
-static DEFINE_MUTEX(debugfs_mutex);
-#define QDIO_DEBUGFS_NAME_LEN 40
+#define QDIO_DEBUGFS_NAME_LEN 10
void qdio_allocate_dbf(struct qdio_initialize *init_data,
struct qdio_irq *irq_ptr)
@@ -130,20 +125,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
filp->f_path.dentry->d_inode->i_private);
}
-static void remove_debugfs_entry(struct qdio_q *q)
-{
- int i;
-
- for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
- if (!debugfs_queues[i])
- continue;
- if (debugfs_queues[i]->d_inode->i_private == q) {
- debugfs_remove(debugfs_queues[i]);
- debugfs_queues[i] = NULL;
- }
- }
-}
-
static struct file_operations debugfs_fops = {
.owner = THIS_MODULE,
.open = qstat_seq_open,
@@ -155,22 +136,15 @@ static struct file_operations debugfs_fops = {
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
- int i = 0;
char name[QDIO_DEBUGFS_NAME_LEN];
- while (debugfs_queues[i] != NULL) {
- i++;
- if (i >= MAX_DEBUGFS_QUEUES)
- return;
- }
- snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
- dev_name(&cdev->dev),
+ snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
- debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
- debugfs_root, q, &debugfs_fops);
- if (IS_ERR(debugfs_queues[i]))
- debugfs_queues[i] = NULL;
+ q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+ q->irq_ptr->debugfs_dev, q, &debugfs_fops);
+ if (IS_ERR(q->debugfs_q))
+ q->debugfs_q = NULL;
}
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -178,12 +152,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
struct qdio_q *q;
int i;
- mutex_lock(&debugfs_mutex);
+ irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
+ debugfs_root);
+ if (IS_ERR(irq_ptr->debugfs_dev))
+ irq_ptr->debugfs_dev = NULL;
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
for_each_output_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
- mutex_unlock(&debugfs_mutex);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -191,17 +167,16 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
struct qdio_q *q;
int i;
- mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
- remove_debugfs_entry(q);
+ debugfs_remove(q->debugfs_q);
for_each_output_queue(irq_ptr, q, i)
- remove_debugfs_entry(q);
- mutex_unlock(&debugfs_mutex);
+ debugfs_remove(q->debugfs_q);
+ debugfs_remove(irq_ptr->debugfs_dev);
}
int __init qdio_debug_init(void)
{
- debugfs_root = debugfs_create_dir("qdio_queues", NULL);
+ debugfs_root = debugfs_create_dir("qdio", NULL);
qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
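
The qdio_debug.c changes above drop the flat, mutex-protected array of queue entries in favour of one debugfs directory per device (stored in struct qdio_irq) with one file per queue (stored in struct qdio_q), so removal becomes a plain debugfs_remove() on the saved dentry. A rough user-space sketch of the resulting layout under the new "qdio" root; the bus id and queue count are made up.

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	char dir[64], path[96];
	int nr;
	FILE *f;

	/* debugfs root created in qdio_debug_init(): <debugfs>/qdio */
	mkdir("qdio", 0755);

	/* one directory per ccw device, named after dev_name(&cdev->dev) */
	snprintf(dir, sizeof(dir), "qdio/%s", "0.0.4711");
	mkdir(dir, 0755);

	/* one entry per queue: input_<nr> / output_<nr> */
	for (nr = 0; nr < 2; nr++) {
		snprintf(path, sizeof(path), "%s/input_%d", dir, nr);
		f = fopen(path, "w");
		if (f)
			fclose(f);
	}
	return 0;
}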
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 0038750ad94..9aef402a5f1 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -798,8 +798,10 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
if (!qdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
tasklet_schedule(&q->tasklet);
+ return;
+ }
}
qdio_stop_polling(q);
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c
deleted file mode 100644
index f8da25ab576..00000000000
--- a/drivers/s390/cio/scsw.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Helper functions for scsw access.
- *
- * Copyright IBM Corp. 2008
- * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <asm/cio.h>
-#include "css.h"
-#include "chsc.h"
-
-/**
- * scsw_is_tm - check for transport mode scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the specified scsw is a transport mode scsw, zero
- * otherwise.
- */
-int scsw_is_tm(union scsw *scsw)
-{
- return css_general_characteristics.fcx && (scsw->tm.x == 1);
-}
-EXPORT_SYMBOL(scsw_is_tm);
-
-/**
- * scsw_key - return scsw key field
- * @scsw: pointer to scsw
- *
- * Return the value of the key field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_key(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.key;
- else
- return scsw->cmd.key;
-}
-EXPORT_SYMBOL(scsw_key);
-
-/**
- * scsw_eswf - return scsw eswf field
- * @scsw: pointer to scsw
- *
- * Return the value of the eswf field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_eswf(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.eswf;
- else
- return scsw->cmd.eswf;
-}
-EXPORT_SYMBOL(scsw_eswf);
-
-/**
- * scsw_cc - return scsw cc field
- * @scsw: pointer to scsw
- *
- * Return the value of the cc field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_cc(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.cc;
- else
- return scsw->cmd.cc;
-}
-EXPORT_SYMBOL(scsw_cc);
-
-/**
- * scsw_ectl - return scsw ectl field
- * @scsw: pointer to scsw
- *
- * Return the value of the ectl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_ectl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.ectl;
- else
- return scsw->cmd.ectl;
-}
-EXPORT_SYMBOL(scsw_ectl);
-
-/**
- * scsw_pno - return scsw pno field
- * @scsw: pointer to scsw
- *
- * Return the value of the pno field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_pno(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.pno;
- else
- return scsw->cmd.pno;
-}
-EXPORT_SYMBOL(scsw_pno);
-
-/**
- * scsw_fctl - return scsw fctl field
- * @scsw: pointer to scsw
- *
- * Return the value of the fctl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_fctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.fctl;
- else
- return scsw->cmd.fctl;
-}
-EXPORT_SYMBOL(scsw_fctl);
-
-/**
- * scsw_actl - return scsw actl field
- * @scsw: pointer to scsw
- *
- * Return the value of the actl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_actl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.actl;
- else
- return scsw->cmd.actl;
-}
-EXPORT_SYMBOL(scsw_actl);
-
-/**
- * scsw_stctl - return scsw stctl field
- * @scsw: pointer to scsw
- *
- * Return the value of the stctl field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_stctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.stctl;
- else
- return scsw->cmd.stctl;
-}
-EXPORT_SYMBOL(scsw_stctl);
-
-/**
- * scsw_dstat - return scsw dstat field
- * @scsw: pointer to scsw
- *
- * Return the value of the dstat field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_dstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.dstat;
- else
- return scsw->cmd.dstat;
-}
-EXPORT_SYMBOL(scsw_dstat);
-
-/**
- * scsw_cstat - return scsw cstat field
- * @scsw: pointer to scsw
- *
- * Return the value of the cstat field of the specified scsw, regardless of
- * whether it is a transport mode or command mode scsw.
- */
-u32 scsw_cstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw->tm.cstat;
- else
- return scsw->cmd.cstat;
-}
-EXPORT_SYMBOL(scsw_cstat);
-
-/**
- * scsw_cmd_is_valid_key - check key field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the key field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_key(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_key);
-
-/**
- * scsw_cmd_is_valid_sctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_sctl(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
-
-/**
- * scsw_cmd_is_valid_eswf - check eswf field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the eswf field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_eswf(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
-
-/**
- * scsw_cmd_is_valid_cc - check cc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cc field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_cc(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
- (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
-
-/**
- * scsw_cmd_is_valid_fmt - check fmt field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fmt field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_fmt(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
-
-/**
- * scsw_cmd_is_valid_pfch - check pfch field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pfch field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_pfch(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
-
-/**
- * scsw_cmd_is_valid_isic - check isic field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the isic field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_isic(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
-
-/**
- * scsw_cmd_is_valid_alcc - check alcc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the alcc field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_alcc(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
-
-/**
- * scsw_cmd_is_valid_ssi - check ssi field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ssi field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_ssi(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
-
-/**
- * scsw_cmd_is_valid_zcc - check zcc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the zcc field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_zcc(union scsw *scsw)
-{
- return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
- (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
-
-/**
- * scsw_cmd_is_valid_ectl - check ectl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ectl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_ectl(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
-
-/**
- * scsw_cmd_is_valid_pno - check pno field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pno field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_pno(union scsw *scsw)
-{
- return (scsw->cmd.fctl != 0) &&
- (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
- ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
-
-/**
- * scsw_cmd_is_valid_fctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_fctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
-
-/**
- * scsw_cmd_is_valid_actl - check actl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the actl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_actl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
-
-/**
- * scsw_cmd_is_valid_stctl - check stctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the stctl field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_stctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
-
-/**
- * scsw_cmd_is_valid_dstat - check dstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the dstat field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_dstat(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->cmd.cc != 3);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
-
-/**
- * scsw_cmd_is_valid_cstat - check cstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cstat field of the specified command mode scsw is
- * valid, zero otherwise.
- */
-int scsw_cmd_is_valid_cstat(union scsw *scsw)
-{
- return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->cmd.cc != 3);
-}
-EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
-
-/**
- * scsw_tm_is_valid_key - check key field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the key field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_key(union scsw *scsw)
-{
- return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_key);
-
-/**
- * scsw_tm_is_valid_eswf - check eswf field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the eswf field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_eswf(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
-
-/**
- * scsw_tm_is_valid_cc - check cc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cc field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_cc(union scsw *scsw)
-{
- return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
- (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_cc);
-
-/**
- * scsw_tm_is_valid_fmt - check fmt field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fmt field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_fmt(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
-
-/**
- * scsw_tm_is_valid_x - check x field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the x field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_x(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_x);
-
-/**
- * scsw_tm_is_valid_q - check q field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the q field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_q(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_q);
-
-/**
- * scsw_tm_is_valid_ectl - check ectl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ectl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_ectl(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
-
-/**
- * scsw_tm_is_valid_pno - check pno field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pno field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_pno(union scsw *scsw)
-{
- return (scsw->tm.fctl != 0) &&
- (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
- ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
- (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_pno);
-
-/**
- * scsw_tm_is_valid_fctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_fctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
-
-/**
- * scsw_tm_is_valid_actl - check actl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the actl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_actl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_actl);
-
-/**
- * scsw_tm_is_valid_stctl - check stctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the stctl field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_stctl(union scsw *scsw)
-{
- /* Only valid if pmcw.dnv == 1*/
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
-
-/**
- * scsw_tm_is_valid_dstat - check dstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the dstat field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_dstat(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->tm.cc != 3);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
-
-/**
- * scsw_tm_is_valid_cstat - check cstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cstat field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_cstat(union scsw *scsw)
-{
- return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
- (scsw->tm.cc != 3);
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
-
-/**
- * scsw_tm_is_valid_fcxs - check fcxs field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fcxs field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_fcxs(union scsw *scsw)
-{
- return 1;
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
-
-/**
- * scsw_tm_is_valid_schxs - check schxs field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the schxs field of the specified transport mode scsw is
- * valid, zero otherwise.
- */
-int scsw_tm_is_valid_schxs(union scsw *scsw)
-{
- return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
- SCHN_STAT_INTF_CTRL_CHK |
- SCHN_STAT_PROT_CHECK |
- SCHN_STAT_CHN_DATA_CHK));
-}
-EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
-
-/**
- * scsw_is_valid_actl - check actl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the actl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_actl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_actl(scsw);
- else
- return scsw_cmd_is_valid_actl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_actl);
-
-/**
- * scsw_is_valid_cc - check cc field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cc field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_cc(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_cc(scsw);
- else
- return scsw_cmd_is_valid_cc(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_cc);
-
-/**
- * scsw_is_valid_cstat - check cstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the cstat field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_cstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_cstat(scsw);
- else
- return scsw_cmd_is_valid_cstat(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_cstat);
-
-/**
- * scsw_is_valid_dstat - check dstat field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the dstat field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_dstat(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_dstat(scsw);
- else
- return scsw_cmd_is_valid_dstat(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_dstat);
-
-/**
- * scsw_is_valid_ectl - check ectl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the ectl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_ectl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_ectl(scsw);
- else
- return scsw_cmd_is_valid_ectl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_ectl);
-
-/**
- * scsw_is_valid_eswf - check eswf field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the eswf field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_eswf(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_eswf(scsw);
- else
- return scsw_cmd_is_valid_eswf(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_eswf);
-
-/**
- * scsw_is_valid_fctl - check fctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the fctl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_fctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_fctl(scsw);
- else
- return scsw_cmd_is_valid_fctl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_fctl);
-
-/**
- * scsw_is_valid_key - check key field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the key field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_key(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_key(scsw);
- else
- return scsw_cmd_is_valid_key(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_key);
-
-/**
- * scsw_is_valid_pno - check pno field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the pno field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_pno(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_pno(scsw);
- else
- return scsw_cmd_is_valid_pno(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_pno);
-
-/**
- * scsw_is_valid_stctl - check stctl field validity
- * @scsw: pointer to scsw
- *
- * Return non-zero if the stctl field of the specified scsw is valid,
- * regardless of whether it is a transport mode or command mode scsw.
- * Return zero if the field does not contain a valid value.
- */
-int scsw_is_valid_stctl(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_valid_stctl(scsw);
- else
- return scsw_cmd_is_valid_stctl(scsw);
-}
-EXPORT_SYMBOL(scsw_is_valid_stctl);
-
-/**
- * scsw_cmd_is_solicited - check for solicited scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the command mode scsw indicates that the associated
- * status condition is solicited, zero if it is unsolicited.
- */
-int scsw_cmd_is_solicited(union scsw *scsw)
-{
- return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
-}
-EXPORT_SYMBOL(scsw_cmd_is_solicited);
-
-/**
- * scsw_tm_is_solicited - check for solicited scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the transport mode scsw indicates that the associated
- * status condition is solicited, zero if it is unsolicited.
- */
-int scsw_tm_is_solicited(union scsw *scsw)
-{
- return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
- (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
-}
-EXPORT_SYMBOL(scsw_tm_is_solicited);
-
-/**
- * scsw_is_solicited - check for solicited scsw
- * @scsw: pointer to scsw
- *
- * Return non-zero if the transport or command mode scsw indicates that the
- * associated status condition is solicited, zero if it is unsolicited.
- */
-int scsw_is_solicited(union scsw *scsw)
-{
- if (scsw_is_tm(scsw))
- return scsw_tm_is_solicited(scsw);
- else
- return scsw_cmd_is_solicited(scsw);
-}
-EXPORT_SYMBOL(scsw_is_solicited);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed3dcdea7fe..090b32a339c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -648,7 +648,9 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
/* Poll on the device until all requests are finished. */
do {
flags = 0;
+ spin_lock_bh(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
+ spin_unlock_bh(&ap_dev->lock);
} while ((flags & 1) || (flags & 2));
ap_device_remove(dev);
@@ -1109,12 +1111,15 @@ static void ap_scan_bus(struct work_struct *unused)
ap_dev->device.bus = &ap_bus_type;
ap_dev->device.parent = ap_root_device;
- dev_set_name(&ap_dev->device, "card%02x",
- AP_QID_DEVICE(ap_dev->qid));
+ if (dev_set_name(&ap_dev->device, "card%02x",
+ AP_QID_DEVICE(ap_dev->qid))) {
+ kfree(ap_dev);
+ continue;
+ }
ap_dev->device.release = ap_device_release;
rc = device_register(&ap_dev->device);
if (rc) {
- kfree(ap_dev);
+ put_device(&ap_dev->device);
continue;
}
/* Add device attributes. */
@@ -1407,14 +1412,12 @@ static void ap_reset(struct ap_device *ap_dev)
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
- spin_lock(&ap_dev->lock);
if (!ap_dev->unregistered) {
if (ap_poll_queue(ap_dev, flags))
ap_dev->unregistered = 1;
if (ap_dev->reset == AP_RESET_DO)
ap_reset(ap_dev);
}
- spin_unlock(&ap_dev->lock);
return 0;
}
@@ -1441,7 +1444,9 @@ static void ap_poll_all(unsigned long dummy)
flags = 0;
spin_lock(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
+ spin_lock(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
+ spin_unlock(&ap_dev->lock);
}
spin_unlock(&ap_device_list_lock);
} while (flags & 1);
@@ -1487,7 +1492,9 @@ static int ap_poll_thread(void *data)
flags = 0;
spin_lock_bh(&ap_device_list_lock);
list_for_each_entry(ap_dev, &ap_device_list, list) {
+ spin_lock(&ap_dev->lock);
__ap_poll_device(ap_dev, &flags);
+ spin_unlock(&ap_dev->lock);
}
spin_unlock_bh(&ap_device_list_lock);
}
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index e38e5d306fa..2930fc763ac 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -403,10 +403,14 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
return len;
}
-void __init s390_virtio_console_init(void)
+static int __init s390_virtio_console_init(void)
{
- virtio_cons_early_init(early_put_chars);
+ if (!MACHINE_IS_KVM)
+ return -ENODEV;
+ return virtio_cons_early_init(early_put_chars);
}
+console_initcall(s390_virtio_console_init);
+
/*
* We do this after core stuff, but before the drivers.
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 8c36eafcfbf..87dff11061b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1839,9 +1839,10 @@ static int netiucv_register_device(struct net_device *ndev)
return -ENOMEM;
ret = device_register(dev);
-
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
+ }
ret = netiucv_add_files(dev);
if (ret)
goto out_unreg;
@@ -2226,8 +2227,10 @@ static int __init netiucv_init(void)
netiucv_dev->release = (void (*)(struct device *))kfree;
netiucv_dev->driver = &netiucv_driver;
rc = device_register(netiucv_dev);
- if (rc)
+ if (rc) {
+ put_device(netiucv_dev);
goto out_driver;
+ }
netiucv_banner();
return rc;
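
The netiucv and ap_bus hunks above, and the smsgiucv hunk that follows, all apply the same driver-core rule: once a device whose kobject has been initialized fails to register, the structure must be released through put_device() so the release callback runs, never freed directly. A toy user-space analogue of that ownership hand-off (all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev {
	int ref;
	void (*release)(struct dev *);
	char *name;
};

static void dev_release(struct dev *d)
{
	printf("release %s\n", d->name);
	free(d->name);
	free(d);
}

static void put_dev(struct dev *d)
{
	if (--d->ref == 0)
		d->release(d);		/* owns the final cleanup */
}

static int register_dev(struct dev *d)
{
	/* pretend the core initialized the object (reference + release hook)
	 * and registration then failed */
	d->ref = 1;
	d->release = dev_release;
	return -1;
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	d->name = strdup("netiucv");
	if (register_dev(d)) {
		/* free(d) here would bypass dev_release() and leak d->name;
		 * dropping the reference lets the release callback clean up */
		put_dev(d);
	}
	return 0;
}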
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index e76a320d373..102000d1af6 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -219,13 +219,13 @@ static int __init smsg_init(void)
smsg_dev->driver = &smsg_driver;
rc = device_register(smsg_dev);
if (rc)
- goto out_free_dev;
+ goto out_put;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
-out_free_dev:
- kfree(smsg_dev);
+out_put:
+ put_device(smsg_dev);
out_free_path:
iucv_path_free(smsg_path);
smsg_path = NULL;
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 15dab96d05e..7c815d3327f 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -537,8 +537,12 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
}
if (temp_index != 0 && fan_index != 0) {
kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld");
- if (IS_ERR(kenvctrld_task))
- return PTR_ERR(kenvctrld_task);
+ if (IS_ERR(kenvctrld_task)) {
+ int err = PTR_ERR(kenvctrld_task);
+
+ kenvctrld_task = NULL;
+ return err;
+ }
}
return 0;
@@ -561,7 +565,8 @@ void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp)
struct bbc_cpu_temperature *tp, *tpos;
struct bbc_fan_control *fp, *fpos;
- kthread_stop(kenvctrld_task);
+ if (kenvctrld_task)
+ kthread_stop(kenvctrld_task);
list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
list_del(&tp->bp_list);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 042d9bce991..d0ab23a5835 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
static void open_s3_dev(struct t3cdev *);
static void close_s3_dev(struct t3cdev *);
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error);
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
static struct cxgb3_client t3c_client = {
@@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = {
.handlers = cxgb3i_cpl_handlers,
.add = open_s3_dev,
.remove = close_s3_dev,
- .err_handler = s3_err_handler,
+ .event_handler = s3_event_handler,
};
/**
@@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev)
cxgb3i_ddp_cleanup(t3dev);
}
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
{
struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
- cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n",
- snic, tdev, status, error);
+ cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
+ snic, tdev, event, port);
if (!snic)
return;
- switch (status) {
+ switch (event) {
case OFFLOAD_STATUS_DOWN:
snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
break;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index f3da592f7bc..35a13867495 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
+/**
+ * mpt2sas_base_start_watchdog - start the fault_reset_work_q
+ * @ioc: pointer to scsi command object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ if (ioc->fault_reset_work_q)
+ return;
+
+ /* initialize fault polling */
+ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+ snprintf(ioc->fault_reset_work_q_name,
+ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+ ioc->fault_reset_work_q =
+ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ if (!ioc->fault_reset_work_q) {
+ printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
+ * @ioc: pointer to scsi command object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ wq = ioc->fault_reset_work_q;
+ ioc->fault_reset_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work(&ioc->fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
* _base_sas_ioc_info - verbose translation of the ioc status
@@ -440,6 +498,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
return;
+ /* each nexus loss loginfo */
+ if (log_info == 0x31170000)
+ return;
+
/* eat the loginfos associated with task aborts */
if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
0x31140000 || log_info == 0x31130000))
@@ -1109,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
}
}
- pci_set_drvdata(pdev, ioc->shost);
_base_mask_interrupts(ioc);
r = _base_enable_msix(ioc);
if (r)
@@ -1132,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->pci_irq = -1;
pci_release_selected_regions(ioc->pdev, ioc->bars);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return r;
}
@@ -3191,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
ioc->chip_phys = 0;
pci_release_selected_regions(ioc->pdev, ioc->bars);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
return;
}
@@ -3205,7 +3264,6 @@ int
mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
{
int r, i;
- unsigned long flags;
dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
__func__));
@@ -3214,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
if (r)
return r;
+ pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
if (r)
goto out_free_resources;
@@ -3288,23 +3347,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
if (r)
goto out_free_resources;
- /* initialize fault polling */
- INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
- snprintf(ioc->fault_reset_work_q_name,
- sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
- ioc->fault_reset_work_q =
- create_singlethread_workqueue(ioc->fault_reset_work_q_name);
- if (!ioc->fault_reset_work_q) {
- printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
- ioc->name, __func__, __LINE__);
- goto out_free_resources;
- }
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->fault_reset_work_q)
- queue_delayed_work(ioc->fault_reset_work_q,
- &ioc->fault_reset_work,
- msecs_to_jiffies(FAULT_POLLING_INTERVAL));
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ mpt2sas_base_start_watchdog(ioc);
return 0;
out_free_resources:
@@ -3312,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->remove_host = 1;
mpt2sas_base_free_resources(ioc);
_base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
kfree(ioc->tm_cmds.reply);
kfree(ioc->transport_cmds.reply);
kfree(ioc->config_cmds.reply);
@@ -3337,22 +3381,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
void
mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
{
- unsigned long flags;
- struct workqueue_struct *wq;
dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
__func__));
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- wq = ioc->fault_reset_work_q;
- ioc->fault_reset_work_q = NULL;
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (!cancel_delayed_work(&ioc->fault_reset_work))
- flush_workqueue(wq);
- destroy_workqueue(wq);
-
+ mpt2sas_base_stop_watchdog(ioc);
mpt2sas_base_free_resources(ioc);
_base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
kfree(ioc->pfacts);
kfree(ioc->ctl_cmds.reply);
kfree(ioc->base_cmds.reply);
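
mpt2sas_base.c now wraps the fault poller in a start/stop pair, mpt2sas_base_start_watchdog()/mpt2sas_base_stop_watchdog(), instead of open-coding the workqueue setup in attach and the teardown in detach; the declarations are added to mpt2sas_base.h below so other parts of the driver can call them. A user-space sketch of the same start/stop discipline (invented names; the kernel version uses a singlethreaded workqueue plus delayed work, and its stop path detaches the queue pointer under the lock before flushing):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_t poller;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool running;

static void *poll_fn(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		if (!running) {
			pthread_mutex_unlock(&lock);
			break;
		}
		pthread_mutex_unlock(&lock);
		puts("polling fault state");
		usleep(100 * 1000);	/* FAULT_POLLING_INTERVAL stand-in */
	}
	return NULL;
}

static void start_watchdog(void)
{
	pthread_mutex_lock(&lock);
	if (running) {			/* already started: nothing to do */
		pthread_mutex_unlock(&lock);
		return;
	}
	running = true;
	pthread_mutex_unlock(&lock);
	pthread_create(&poller, NULL, poll_fn, NULL);
}

static void stop_watchdog(void)
{
	pthread_mutex_lock(&lock);
	if (!running) {			/* never started or already stopped */
		pthread_mutex_unlock(&lock);
		return;
	}
	running = false;		/* no further polling iterations */
	pthread_mutex_unlock(&lock);
	pthread_join(poller, NULL);	/* wait for the in-flight one to exit */
}

int main(void)
{
	start_watchdog();
	usleep(300 * 1000);
	stop_watchdog();
	return 0;
}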
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 286c185fa9e..acdcff150a3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "01.100.03.00"
+#define MPT2SAS_DRIVER_VERSION "01.100.04.00"
#define MPT2SAS_MAJOR_VERSION 01
#define MPT2SAS_MINOR_VERSION 100
-#define MPT2SAS_BUILD_VERSION 03
+#define MPT2SAS_BUILD_VERSION 04
#define MPT2SAS_RELEASE_VERSION 00
/*
@@ -673,6 +673,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
/* base shared API */
extern struct list_head mpt2sas_ioc_list;
+void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
+void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc);
void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc);
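
In mpt2sas_config.c below, _config_request() becomes the single place that takes ioc->config_cmds.mutex (the per-page wrappers no longer lock around it), funnels its exits through one out: label that clears the command state before unlocking, and caps the timeout retry at two attempts; note that the early return when retry_count exceeds 2 appears to leave the mutex held, which looks like a leftover in this version of the patch. A small user-space sketch of the single-entry locking plus bounded-retry shape (invented names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for issuing one config request; fails the first two times */
static int issue_request(int attempt)
{
	return attempt < 2 ? -1 : 0;
}

static int config_request(void)
{
	int retry_count = 0;
	int r;

	pthread_mutex_lock(&cfg_mutex);	/* single entry point takes the lock */
retry:
	if (retry_count > 2) {		/* bounded retry */
		r = -1;
		goto out;
	}
	r = issue_request(retry_count);
	if (r) {
		retry_count++;
		printf("attempting retry (%d)\n", retry_count);
		goto retry;
	}
out:
	pthread_mutex_unlock(&cfg_mutex);	/* every exit path unlocks */
	return r;
}

int main(void)
{
	return config_request() ? 1 : 0;
}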
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 58cfb97846f..6ddee161beb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -236,17 +236,25 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
Mpi2ConfigRequest_t *config_request;
int r;
u8 retry_count;
- u8 issue_reset;
+ u8 issue_host_reset = 0;
u16 wait_state_count;
+ mutex_lock(&ioc->config_cmds.mutex);
if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
ioc->name, __func__);
+ mutex_unlock(&ioc->config_cmds.mutex);
return -EAGAIN;
}
retry_count = 0;
retry_config:
+ if (retry_count) {
+ if (retry_count > 2) /* attempt only 2 retries */
+ return -EFAULT;
+ printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
+ ioc->name, __func__, retry_count);
+ }
wait_state_count = 0;
ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
@@ -254,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
printk(MPT2SAS_ERR_FMT
"%s: failed due to ioc not operational\n",
ioc->name, __func__);
- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
- return -EFAULT;
+ r = -EFAULT;
+ goto out;
}
ssleep(1);
ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
@@ -271,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (!smid) {
printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
- return -EAGAIN;
+ r = -EAGAIN;
+ goto out;
}
r = 0;
@@ -292,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
ioc->name, __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2ConfigRequest_t)/4);
- if (!(ioc->config_cmds.status & MPT2_CMD_RESET))
- issue_reset = 1;
- goto issue_host_reset;
+ retry_count++;
+ if (ioc->config_cmds.smid == smid)
+ mpt2sas_base_free_smid(ioc, smid);
+ if ((ioc->shost_recovery) ||
+ (ioc->config_cmds.status & MPT2_CMD_RESET))
+ goto retry_config;
+ issue_host_reset = 1;
+ r = -EFAULT;
+ goto out;
}
if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
memcpy(mpi_reply, ioc->config_cmds.reply,
@@ -302,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (retry_count)
printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
ioc->name, __func__);
+out:
ioc->config_cmds.status = MPT2_CMD_NOT_USED;
- return r;
-
- issue_host_reset:
- if (issue_reset)
+ mutex_unlock(&ioc->config_cmds.mutex);
+ if (issue_host_reset)
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
- ioc->config_cmds.status = MPT2_CMD_NOT_USED;
- if (!retry_count) {
- printk(MPT2SAS_INFO_FMT "%s: attempting retry\n",
- ioc->name, __func__);
- retry_count++;
- goto retry_config;
- }
- return -EFAULT;
+ return r;
}
/**
@@ -375,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -417,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -438,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -480,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -501,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -543,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -564,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -606,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -627,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -669,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -690,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -732,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -753,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -795,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -818,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -863,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -886,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -931,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -953,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage0_t config_page;
- mutex_lock(&ioc->config_cmds.mutex);
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1002,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1026,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
Mpi2ConfigRequest_t mpi_request;
int r;
struct config_request mem;
-
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sz);
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1070,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1095,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sz);
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1138,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1161,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1206,7 +1186,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1230,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1277,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1300,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1345,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1367,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1413,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1435,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1481,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1505,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1548,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1572,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
struct config_request mem;
u16 ioc_status;
- mutex_lock(&ioc->config_cmds.mutex);
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
*num_pds = 0;
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1620,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1645,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
memset(config_page, 0, sz);
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1687,7 +1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1711,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int r;
struct config_request mem;
- mutex_lock(&ioc->config_cmds.mutex);
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1754,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
@@ -1778,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
struct config_request mem;
u16 ioc_status;
- mutex_lock(&ioc->config_cmds.mutex);
*volume_handle = 0;
memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1842,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
_config_free_config_dma_memory(ioc, &mem);
out:
- mutex_unlock(&ioc->config_cmds.mutex);
return r;
}
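
The hunks above strip the mutex_lock()/mutex_unlock() pair out of every mpt2sas_config_get_*/_set_* accessor; the serialization presumably moves into the driver's common _config_request() path in a part of the patch not shown in this excerpt. Below is a minimal kernel-style sketch of that pattern, locking once in a shared helper instead of in each caller; the structure, names and argument list are simplified assumptions, not the driver's real interface.

	/*
	 * Simplified kernel-style sketch (not the driver's actual code): the
	 * config mutex is taken in one shared request helper, so the page
	 * accessors no longer lock individually.
	 */
	#include <linux/mutex.h>

	struct adapter {
		struct mutex config_mutex;	/* serializes all config page requests */
		int last_status;		/* stand-in for the real request state */
	};

	/* single choke point: the only place the mutex is taken */
	static int config_request(struct adapter *ioc)
	{
		int r;

		mutex_lock(&ioc->config_mutex);
		r = ioc->last_status;		/* ... build request, wait for reply ... */
		mutex_unlock(&ioc->config_mutex);
		return r;
	}

	/* accessors like mpt2sas_config_get_iounit_pg0() become lock-free wrappers */
	static int config_get_page(struct adapter *ioc)
	{
		return config_request(ioc);
	}
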
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 2a01a5f2a84..2e9a4445596 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2767,6 +2767,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
char *desc_ioc_state = NULL;
char *desc_scsi_status = NULL;
char *desc_scsi_state = ioc->tmp_string;
+ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+
+ if (log_info == 0x31170000)
+ return;
switch (ioc_status) {
case MPI2_IOCSTATUS_SUCCESS:
@@ -3426,7 +3430,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
__le64 sas_address;
int i;
unsigned long flags;
- struct _sas_port *mpt2sas_port;
+ struct _sas_port *mpt2sas_port = NULL;
int rc = 0;
if (!handle)
@@ -3518,12 +3522,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
&expander_pg1, i, handle))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- continue;
+ rc = -1;
+ goto out_fail;
}
sas_expander->phy[i].handle = handle;
sas_expander->phy[i].phy_id = i;
- mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i],
- expander_pg1, sas_expander->parent_dev);
+
+ if ((mpt2sas_transport_add_expander_phy(ioc,
+ &sas_expander->phy[i], expander_pg1,
+ sas_expander->parent_dev))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
}
if (sas_expander->enclosure_handle) {
@@ -3540,8 +3552,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
out_fail:
- if (sas_expander)
- kfree(sas_expander->phy);
+ if (mpt2sas_port)
+ mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_expander->parent_handle);
kfree(sas_expander);
return rc;
}
@@ -3663,12 +3676,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
sas_device->hidden_raid_component = is_pd;
/* get enclosure_logical_id */
- if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0,
- MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- sas_device->enclosure_handle))) {
+ if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
+ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device->enclosure_handle)))
sas_device->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
- }
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
@@ -4250,12 +4262,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
u16 handle = le16_to_cpu(element->VolDevHandle);
int rc;
-#if 0 /* RAID_HACKS */
- if (le32_to_cpu(event_data->Flags) &
- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
- return;
-#endif
-
mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
printk(MPT2SAS_ERR_FMT
@@ -4310,12 +4316,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc,
unsigned long flags;
struct MPT2SAS_TARGET *sas_target_priv_data;
-#if 0 /* RAID_HACKS */
- if (le32_to_cpu(event_data->Flags) &
- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
- return;
-#endif
-
spin_lock_irqsave(&ioc->raid_device_lock, flags);
raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
@@ -4428,14 +4428,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
struct _sas_device *sas_device;
unsigned long flags;
u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device)
+ if (sas_device) {
sas_device->hidden_raid_component = 1;
- else
- _scsih_add_device(ioc, handle, 0, 1);
+ return;
+ }
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ _scsih_link_change(ioc,
+ le16_to_cpu(sas_device_pg0.ParentDevHandle),
+ handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
}
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4535,12 +4559,15 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
{
Mpi2EventIrConfigElement_t *element;
int i;
+ u8 foreign_config;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_ir_config_change_event_debug(ioc, event_data);
#endif
+ foreign_config = (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
for (i = 0; i < event_data->NumElements; i++, element++) {
@@ -4548,11 +4575,13 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
switch (element->ReasonCode) {
case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
case MPI2_EVENT_IR_CHANGE_RC_ADDED:
- _scsih_sas_volume_add(ioc, element);
+ if (!foreign_config)
+ _scsih_sas_volume_add(ioc, element);
break;
case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
- _scsih_sas_volume_delete(ioc, element);
+ if (!foreign_config)
+ _scsih_sas_volume_delete(ioc, element);
break;
case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
_scsih_sas_pd_hide(ioc, element);
@@ -4671,6 +4700,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
u32 state;
struct _sas_device *sas_device;
unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
return;
@@ -4687,22 +4719,40 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
switch (state) {
-#if 0
- case MPI2_RAID_PD_STATE_OFFLINE:
- if (sas_device)
- _scsih_remove_device(ioc, handle);
- break;
-#endif
case MPI2_RAID_PD_STATE_ONLINE:
case MPI2_RAID_PD_STATE_DEGRADED:
case MPI2_RAID_PD_STATE_REBUILDING:
case MPI2_RAID_PD_STATE_OPTIMAL:
- if (sas_device)
+ if (sas_device) {
sas_device->hidden_raid_component = 1;
- else
- _scsih_add_device(ioc, handle, 0, 1);
+ return;
+ }
+
+ if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ _scsih_link_change(ioc,
+ le16_to_cpu(sas_device_pg0.ParentDevHandle),
+ handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
+
break;
+ case MPI2_RAID_PD_STATE_OFFLINE:
case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
case MPI2_RAID_PD_STATE_HOT_SPARE:
@@ -5774,6 +5824,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
u32 device_state;
+ mpt2sas_base_stop_watchdog(ioc);
flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
@@ -5816,6 +5867,7 @@ _scsih_resume(struct pci_dev *pdev)
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
scsi_unblock_requests(shost);
+ mpt2sas_base_start_watchdog(ioc);
return 0;
}
#endif /* CONFIG_PM */
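
In the IR configuration-change hunk above, the FOREIGN_CONFIG flag is evaluated once per event and only the volume add/delete cases are suppressed for foreign configurations; physical-disk handling still runs. A small standalone sketch of that control flow follows, with an illustrative flag value and a reduced element layout rather than the MPI2 definitions.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative flag value; the real MPI2 constant lives in mpi2_ioc.h. */
	#define IR_CHANGE_FLAGS_FOREIGN_CONFIG	0x01

	enum reason { RC_VOLUME_ADDED, RC_VOLUME_DELETED, RC_PD_CREATED };

	static void ir_config_change(uint32_t flags, const enum reason *elements, int n)
	{
		/* test the flag once, not inside every case */
		uint8_t foreign_config = (flags & IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
		int i;

		for (i = 0; i < n; i++) {
			switch (elements[i]) {
			case RC_VOLUME_ADDED:
				if (!foreign_config)
					printf("element %d: add volume\n", i);
				break;
			case RC_VOLUME_DELETED:
				if (!foreign_config)
					printf("element %d: delete volume\n", i);
				break;
			case RC_PD_CREATED:
				/* physical-disk events are handled regardless of the flag */
				printf("element %d: hide physical disk\n", i);
				break;
			}
		}
	}

	int main(void)
	{
		enum reason ev[] = { RC_VOLUME_ADDED, RC_PD_CREATED };

		ir_config_change(IR_CHANGE_FLAGS_FOREIGN_CONFIG, ev, 2);
		return 0;
	}
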
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 037c1e0b7c4..6553833c12d 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -527,7 +527,7 @@ config SERIAL_S3C24A0
config SERIAL_S3C6400
tristate "Samsung S3C6400/S3C6410 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C600 || CPU_S3C6410)
+ depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410)
default y
help
Serial port support for the Samsung S3C6400 and S3C6410
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index e0d44af4745..3f3119d760d 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -111,29 +111,32 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
unsigned int bpw;
unsigned int hz;
unsigned int div;
+ unsigned long clk;
bpw = t ? t->bits_per_word : spi->bits_per_word;
hz = t ? t->speed_hz : spi->max_speed_hz;
+ if (!bpw)
+ bpw = 8;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
if (bpw != 8) {
dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw);
return -EINVAL;
}
- div = clk_get_rate(hw->clk) / hz;
-
- /* is clk = pclk / (2 * (pre+1)), or is it
- * clk = (pclk * 2) / ( pre + 1) */
-
- div /= 2;
-
- if (div > 0)
- div -= 1;
+ clk = clk_get_rate(hw->clk);
+ div = DIV_ROUND_UP(clk, hz * 2) - 1;
if (div > 255)
div = 255;
- dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz);
+ dev_dbg(&spi->dev, "setting pre-scaler to %d (wanted %d, got %ld)\n",
+ div, hz, clk / (2 * (div + 1)));
+
+
writeb(div, hw->regs + S3C2410_SPPRE);
spin_lock(&hw->bitbang.lock);
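
The setupxfer hunk above settles the removed comment's question in favour of spi_clk = pclk / (2 * (pre + 1)) and rounds the division up, so the programmed rate never exceeds the requested one. A standalone sketch of the arithmetic, using an assumed 66 MHz peripheral clock rather than a value taken from the patch:

	#include <stdio.h>

	/* same definition as the kernel's DIV_ROUND_UP */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long pclk = 66000000;	/* assumed peripheral clock */
		unsigned int hz = 400000;	/* requested SPI rate */
		unsigned int div;

		/* solve pclk / (2 * (div + 1)) <= hz for the smallest div */
		div = DIV_ROUND_UP(pclk, hz * 2) - 1;
		if (div > 255)			/* SPPRE is an 8-bit register */
			div = 255;

		/* prints: prescaler 82 -> actual 397590 Hz (wanted 400000) */
		printf("prescaler %u -> actual %lu Hz (wanted %u)\n",
		       div, pclk / (2 * (div + 1)), hz);
		return 0;
	}
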
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 9d7c99394ec..640f65c6ef8 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1752,12 +1752,12 @@ static int comedi_open(struct inode *inode, struct file *file)
mutex_lock(&dev->mutex);
if (dev->attached)
goto ok;
- if (!capable(CAP_SYS_MODULE) && dev->in_request_module) {
+ if (!capable(CAP_NET_ADMIN) && dev->in_request_module) {
DPRINTK("in request module\n");
mutex_unlock(&dev->mutex);
return -ENODEV;
}
- if (capable(CAP_SYS_MODULE) && dev->in_request_module)
+ if (capable(CAP_NET_ADMIN) && dev->in_request_module)
goto ok;
dev->in_request_module = 1;
@@ -1770,8 +1770,8 @@ static int comedi_open(struct inode *inode, struct file *file)
dev->in_request_module = 0;
- if (!dev->attached && !capable(CAP_SYS_MODULE)) {
- DPRINTK("not attached and not CAP_SYS_MODULE\n");
+ if (!dev->attached && !capable(CAP_NET_ADMIN)) {
+ DPRINTK("not attached and not CAP_NET_ADMIN\n");
mutex_unlock(&dev->mutex);
return -ENODEV;
}
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 7b605795b77..e63c9bea6c5 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -1950,14 +1950,7 @@ static int pohmelfs_get_sb(struct file_system_type *fs_type,
*/
static void pohmelfs_kill_super(struct super_block *sb)
{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .nr_to_write = LONG_MAX,
- };
- generic_sync_sb_inodes(sb, &wbc);
-
+ sync_inodes_sb(sb);
kill_anon_super(sb);
}
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0a69672097a..4e83c297ec9 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -953,7 +953,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
mutex_lock(&tz->lock);
- tz->ops->get_temp(tz, &temp);
+ if (tz->ops->get_temp(tz, &temp)) {
+ /* get_temp failed - retry it later */
+ printk(KERN_WARNING PREFIX "failed to read out thermal zone "
+ "%d\n", tz->id);
+ goto leave;
+ }
for (count = 0; count < tz->trips; count++) {
tz->ops->get_trip_type(tz, count, &trip_type);
@@ -1005,6 +1010,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
THERMAL_TRIPS_NONE);
tz->last_temperature = temp;
+
+ leave:
if (tz->passive)
thermal_zone_device_set_polling(tz, tz->passive_delay);
else if (tz->polling_delay)
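
The thermal hunk above stops trusting an unchecked get_temp() call: on failure the update is skipped and retried on the next poll instead of evaluating trip points against a stale or uninitialized temperature. A simplified standalone sketch of that pattern, using a stand-in ops structure rather than the kernel's struct thermal_zone_device:

	#include <stdio.h>

	struct zone_ops {
		int (*get_temp)(void *zone, unsigned long *temp);
	};

	static void zone_update(void *zone, const struct zone_ops *ops)
	{
		unsigned long temp;

		if (ops->get_temp(zone, &temp)) {
			/* read failed - log it and retry on the next polling cycle */
			fprintf(stderr, "failed to read thermal zone, will retry\n");
			return;
		}
		/* ... evaluate trip points against temp here ... */
		printf("temperature now %lu\n", temp);
	}

	static int demo_get_temp(void *zone, unsigned long *temp)
	{
		(void)zone;
		*temp = 42000;	/* millidegree convention, as in the kernel */
		return 0;
	}

	int main(void)
	{
		struct zone_ops ops = { .get_temp = demo_get_temp };

		zone_update(NULL, &ops);
		return 0;
	}
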
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 8f24564f77b..07f22b62563 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -481,6 +481,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
/* tell the board code to enable the panel */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
ch = &priv->ch[k];
+ if (!ch->enabled)
+ continue;
+
board_cfg = &ch->cfg.board_cfg;
if (board_cfg->display_on)
board_cfg->display_on(board_cfg->board_data);
@@ -498,6 +501,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
/* clean up deferred io and ask board code to disable panel */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
ch = &priv->ch[k];
+ if (!ch->enabled)
+ continue;
/* deferred io mode:
* flush frame, and wait for frame end interrupt
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 15502d5e364..54cd9161017 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
xenfb_init_shared_page(info, fb_info);
+ ret = xenfb_connect_backend(dev, info);
+ if (ret < 0)
+ goto error;
+
ret = register_framebuffer(fb_info);
if (ret) {
fb_deferred_io_cleanup(fb_info);
@@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
}
info->fb_info = fb_info;
- ret = xenfb_connect_backend(dev, info);
- if (ret < 0)
- goto error;
-
xenfb_make_preferred_console();
return 0;
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 3fe9742c23c..2f8643efe92 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -37,7 +37,7 @@
#include <linux/uaccess.h>
#include <asm/addrspace.h>
-#include <asm/ar7/ar7.h>
+#include <asm/mach-ar7/ar7.h>
#define DRVNAME "ar7_wdt"
#define LONGNAME "TI AR7 Watchdog Timer"