Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/he.c11
-rw-r--r--drivers/base/hypervisor.c3
-rw-r--r--drivers/block/cciss.c10
-rw-r--r--drivers/block/cryptoloop.c160
-rw-r--r--drivers/block/floppy.c12
-rw-r--r--drivers/char/Kconfig22
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/agp.h2
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/efficeon-agp.c16
-rw-r--r--drivers/char/agp/frontend.c27
-rw-r--r--drivers/char/agp/generic.c39
-rw-r--r--drivers/char/agp/intel-agp.c173
-rw-r--r--drivers/char/agp/uninorth-agp.c4
-rw-r--r--drivers/char/agp/via-agp.c4
-rw-r--r--drivers/char/briq_panel.c268
-rw-r--r--drivers/char/drm/radeon_state.c9
-rw-r--r--drivers/char/hvc_console.c19
-rw-r--r--drivers/char/hvc_console.h2
-rw-r--r--drivers/char/hvc_iseries.c594
-rw-r--r--drivers/char/hvc_rtas.c2
-rw-r--r--drivers/char/hvc_vio.c7
-rw-r--r--drivers/char/hvsi.c7
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c9
-rw-r--r--drivers/char/istallion.c2
-rw-r--r--drivers/char/synclink_gt.c14
-rw-r--r--drivers/char/tpm/tpm_atmel.h4
-rw-r--r--drivers/char/viocons.c31
-rw-r--r--drivers/char/viotape.c6
-rw-r--r--drivers/char/watchdog/sbc8360.c4
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c173
-rw-r--r--drivers/cpufreq/cpufreq_stats.c2
-rw-r--r--drivers/crypto/Kconfig45
-rw-r--r--drivers/crypto/Makefile8
-rw-r--r--drivers/crypto/padlock-aes.c258
-rw-r--r--drivers/crypto/padlock-generic.c63
-rw-r--r--drivers/crypto/padlock-sha.c318
-rw-r--r--drivers/crypto/padlock.c58
-rw-r--r--drivers/crypto/padlock.h17
-rw-r--r--drivers/i2c/busses/i2c-powermac.c3
-rw-r--r--drivers/ide/ide-proc.c2
-rw-r--r--drivers/ide/pci/aec62xx.c12
-rw-r--r--drivers/ide/pci/alim15x3.c2
-rw-r--r--drivers/ide/pci/serverworks.c10
-rw-r--r--drivers/ide/pci/sgiioc4.c60
-rw-r--r--drivers/ide/pci/siimage.c6
-rw-r--r--drivers/ide/pci/sis5513.c2
-rw-r--r--drivers/ide/pci/via82cxxx.c3
-rw-r--r--drivers/ide/ppc/pmac.c8
-rw-r--r--drivers/infiniband/Kconfig4
-rw-r--r--drivers/infiniband/Makefile4
-rw-r--r--drivers/infiniband/core/Makefile4
-rw-r--r--drivers/infiniband/core/addr.c22
-rw-r--r--drivers/infiniband/core/cache.c5
-rw-r--r--drivers/infiniband/core/cm.c66
-rw-r--r--drivers/infiniband/core/cma.c405
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/iwcm.c1019
-rw-r--r--drivers/infiniband/core/iwcm.h62
-rw-r--r--drivers/infiniband/core/mad.c19
-rw-r--r--drivers/infiniband/core/mad_priv.h1
-rw-r--r--drivers/infiniband/core/mad_rmpp.c94
-rw-r--r--drivers/infiniband/core/sa_query.c67
-rw-r--r--drivers/infiniband/core/smi.c16
-rw-r--r--drivers/infiniband/core/sysfs.c13
-rw-r--r--drivers/infiniband/core/ucm.c9
-rw-r--r--drivers/infiniband/core/user_mad.c7
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c64
-rw-r--r--drivers/infiniband/core/verbs.c21
-rw-r--r--drivers/infiniband/hw/amso1100/Kbuild8
-rw-r--r--drivers/infiniband/hw/amso1100/Kconfig15
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c1255
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h551
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c321
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.h108
-rw-r--r--drivers/infiniband/hw/amso1100/c2_alloc.c144
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cm.c452
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c433
-rw-r--r--drivers/infiniband/hw/amso1100/c2_intr.c209
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mm.c375
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.c174
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.h106
-rw-r--r--drivers/infiniband/hw/amso1100/c2_pd.c89
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c869
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.h181
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c975
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c663
-rw-r--r--drivers/infiniband/hw/amso1100/c2_status.h158
-rw-r--r--drivers/infiniband/hw/amso1100/c2_user.h82
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.c260
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.h63
-rw-r--r--drivers/infiniband/hw/amso1100/c2_wr.h1520
-rw-r--r--drivers/infiniband/hw/ehca/Kconfig16
-rw-r--r--drivers/infiniband/hw/ehca/Makefile16
-rw-r--r--drivers/infiniband/hw/ehca/ehca_av.c271
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h346
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes_pSeries.h236
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c427
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c185
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c241
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c762
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.h77
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h182
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c818
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mcast.c131
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c2261
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.h140
-rw-r--r--drivers/infiniband/hw/ehca/ehca_pd.c114
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qes.h259
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c1507
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c653
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c111
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h172
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c392
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c874
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.h261
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.c80
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.h90
-rw-r--r--drivers/infiniband/hw/ehca/hipz_fns.h68
-rw-r--r--drivers/infiniband/hw/ehca/hipz_fns_core.h100
-rw-r--r--drivers/infiniband/hw/ehca/hipz_hw.h388
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c149
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.h247
-rw-r--r--drivers/infiniband/hw/ipath/Kconfig21
-rw-r--r--drivers/infiniband/hw/ipath/Makefile29
-rw-r--r--drivers/infiniband/hw/ipath/ipath_common.h19
-rw-r--r--drivers/infiniband/hw/ipath/ipath_cq.c183
-rw-r--r--drivers/infiniband/hw/ipath/ipath_debug.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c154
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c349
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c35
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c (renamed from drivers/infiniband/hw/ipath/ipath_ht400.c)53
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c (renamed from drivers/infiniband/hw/ipath/ipath_pe800.c)82
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c21
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c24
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h57
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.c1179
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.h115
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c339
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mmap.c122
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c12
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c242
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c9
-rw-r--r--drivers/infiniband/hw/ipath/ipath_registers.h7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c160
-rw-r--r--drivers/infiniband/hw/ipath/ipath_srq.c244
-rw-r--r--drivers/infiniband/hw/ipath/ipath_stats.c27
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c41
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c182
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c687
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h252
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs_mcast.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_ppc64.c52
-rw-r--r--drivers/infiniband/hw/ipath/verbs_debug.h108
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c62
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c88
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c20
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_uar.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c194
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c37
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c35
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h7
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c80
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c57
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/macio_asic.c10
-rw-r--r--drivers/macintosh/macio_sysfs.c8
-rw-r--r--drivers/macintosh/smu.c19
-rw-r--r--drivers/macintosh/therm_adt746x.c8
-rw-r--r--drivers/macintosh/therm_pm72.c14
-rw-r--r--drivers/macintosh/therm_windtunnel.c4
-rw-r--r--drivers/macintosh/via-cuda.c4
-rw-r--r--drivers/macintosh/via-pmu-backlight.c99
-rw-r--r--drivers/macintosh/via-pmu-led.c2
-rw-r--r--drivers/macintosh/via-pmu.c22
-rw-r--r--drivers/macintosh/windfarm_pm81.c4
-rw-r--r--drivers/macintosh/windfarm_pm91.c2
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c13
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c8
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c12
-rw-r--r--drivers/md/dm-crypt.c146
-rw-r--r--drivers/md/raid1.c57
-rw-r--r--drivers/media/Kconfig2
-rw-r--r--drivers/media/common/saa7146_video.c2
-rw-r--r--drivers/media/dvb/b2c2/Kconfig1
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig1
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/dvb/frontends/Kconfig60
-rw-r--r--drivers/media/dvb/frontends/Makefile2
-rw-r--r--drivers/media/dvb/pluto2/Kconfig1
-rw-r--r--drivers/media/dvb/ttpci/Kconfig5
-rw-r--r--drivers/media/dvb/ttusb-budget/Kconfig3
-rw-r--r--drivers/media/video/Kconfig8
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c1
-rw-r--r--drivers/media/video/cx88/Kconfig1
-rw-r--r--drivers/media/video/saa7134/Kconfig1
-rw-r--r--drivers/media/video/tuner-types.c10
-rw-r--r--drivers/media/video/zoran.h2
-rw-r--r--drivers/media/video/zoran_driver.c22
-rw-r--r--drivers/mmc/imxmmc.c69
-rw-r--r--drivers/mmc/mmc.c55
-rw-r--r--drivers/mmc/mmc_block.c60
-rw-r--r--drivers/mtd/Kconfig8
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c87
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c112
-rw-r--r--drivers/mtd/chips/jedec_probe.c14
-rw-r--r--drivers/mtd/devices/block2mtd.c93
-rw-r--r--drivers/mtd/devices/m25p80.c12
-rw-r--r--drivers/mtd/devices/pmc551.c1158
-rw-r--r--drivers/mtd/maps/Kconfig20
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/amd76xrom.c5
-rw-r--r--drivers/mtd/maps/arctic-mtd.c14
-rw-r--r--drivers/mtd/maps/beech-mtd.c14
-rw-r--r--drivers/mtd/maps/cstm_mips_ixx.c18
-rw-r--r--drivers/mtd/maps/ebony.c4
-rw-r--r--drivers/mtd/maps/fortunet.c3
-rw-r--r--drivers/mtd/maps/ichxrom.c3
-rw-r--r--drivers/mtd/maps/iq80310.c118
-rw-r--r--drivers/mtd/maps/ixp4xx.c2
-rw-r--r--drivers/mtd/maps/l440gx.c12
-rw-r--r--drivers/mtd/maps/lasat.c2
-rw-r--r--drivers/mtd/maps/nettel.c34
-rw-r--r--drivers/mtd/maps/ocotea.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c4
-rw-r--r--drivers/mtd/maps/physmap.c33
-rw-r--r--drivers/mtd/maps/redwood.c11
-rw-r--r--drivers/mtd/maps/sbc8240.c11
-rw-r--r--drivers/mtd/maps/scx200_docflash.c9
-rw-r--r--drivers/mtd/maps/walnut.c4
-rw-r--r--drivers/mtd/mtdchar.c9
-rw-r--r--drivers/mtd/mtdcore.c10
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/au1550nd.c11
-rw-r--r--drivers/mtd/nand/edb7312.c3
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/mtd/nand/ndfc.c2
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c7
-rw-r--r--drivers/mtd/nand/sharpsl.c7
-rw-r--r--drivers/mtd/ssfdc.c468
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/bmac.c13
-rw-r--r--drivers/net/dm9000.c4
-rw-r--r--drivers/net/e100.c6
-rw-r--r--drivers/net/e1000/e1000_main.c8
-rw-r--r--drivers/net/ibmveth.c3
-rw-r--r--drivers/net/ibmveth.h27
-rw-r--r--drivers/net/lp486e.c6
-rw-r--r--drivers/net/mace.c2
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/ppp_mppe.c68
-rw-r--r--drivers/net/spider_net.c12
-rw-r--r--drivers/net/sungem.c2
-rw-r--r--drivers/net/sunlance.c27
-rw-r--r--drivers/net/wireless/airo.c22
-rw-r--r--drivers/net/wireless/strip.c6
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c61
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c43
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h11
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c32
-rw-r--r--drivers/pci/quirks.c1
-rw-r--r--drivers/s390/Kconfig30
-rw-r--r--drivers/s390/block/dasd.c200
-rw-r--r--drivers/s390/block/dasd_devmap.c82
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c10
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/monwriter.c292
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/char/vmcp.h2
-rw-r--r--drivers/s390/cio/ccwgroup.c14
-rw-r--r--drivers/s390/cio/chsc.c13
-rw-r--r--drivers/s390/cio/cio.c95
-rw-r--r--drivers/s390/cio/css.c203
-rw-r--r--drivers/s390/cio/device.c128
-rw-r--r--drivers/s390/cio/device_fsm.c60
-rw-r--r--drivers/s390/cio/device_ops.c17
-rw-r--r--drivers/s390/cio/device_pgid.c108
-rw-r--r--drivers/s390/cio/qdio.c4
-rw-r--r--drivers/s390/cio/qdio.h16
-rw-r--r--drivers/s390/crypto/Makefile15
-rw-r--r--drivers/s390/crypto/ap_bus.c1221
-rw-r--r--drivers/s390/crypto/ap_bus.h158
-rw-r--r--drivers/s390/crypto/z90common.h166
-rw-r--r--drivers/s390/crypto/z90crypt.h71
-rw-r--r--drivers/s390/crypto/z90hardware.c2531
-rw-r--r--drivers/s390/crypto/z90main.c3379
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1091
-rw-r--r--drivers/s390/crypto/zcrypt_api.h141
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h350
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c435
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.h126
-rw-r--r--drivers/s390/crypto/zcrypt_error.h133
-rw-r--r--drivers/s390/crypto/zcrypt_mono.c100
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c418
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.h117
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c630
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.h176
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c951
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h79
-rw-r--r--drivers/s390/net/Kconfig9
-rw-r--r--drivers/s390/net/Makefile1
-rw-r--r--drivers/s390/net/ctcmain.c3
-rw-r--r--drivers/s390/net/iucv.c4
-rw-r--r--drivers/s390/net/lcs.c13
-rw-r--r--drivers/s390/net/netiucv.c80
-rw-r--r--drivers/s390/net/qeth.h73
-rw-r--r--drivers/s390/net/qeth_eddp.c5
-rw-r--r--drivers/s390/net/qeth_main.c517
-rw-r--r--drivers/s390/net/qeth_proc.c23
-rw-r--r--drivers/s390/net/qeth_sys.c64
-rw-r--r--drivers/s390/net/qeth_tso.h2
-rw-r--r--drivers/s390/s390mach.c17
-rw-r--r--drivers/s390/scsi/zfcp_def.h8
-rw-r--r--drivers/s390/sysinfo.c455
-rw-r--r--drivers/sbus/char/openprom.c13
-rw-r--r--drivers/scsi/ata_piix.c36
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c11
-rw-r--r--drivers/scsi/iscsi_tcp.c67
-rw-r--r--drivers/scsi/iscsi_tcp.h6
-rw-r--r--drivers/scsi/libata-core.c13
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/mesh.c5
-rw-r--r--drivers/scsi/sata_mv.c3
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_via.c1
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/serial/8250_pci.c31
-rw-r--r--drivers/serial/pmac_zilog.c9
-rw-r--r--drivers/serial/s3c2410.c2
-rw-r--r--drivers/serial/serial_core.c3
-rw-r--r--drivers/serial/sh-sci.c4
-rw-r--r--drivers/usb/gadget/ether.c45
-rw-r--r--drivers/usb/host/uhci-q.c4
-rw-r--r--drivers/usb/input/hid-core.c149
-rw-r--r--drivers/usb/input/usbtouchscreen.c2
-rw-r--r--drivers/usb/input/yealink.c12
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c2
-rw-r--r--drivers/usb/net/pegasus.h3
-rw-r--r--drivers/usb/net/rtl8150.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.h5
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/storage/unusual_devs.h24
-rw-r--r--drivers/video/S3triofb.c12
-rw-r--r--drivers/video/aty/aty128fb.c18
-rw-r--r--drivers/video/aty/atyfb_base.c18
-rw-r--r--drivers/video/aty/radeon_backlight.c4
-rw-r--r--drivers/video/aty/radeon_base.c8
-rw-r--r--drivers/video/aty/radeon_monitor.c12
-rw-r--r--drivers/video/aty/radeon_pm.c4
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/nvidia/nv_backlight.c18
-rw-r--r--drivers/video/nvidia/nv_of.c12
-rw-r--r--drivers/video/offb.c22
-rw-r--r--drivers/video/riva/fbdev.c23
376 files changed, 37572 insertions, 11805 deletions
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index d369130f423..ffcb9fd31c3 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1928,7 +1928,9 @@ he_service_rbrq(struct he_dev *he_dev, int group)
#ifdef notdef
ATM_SKB(skb)->vcc = vcc;
#endif
+ spin_unlock(&he_dev->global_lock);
vcc->push(vcc, skb);
+ spin_lock(&he_dev->global_lock);
atomic_inc(&vcc->stats->rx);
@@ -2282,6 +2284,8 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
if (new_tail == he_dev->tpdrq_head) {
+ int slot;
+
hprintk("tpdrq full (cid 0x%x)\n", cid);
/*
* FIXME
@@ -2289,6 +2293,13 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
* after service_tbrq, service the backlog
* for now, we just drop the pdu
*/
+ for (slot = 0; slot < TPD_MAXIOV; ++slot) {
+ if (tpd->iovec[slot].addr)
+ pci_unmap_single(he_dev->pci_dev,
+ tpd->iovec[slot].addr,
+ tpd->iovec[slot].len & TPD_LEN_MASK,
+ PCI_DMA_TODEVICE);
+ }
if (tpd->skb) {
if (tpd->vcc->pop)
tpd->vcc->pop(tpd->vcc, tpd->skb);
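The first hunk above drops he_dev->global_lock around vcc->push() because the ATM stack's receive callback can sleep or re-enter the driver, and calling up the stack with a driver spinlock held risks deadlock. A minimal user-space sketch of the same drop-call-reacquire pattern, with hypothetical names:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hand an item to a callback that may block or re-enter us:
     * drop the lock around the call, then reacquire and revalidate. */
    static void deliver(void *item, void (*push)(void *))
    {
            pthread_mutex_lock(&lock);
            /* ... unlink item from the shared queue ... */
            pthread_mutex_unlock(&lock);    /* drop before the callback */
            push(item);                     /* safe: no lock held */
            pthread_mutex_lock(&lock);      /* reacquire for bookkeeping */
            /* ... the queue may have changed while unlocked ... */
            pthread_mutex_unlock(&lock);
    }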
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
index 0c85e9d6a44..7080b413ddc 100644
--- a/drivers/base/hypervisor.c
+++ b/drivers/base/hypervisor.c
@@ -1,8 +1,9 @@
/*
* hypervisor.c - /sys/hypervisor subsystem.
*
- * This file is released under the GPLv2
+ * Copyright (C) IBM Corp. 2006
*
+ * This file is released under the GPLv2
*/
#include <linux/kobject.h>
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 7b0eca703a6..2cd3391ff87 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -48,14 +48,14 @@
#include <linux/completion.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
- " SA6i P600 P800 P400 P400i E200 E200i");
+ " SA6i P600 P800 P400 P400i E200 E200i E500");
MODULE_LICENSE("GPL");
#include "cciss_cmd.h"
@@ -82,6 +82,7 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
{0,}
};
@@ -110,6 +111,7 @@ static struct board_type products[] = {
{0x3213103C, "Smart Array E200i", &SA5_access},
{0x3214103C, "Smart Array E200i", &SA5_access},
{0x3215103C, "Smart Array E200i", &SA5_access},
+ {0x3233103C, "Smart Array E500", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 3d4261c39f1..40535036e89 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -40,11 +40,13 @@ static int
cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
{
int err = -EINVAL;
+ int cipher_len;
+ int mode_len;
char cms[LO_NAME_SIZE]; /* cipher-mode string */
char *cipher;
char *mode;
char *cmsp = cms; /* c-m string pointer */
- struct crypto_tfm *tfm = NULL;
+ struct crypto_blkcipher *tfm;
/* encryption breaks for non sector aligned offsets */
@@ -53,20 +55,39 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
cms[LO_NAME_SIZE - 1] = 0;
- cipher = strsep(&cmsp, "-");
- mode = strsep(&cmsp, "-");
-
- if (mode == NULL || strcmp(mode, "cbc") == 0)
- tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
- CRYPTO_TFM_REQ_MAY_SLEEP);
- else if (strcmp(mode, "ecb") == 0)
- tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
- CRYPTO_TFM_REQ_MAY_SLEEP);
- if (tfm == NULL)
+
+ cipher = cmsp;
+ cipher_len = strcspn(cmsp, "-");
+
+ mode = cmsp + cipher_len;
+ mode_len = 0;
+ if (*mode) {
+ mode++;
+ mode_len = strcspn(mode, "-");
+ }
+
+ if (!mode_len) {
+ mode = "cbc";
+ mode_len = 3;
+ }
+
+ if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
return -EINVAL;
- err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
+ memmove(cms, mode, mode_len);
+ cmsp = cms + mode_len;
+ *cmsp++ = '(';
+ memcpy(cmsp, info->lo_crypt_name, cipher_len);
+ cmsp += cipher_len;
+ *cmsp++ = ')';
+ *cmsp = 0;
+
+ tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
if (err != 0)
goto out_free_tfm;
@@ -75,99 +96,49 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
- crypto_free_tfm(tfm);
+ crypto_free_blkcipher(tfm);
out:
return err;
}
-typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm,
+typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
struct scatterlist *sg_out,
struct scatterlist *sg_in,
unsigned int nsg);
-
-static int
-cryptoloop_transfer_ecb(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t IV)
-{
- struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
- struct scatterlist sg_out = { NULL, };
- struct scatterlist sg_in = { NULL, };
-
- encdec_ecb_t encdecfunc;
- struct page *in_page, *out_page;
- unsigned in_offs, out_offs;
-
- if (cmd == READ) {
- in_page = raw_page;
- in_offs = raw_off;
- out_page = loop_page;
- out_offs = loop_off;
- encdecfunc = tfm->crt_u.cipher.cit_decrypt;
- } else {
- in_page = loop_page;
- in_offs = loop_off;
- out_page = raw_page;
- out_offs = raw_off;
- encdecfunc = tfm->crt_u.cipher.cit_encrypt;
- }
-
- while (size > 0) {
- const int sz = min(size, LOOP_IV_SECTOR_SIZE);
-
- sg_in.page = in_page;
- sg_in.offset = in_offs;
- sg_in.length = sz;
-
- sg_out.page = out_page;
- sg_out.offset = out_offs;
- sg_out.length = sz;
-
- encdecfunc(tfm, &sg_out, &sg_in, sz);
-
- size -= sz;
- in_offs += sz;
- out_offs += sz;
- }
-
- return 0;
-}
-
-typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm,
- struct scatterlist *sg_out,
- struct scatterlist *sg_in,
- unsigned int nsg, u8 *iv);
-
static int
-cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t IV)
+cryptoloop_transfer(struct loop_device *lo, int cmd,
+ struct page *raw_page, unsigned raw_off,
+ struct page *loop_page, unsigned loop_off,
+ int size, sector_t IV)
{
- struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+ struct crypto_blkcipher *tfm = lo->key_data;
+ struct blkcipher_desc desc = {
+ .tfm = tfm,
+ .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+ };
struct scatterlist sg_out = { NULL, };
struct scatterlist sg_in = { NULL, };
encdec_cbc_t encdecfunc;
struct page *in_page, *out_page;
unsigned in_offs, out_offs;
+ int err;
if (cmd == READ) {
in_page = raw_page;
in_offs = raw_off;
out_page = loop_page;
out_offs = loop_off;
- encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
+ encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
} else {
in_page = loop_page;
in_offs = loop_off;
out_page = raw_page;
out_offs = raw_off;
- encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
+ encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
}
while (size > 0) {
@@ -183,7 +154,10 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
sg_out.offset = out_offs;
sg_out.length = sz;
- encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
+ desc.info = iv;
+ err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+ if (err)
+ return err;
IV++;
size -= sz;
@@ -195,32 +169,6 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
}
static int
-cryptoloop_transfer(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t IV)
-{
- struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
- if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB)
- {
- lo->transfer = cryptoloop_transfer_ecb;
- return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off,
- loop_page, loop_off, size, IV);
- }
- if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC)
- {
- lo->transfer = cryptoloop_transfer_cbc;
- return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off,
- loop_page, loop_off, size, IV);
- }
-
- /* This is not supposed to happen */
-
- printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n");
- return -EINVAL;
-}
-
-static int
cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
{
return -EINVAL;
@@ -229,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
- struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+ struct crypto_blkcipher *tfm = lo->key_data;
if (tfm != NULL) {
- crypto_free_tfm(tfm);
+ crypto_free_blkcipher(tfm);
lo->key_data = NULL;
return 0;
}
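The heart of the cryptoloop conversion is renaming the transform: the legacy "cipher-mode" string from lo_crypt_name (mode defaulting to "cbc") is rewritten into the new crypto API's "mode(cipher)" template syntax before crypto_alloc_blkcipher() is called, so "aes-cbc" becomes "cbc(aes)". The patch does this in place with memmove(); below is a stand-alone sketch using a separate output buffer for clarity (build_tfm_name is a hypothetical helper, not part of the patch):

    #include <stdio.h>
    #include <string.h>

    #define LO_NAME_SIZE 64

    /* "aes-cbc" -> "cbc(aes)"; a missing mode defaults to cbc. */
    static int build_tfm_name(char *out, const char *crypt_name)
    {
            char cms[LO_NAME_SIZE];
            size_t cipher_len, mode_len;
            const char *mode;

            strncpy(cms, crypt_name, LO_NAME_SIZE);
            cms[LO_NAME_SIZE - 1] = 0;

            cipher_len = strcspn(cms, "-");
            mode = cms[cipher_len] ? cms + cipher_len + 1 : "cbc";
            mode_len = strcspn(mode, "-");
            if (!mode_len) {
                    mode = "cbc";
                    mode_len = 3;
            }

            /* "mode(cipher)" plus the NUL must fit in LO_NAME_SIZE */
            if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
                    return -1;
            sprintf(out, "%.*s(%.*s)", (int)mode_len, mode,
                    (int)cipher_len, cms);
            return 0;
    }

With this, "aes" and "aes-cbc" both yield "cbc(aes)", and "aes-ecb" yields "ecb(aes)", matching what the new cryptoloop_init() feeds to crypto_alloc_blkcipher().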
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 5109fa37c66..ad1d7065a1b 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4177,6 +4177,11 @@ static int __init floppy_init(void)
int i, unit, drive;
int err, dr;
+#if defined(CONFIG_PPC_MERGE)
+ if (check_legacy_ioport(FDC1))
+ return -ENODEV;
+#endif
+
raw_cmd = NULL;
for (dr = 0; dr < N_DRIVE; dr++) {
@@ -4234,13 +4239,6 @@ static int __init floppy_init(void)
}
use_virtual_dma = can_use_virtual_dma & 1;
-#if defined(CONFIG_PPC_MERGE)
- if (check_legacy_ioport(FDC1)) {
- del_timer(&fd_timeout);
- err = -ENODEV;
- goto out_unreg_region;
- }
-#endif
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
del_timer(&fd_timeout);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c40e487d9f5..52ea94b891f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -495,6 +495,21 @@ config LEGACY_PTY_COUNT
When not in use, each legacy PTY occupies 12 bytes on 32-bit
architectures and 24 bytes on 64-bit architectures.
+config BRIQ_PANEL
+ tristate 'Total Impact briQ front panel driver'
+ depends on PPC_CHRP
+ ---help---
+ The briQ is a small-footprint CHRP computer with a front-panel VFD,
+ a tri-state LED and two switches. It is the size of a CD-ROM drive.
+
+ If you have one and want anything shown on the VFD, you must answer
+ Y here.
+
+ To compile this driver as a module, choose M here: the
+ module will be called briq_panel.
+
+ It's safe to say N here.
+
config PRINTER
tristate "Parallel printer support"
depends on PARPORT
@@ -596,6 +611,13 @@ config HVC_CONSOLE
console. This driver allows each pSeries partition to have a console
which is accessed via the HMC.
+config HVC_ISERIES
+ bool "iSeries Hypervisor Virtual Console support"
+ depends on PPC_ISERIES && !VIOCONS
+ select HVC_DRIVER
+ help
+ iSeries machines support a hypervisor virtual console.
+
config HVC_RTAS
bool "IBM RTAS Console support"
depends on PPC_RTAS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 6e0f4469d8b..8c6dfc62152 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_VIOCONS) += viocons.o
obj-$(CONFIG_VIOTAPE) += viotape.o
obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_SGI_MBCS) += mbcs.o
+obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
obj-$(CONFIG_PRINTER) += lp.o
obj-$(CONFIG_TIPAR) += tipar.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 3c623b67ea1..8b3317fd46c 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -117,7 +117,7 @@ struct agp_bridge_driver {
};
struct agp_bridge_data {
- struct agp_version *version;
+ const struct agp_version *version;
struct agp_bridge_driver *driver;
struct vm_operations_struct *vm_ops;
void *previous_size;
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 509adc40325..d59e037ddd1 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -44,7 +44,7 @@
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 101
-static struct agp_version agp_current_version =
+static const struct agp_version agp_current_version =
{
.major = AGPGART_VERSION_MAJOR,
.minor = AGPGART_VERSION_MINOR,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index b788b0a3bbf..30f730ff81c 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -337,13 +337,6 @@ static struct agp_bridge_driver efficeon_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
-
-static int agp_efficeon_resume(struct pci_dev *pdev)
-{
- printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
- return efficeon_configure();
-}
-
static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -414,11 +407,18 @@ static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
+#ifdef CONFIG_PM
static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
{
return 0;
}
+static int agp_efficeon_resume(struct pci_dev *pdev)
+{
+ printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
+ return efficeon_configure();
+}
+#endif
static struct pci_device_id agp_efficeon_pci_table[] = {
{
@@ -439,8 +439,10 @@ static struct pci_driver agp_efficeon_pci_driver = {
.id_table = agp_efficeon_pci_table,
.probe = agp_efficeon_probe,
.remove = agp_efficeon_remove,
+#ifdef CONFIG_PM
.suspend = agp_efficeon_suspend,
.resume = agp_efficeon_resume,
+#endif
};
static int __init agp_efficeon_init(void)
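The efficeon change is the stock CONFIG_PM pattern: the suspend/resume callbacks and the struct pci_driver fields that reference them sit under the same #ifdef, so a kernel built without power management gets neither unused functions nor unresolved initializers. In outline (driver names hypothetical):

    #ifdef CONFIG_PM
    static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            return 0;                       /* nothing to save */
    }

    static int foo_resume(struct pci_dev *pdev)
    {
            return foo_configure();         /* reprogram the chipset */
    }
    #endif

    static struct pci_driver foo_pci_driver = {
            .name     = "foo",
            .probe    = foo_probe,
            .remove   = foo_remove,
    #ifdef CONFIG_PM
            .suspend  = foo_suspend,
            .resume   = foo_resume,
    #endif
    };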
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index d9c5a9142ad..0f2ed2aa2d8 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -151,35 +151,12 @@ static void agp_add_seg_to_client(struct agp_client *client,
client->segments = seg;
}
-/* Originally taken from linux/mm/mmap.c from the array
- * protection_map.
- * The original really should be exported to modules, or
- * some routine which does the conversion for you
- */
-
-static const pgprot_t my_protect_map[16] =
-{
- __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
- __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-};
-
static pgprot_t agp_convert_mmap_flags(int prot)
{
-#define _trans(x,bit1,bit2) \
-((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
-
unsigned long prot_bits;
- pgprot_t temp;
-
- prot_bits = _trans(prot, PROT_READ, VM_READ) |
- _trans(prot, PROT_WRITE, VM_WRITE) |
- _trans(prot, PROT_EXEC, VM_EXEC);
-
- prot_bits |= VM_SHARED;
- temp = my_protect_map[prot_bits & 0x0000000f];
-
- return temp;
+ prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
+ return vm_get_page_prot(prot_bits);
}
static int agp_create_segment(struct agp_client *client, struct agp_region *region)
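agp_convert_mmap_flags() now leans on the mm core instead of a stale private copy of protection_map: calc_vm_prot_bits() turns the PROT_* mmap flags into VM_* vma flags, and vm_get_page_prot() indexes the real protection map. A worked example as a comment (bit values per the mm headers of this era):

    /* For prot = PROT_READ | PROT_WRITE:
     *   calc_vm_prot_bits(prot)  -> VM_READ | VM_WRITE        (0x3)
     *   ... | VM_SHARED          -> 0xb
     *   vm_get_page_prot(0xb)    -> protection_map[0xb], i.e. __S011,
     * exactly what the removed my_protect_map table hand-computed. */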
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index cc5ea347a8a..0dcdb363923 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -568,25 +568,34 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
goto done;
+ } else if (*requested_mode & AGPSTAT3_4X) {
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ goto done;
+
} else {
/*
- * If we didn't specify AGPx8, we can only do x4.
- * If the hardware can't do x4, we're up shit creek, and never
- * should have got this far.
+ * If we didn't specify an AGP mode, we see whether both
+ * the graphics card and the bridge can do x8, and use it if so.
+ * If not, we fall back to x4 mode.
*/
- *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
- if ((*bridge_agpstat & AGPSTAT3_4X) && (*vga_agpstat & AGPSTAT3_4X))
- *bridge_agpstat |= AGPSTAT3_4X;
- else {
- printk(KERN_INFO PFX "Badness. Don't know which AGP mode to set. "
- "[bridge_agpstat:%x vga_agpstat:%x fell back to:- bridge_agpstat:%x vga_agpstat:%x]\n",
- origbridge, origvga, *bridge_agpstat, *vga_agpstat);
- if (!(*bridge_agpstat & AGPSTAT3_4X))
- printk(KERN_INFO PFX "Bridge couldn't do AGP x4.\n");
- if (!(*vga_agpstat & AGPSTAT3_4X))
- printk(KERN_INFO PFX "Graphic card couldn't do AGP x4.\n");
- return;
+ if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
+ printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode supported by bridge & card (x8).\n");
+ *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ } else {
+ printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+ if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ printk("bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", *bridge_agpstat, origbridge);
+ *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *bridge_agpstat |= AGPSTAT3_4X;
+ }
+ if (!(*vga_agpstat & AGPSTAT3_8X)) {
+ printk("graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", *vga_agpstat, origvga);
+ *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+ *vga_agpstat |= AGPSTAT3_4X;
+ }
}
}
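Once both AGPSTAT3 fields are known, the rewritten fallback reduces to a small decision table for the "no rate requested" case:

    /* bridge x8?  card x8?  -> negotiated rate
     *    yes        yes     -> x8 (both masks keep AGPSTAT3_8X)
     *    yes        no      -> x4 ("graphics card couldn't do x8")
     *    no         yes     -> x4 ("bridge couldn't do x8")
     *    no         no      -> x4 (both fallback messages printed)
     */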
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 61ac3809f99..d1ede7db5a1 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2,14 +2,6 @@
* Intel AGPGART routines.
*/
-/*
- * Intel(R) 855GM/852GM and 865G support added by David Dawes
- * <dawes@tungstengraphics.com>.
- *
- * Intel(R) 915G/915GM support added by Alan Hourihane
- * <alanh@tungstengraphics.com>.
- */
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
@@ -17,6 +9,21 @@
#include <linux/agp_backend.h>
#include "agp.h"
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
+#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82965G_1_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
+
+
/* Intel 815 register */
#define INTEL_815_APCONT 0x51
#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
@@ -40,6 +47,8 @@
#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+/* Intel 965G registers */
+#define I965_MSAC 0x62
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
@@ -354,6 +363,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
/* The 64M mode still requires a 128k gatt */
{64, 16384, 5},
{256, 65536, 6},
+ {512, 131072, 7},
};
static struct _intel_i830_private {
@@ -377,7 +387,11 @@ static void intel_i830_init_gtt_entries(void)
/* We obtain the size of the GTT, which is also stored (for some
* reason) at the top of stolen memory. Then we add 4KB to that
* for the video BIOS popup, which is also stored in there. */
- size = agp_bridge->driver->fetch_size() + 4;
+
+ if (IS_I965)
+ size = 512 + 4;
+ else
+ size = agp_bridge->driver->fetch_size() + 4;
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
@@ -423,7 +437,7 @@ static void intel_i830_init_gtt_entries(void)
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || IS_I965 )
gtt_entries = MB(48) - KB(size);
else
gtt_entries = 0;
@@ -433,7 +447,7 @@ static void intel_i830_init_gtt_entries(void)
if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || IS_I965)
gtt_entries = MB(64) - KB(size);
else
gtt_entries = 0;
@@ -791,6 +805,77 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
return 0;
}
+static int intel_i965_fetch_size(void)
+{
+ struct aper_size_info_fixed *values;
+ u32 offset = 0;
+ u8 temp;
+
+#define I965_512MB_ADDRESS_MASK (3<<1)
+
+ values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+ pci_read_config_byte(intel_i830_private.i830_dev, I965_MSAC, &temp);
+ temp &= I965_512MB_ADDRESS_MASK;
+ switch (temp) {
+ case 0x00:
+ offset = 0; /* 128MB */
+ break;
+ case 0x06:
+ offset = 3; /* 512MB */
+ break;
+ default:
+ case 0x02:
+ offset = 2; /* 256MB */
+ break;
+ }
+
+ agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
+
+ return values[offset].size;
+}
+
+/* The intel i965 automatically initializes the agp aperture during POST.
+ * Use the memory already set aside for the GTT.
+ */
+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+{
+ int page_order;
+ struct aper_size_info_fixed *size;
+ int num_entries;
+ u32 temp;
+
+ size = agp_bridge->current_size;
+ page_order = size->page_order;
+ num_entries = size->num_entries;
+ agp_bridge->gatt_table_real = NULL;
+
+ pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
+
+ temp &= 0xfff00000;
+ intel_i830_private.gtt = ioremap((temp + (512 * 1024)) , 512 * 1024);
+
+ if (!intel_i830_private.gtt)
+ return -ENOMEM;
+
+
+ intel_i830_private.registers = ioremap(temp,128 * 4096);
+ if (!intel_i830_private.registers)
+ return -ENOMEM;
+
+ temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_i830_init_gtt_entries();
+
+ agp_bridge->gatt_table = NULL;
+
+ agp_bridge->gatt_bus_addr = temp;
+
+ return 0;
+}
+
static int intel_fetch_size(void)
{
@@ -1307,7 +1392,7 @@ static struct agp_bridge_driver intel_830_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 3,
+ .num_aperture_sizes = 4,
.needs_scratch_page = TRUE,
.configure = intel_i830_configure,
.fetch_size = intel_i830_fetch_size,
@@ -1469,7 +1554,7 @@ static struct agp_bridge_driver intel_915_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_i830_sizes,
.size_type = FIXED_APER_SIZE,
- .num_aperture_sizes = 3,
+ .num_aperture_sizes = 4,
.needs_scratch_page = TRUE,
.configure = intel_i915_configure,
.fetch_size = intel_i915_fetch_size,
@@ -1489,6 +1574,29 @@ static struct agp_bridge_driver intel_915_driver = {
.agp_destroy_page = agp_generic_destroy_page,
};
+static struct agp_bridge_driver intel_i965_driver = {
+ .owner = THIS_MODULE,
+ .aperture_sizes = intel_i830_sizes,
+ .size_type = FIXED_APER_SIZE,
+ .num_aperture_sizes = 4,
+ .needs_scratch_page = TRUE,
+ .configure = intel_i915_configure,
+ .fetch_size = intel_i965_fetch_size,
+ .cleanup = intel_i915_cleanup,
+ .tlb_flush = intel_i810_tlbflush,
+ .mask_memory = intel_i810_mask_memory,
+ .masks = intel_i810_masks,
+ .agp_enable = intel_i810_agp_enable,
+ .cache_flush = global_cache_flush,
+ .create_gatt_table = intel_i965_create_gatt_table,
+ .free_gatt_table = intel_i830_free_gatt_table,
+ .insert_memory = intel_i915_insert_entries,
+ .remove_memory = intel_i915_remove_entries,
+ .alloc_by_type = intel_i830_alloc_by_type,
+ .free_by_type = intel_i810_free_by_type,
+ .agp_alloc_page = agp_generic_alloc_page,
+ .agp_destroy_page = agp_generic_destroy_page,
+};
static struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
@@ -1684,6 +1792,35 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
bridge->driver = &intel_845_driver;
name = "945GM";
break;
+ case PCI_DEVICE_ID_INTEL_82946GZ_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82946GZ_IG))
+ bridge->driver = &intel_i965_driver;
+ else
+ bridge->driver = &intel_845_driver;
+ name = "946GZ";
+ break;
+ case PCI_DEVICE_ID_INTEL_82965G_1_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82965G_1_IG))
+ bridge->driver = &intel_i965_driver;
+ else
+ bridge->driver = &intel_845_driver;
+ name = "965G";
+ break;
+ case PCI_DEVICE_ID_INTEL_82965Q_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82965Q_IG))
+ bridge->driver = &intel_i965_driver;
+ else
+ bridge->driver = &intel_845_driver;
+ name = "965Q";
+ break;
+ case PCI_DEVICE_ID_INTEL_82965G_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82965G_IG))
+ bridge->driver = &intel_i965_driver;
+ else
+ bridge->driver = &intel_845_driver;
+ name = "965G";
+ break;
+
case PCI_DEVICE_ID_INTEL_7505_0:
bridge->driver = &intel_7505_driver;
name = "E7505";
@@ -1766,6 +1903,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
+#ifdef CONFIG_PM
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
@@ -1786,9 +1924,12 @@ static int agp_intel_resume(struct pci_dev *pdev)
intel_i830_configure();
else if (bridge->driver == &intel_810_driver)
intel_i810_configure();
+ else if (bridge->driver == &intel_i965_driver)
+ intel_i915_configure();
return 0;
}
+#endif
static struct pci_device_id agp_intel_pci_table[] = {
#define ID(x) \
@@ -1825,6 +1966,10 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965G_HB),
{ }
};
@@ -1835,7 +1980,9 @@ static struct pci_driver agp_intel_pci_driver = {
.id_table = agp_intel_pci_table,
.probe = agp_intel_probe,
.remove = __devexit_p(agp_intel_remove),
+#ifdef CONFIG_PM
.resume = agp_intel_resume,
+#endif
};
static int __init agp_intel_init(void)
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 1de1b12043b..91b71e750ee 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -601,8 +601,8 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
uninorth_node = of_find_node_by_name(NULL, "u3");
}
if (uninorth_node) {
- int *revprop = (int *)
- get_property(uninorth_node, "device-rev", NULL);
+ const int *revprop = get_property(uninorth_node,
+ "device-rev", NULL);
if (revprop != NULL)
uninorth_rev = *revprop & 0x3f;
of_node_put(uninorth_node);
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index b8ec25d1747..c149ac9ce9a 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -9,7 +9,7 @@
#include <linux/agp_backend.h>
#include "agp.h"
-static struct pci_device_id agp_via_pci_table[];
+static const struct pci_device_id agp_via_pci_table[];
#define VIA_GARTCTRL 0x80
#define VIA_APSIZE 0x84
@@ -485,7 +485,7 @@ static int agp_via_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
/* must be the same order as name table above */
-static struct pci_device_id agp_via_pci_table[] = {
+static const struct pci_device_id agp_via_pci_table[] = {
#define ID(x) \
{ \
.class = (PCI_CLASS_BRIDGE_HOST << 8), \
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
new file mode 100644
index 00000000000..a0e5eac5f33
--- /dev/null
+++ b/drivers/char/briq_panel.c
@@ -0,0 +1,268 @@
+/*
+ * Drivers for the Total Impact PPC based computer "BRIQ"
+ * by Dr. Karsten Jeppesen
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/timer.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+
+#define BRIQ_PANEL_MINOR 156
+#define BRIQ_PANEL_VFD_IOPORT 0x0390
+#define BRIQ_PANEL_LED_IOPORT 0x0398
+#define BRIQ_PANEL_VER "1.1 (04/20/2002)"
+#define BRIQ_PANEL_MSG0 "Loading Linux"
+
+static int vfd_is_open;
+static unsigned char vfd[40];
+static int vfd_cursor;
+static unsigned char ledpb, led;
+
+static void update_vfd(void)
+{
+ int i;
+
+ /* cursor home */
+ outb(0x02, BRIQ_PANEL_VFD_IOPORT);
+ for (i=0; i<20; i++)
+ outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);
+
+ /* cursor to next line */
+ outb(0xc0, BRIQ_PANEL_VFD_IOPORT);
+ for (i=20; i<40; i++)
+ outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);
+
+}
+
+static void set_led(char state)
+{
+ if (state == 'R')
+ led = 0x01;
+ else if (state == 'G')
+ led = 0x02;
+ else if (state == 'Y')
+ led = 0x03;
+ else if (state == 'X')
+ led = 0x00;
+ outb(led, BRIQ_PANEL_LED_IOPORT);
+}
+
+static int briq_panel_open(struct inode *ino, struct file *filep)
+{
+ /* enforce single access */
+ if (vfd_is_open)
+ return -EBUSY;
+ vfd_is_open = 1;
+
+ return 0;
+}
+
+static int briq_panel_release(struct inode *ino, struct file *filep)
+{
+ if (!vfd_is_open)
+ return -ENODEV;
+
+ vfd_is_open = 0;
+
+ return 0;
+}
+
+static ssize_t briq_panel_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned short c;
+ unsigned char cp;
+
+#if 0 /* Can't seek (pread) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+#endif
+
+ if (!vfd_is_open)
+ return -ENODEV;
+
+ c = (inb(BRIQ_PANEL_LED_IOPORT) & 0x000c) | (ledpb & 0x0003);
+ set_led(' ');
+ /* upper button released */
+ if ((!(ledpb & 0x0004)) && (c & 0x0004)) {
+ cp = ' ';
+ ledpb = c;
+ if (copy_to_user(buf, &cp, 1))
+ return -EFAULT;
+ return 1;
+ }
+ /* lower button released */
+ else if ((!(ledpb & 0x0008)) && (c & 0x0008)) {
+ cp = '\r';
+ ledpb = c;
+ if (copy_to_user(buf, &cp, 1))
+ return -EFAULT;
+ return 1;
+ } else {
+ ledpb = c;
+ return 0;
+ }
+}
+
+static void scroll_vfd( void )
+{
+ int i;
+
+ for (i=0; i<20; i++) {
+ vfd[i] = vfd[i+20];
+ vfd[i+20] = ' ';
+ }
+ vfd_cursor = 20;
+}
+
+static ssize_t briq_panel_write(struct file *file, const char *buf, size_t len,
+ loff_t *ppos)
+{
+ size_t indx = len;
+ int i, esc = 0;
+
+#if 0 /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+#endif
+
+ if (!vfd_is_open)
+ return -EBUSY;
+
+ for (;;) {
+ if (!indx)
+ break;
+ if (esc) {
+ set_led(*buf);
+ esc = 0;
+ } else if (*buf == 27) {
+ esc = 1;
+ } else if (*buf == 12) {
+ /* do a form feed */
+ for (i=0; i<40; i++)
+ vfd[i] = ' ';
+ vfd_cursor = 0;
+ } else if (*buf == 10) {
+ if (vfd_cursor < 20)
+ vfd_cursor = 20;
+ else if (vfd_cursor < 40)
+ vfd_cursor = 40;
+ else if (vfd_cursor < 60)
+ vfd_cursor = 60;
+ if (vfd_cursor > 59)
+ scroll_vfd();
+ } else {
+ /* just a character */
+ if (vfd_cursor > 39)
+ scroll_vfd();
+ vfd[vfd_cursor++] = *buf;
+ }
+ indx--;
+ buf++;
+ }
+ update_vfd();
+
+ return len;
+}
+
+static struct file_operations briq_panel_fops = {
+ .owner = THIS_MODULE,
+ .read = briq_panel_read,
+ .write = briq_panel_write,
+ .open = briq_panel_open,
+ .release = briq_panel_release,
+};
+
+static struct miscdevice briq_panel_miscdev = {
+ BRIQ_PANEL_MINOR,
+ "briq_panel",
+ &briq_panel_fops
+};
+
+static int __init briq_panel_init(void)
+{
+ struct device_node *root = find_path_device("/");
+ char *machine;
+ int i;
+
+ machine = get_property(root, "model", NULL);
+ if (!machine || strncmp(machine, "TotalImpact,BRIQ-1", 18) != 0)
+ return -ENODEV;
+
+ printk(KERN_INFO
+ "briq_panel: v%s Dr. Karsten Jeppesen (kj@totalimpact.com)\n",
+ BRIQ_PANEL_VER);
+
+ if (!request_region(BRIQ_PANEL_VFD_IOPORT, 4, "BRIQ Front Panel"))
+ return -EBUSY;
+
+ if (!request_region(BRIQ_PANEL_LED_IOPORT, 2, "BRIQ Front Panel")) {
+ release_region(BRIQ_PANEL_VFD_IOPORT, 4);
+ return -EBUSY;
+ }
+ ledpb = inb(BRIQ_PANEL_LED_IOPORT) & 0x000c;
+
+ if (misc_register(&briq_panel_miscdev) < 0) {
+ release_region(BRIQ_PANEL_VFD_IOPORT, 4);
+ release_region(BRIQ_PANEL_LED_IOPORT, 2);
+ return -EBUSY;
+ }
+
+ outb(0x38, BRIQ_PANEL_VFD_IOPORT); /* Function set */
+ outb(0x01, BRIQ_PANEL_VFD_IOPORT); /* Clear display */
+ outb(0x0c, BRIQ_PANEL_VFD_IOPORT); /* Display on */
+ outb(0x06, BRIQ_PANEL_VFD_IOPORT); /* Entry normal */
+ for (i=0; i<40; i++)
+ vfd[i]=' ';
+#ifndef MODULE
+ vfd[0] = 'L';
+ vfd[1] = 'o';
+ vfd[2] = 'a';
+ vfd[3] = 'd';
+ vfd[4] = 'i';
+ vfd[5] = 'n';
+ vfd[6] = 'g';
+ vfd[7] = ' ';
+ vfd[8] = '.';
+ vfd[9] = '.';
+ vfd[10] = '.';
+#endif /* !MODULE */
+
+ update_vfd();
+
+ return 0;
+}
+
+static void __exit briq_panel_exit(void)
+{
+ misc_deregister(&briq_panel_miscdev);
+ release_region(BRIQ_PANEL_VFD_IOPORT, 4);
+ release_region(BRIQ_PANEL_LED_IOPORT, 2);
+}
+
+module_init(briq_panel_init);
+module_exit(briq_panel_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Karsten Jeppesen <karsten@jeppesens.com>");
+MODULE_DESCRIPTION("Driver for the Total Impact briQ front panel");
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 5bb2234a909..39a7f685e3f 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -175,6 +175,14 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
}
break;
+ case R200_EMIT_VAP_CTL:{
+ RING_LOCALS;
+ BEGIN_RING(2);
+ OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+ ADVANCE_RING();
+ }
+ break;
+
case RADEON_EMIT_RB3D_COLORPITCH:
case RADEON_EMIT_RE_LINE_PATTERN:
case RADEON_EMIT_SE_LINE_WIDTH:
@@ -202,7 +210,6 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
case R200_EMIT_TFACTOR_0:
case R200_EMIT_VTX_FMT_0:
- case R200_EMIT_VAP_CTL:
case R200_EMIT_MATRIX_SELECT_0:
case R200_EMIT_TEX_PROC_CTL_2:
case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index ca2f538e549..a76d2c40dd5 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -80,7 +80,8 @@ struct hvc_struct {
struct tty_struct *tty;
unsigned int count;
int do_wakeup;
- char outbuf[N_OUTBUF] __ALIGNED__;
+ char *outbuf;
+ int outbuf_size;
int n_outbuf;
uint32_t vtermno;
struct hv_ops *ops;
@@ -319,10 +320,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
struct kobject *kobjp;
/* Auto increments kobject reference if found. */
- if (!(hp = hvc_get_by_index(tty->index))) {
- printk(KERN_WARNING "hvc_console: tty open failed, no vty associated with tty.\n");
+ if (!(hp = hvc_get_by_index(tty->index)))
return -ENODEV;
- }
spin_lock_irqsave(&hp->lock, flags);
/* Check and then increment for fast path open. */
@@ -505,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
if (hp->n_outbuf > 0)
hvc_push(hp);
- while (count > 0 && (rsize = N_OUTBUF - hp->n_outbuf) > 0) {
+ while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) {
if (rsize > count)
rsize = count;
memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
@@ -538,7 +537,7 @@ static int hvc_write_room(struct tty_struct *tty)
if (!hp)
return -1;
- return N_OUTBUF - hp->n_outbuf;
+ return hp->outbuf_size - hp->n_outbuf;
}
static int hvc_chars_in_buffer(struct tty_struct *tty)
@@ -668,6 +667,7 @@ int khvcd(void *unused)
do {
poll_mask = 0;
hvc_kicked = 0;
+ try_to_freeze();
wmb();
if (cpus_empty(cpus_in_xmon)) {
spin_lock(&hvc_structs_lock);
@@ -728,12 +728,13 @@ static struct kobj_type hvc_kobj_type = {
};
struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
- struct hv_ops *ops)
+ struct hv_ops *ops, int outbuf_size)
{
struct hvc_struct *hp;
int i;
- hp = kmalloc(sizeof(*hp), GFP_KERNEL);
+ hp = kmalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
+ GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
@@ -742,6 +743,8 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
hp->vtermno = vtermno;
hp->irq = irq;
hp->ops = ops;
+ hp->outbuf_size = outbuf_size;
+ hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
kobject_init(&hp->kobj);
hp->kobj.ktype = &hvc_kobj_type;
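The hvc_alloc() interface change replaces the fixed N_OUTBUF array with a caller-sized buffer carved out of the same kmalloc() as the hvc_struct, placed at the next long-aligned offset past the struct so a single kfree() still releases everything. A self-contained user-space analog (ALIGN_UP and hvc_new are illustrative names):

    #include <stdlib.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct hvc {                    /* stand-in for struct hvc_struct */
            char *outbuf;
            int outbuf_size;
            /* ... */
    };

    /* One allocation holds the struct followed by its aligned buffer. */
    static struct hvc *hvc_new(int outbuf_size)
    {
            size_t off = ALIGN_UP(sizeof(struct hvc), sizeof(long));
            struct hvc *hp = malloc(off + outbuf_size);

            if (!hp)
                    return NULL;
            hp->outbuf = (char *)hp + off;
            hp->outbuf_size = outbuf_size;
            return hp;              /* free(hp) frees buffer too */
    }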
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 96b7401319c..8c59818050e 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -56,7 +56,7 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
/* register a vterm for hvc tty operation (module_init or hotplug add) */
extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq,
- struct hv_ops *ops);
+ struct hv_ops *ops, int outbuf_size);
/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
extern int __devexit hvc_remove(struct hvc_struct *hp);
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
new file mode 100644
index 00000000000..4747729459c
--- /dev/null
+++ b/drivers/char/hvc_iseries.c
@@ -0,0 +1,594 @@
+/*
+ * iSeries vio driver interface to hvc_console.c
+ *
+ * This code is based heavily on hvc_vio.c and viocons.c
+ *
+ * Copyright (C) 2006 Stephen Rothwell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+
+#include <asm/hvconsole.h>
+#include <asm/vio.h>
+#include <asm/prom.h>
+#include <asm/iseries/vio.h>
+#include <asm/iseries/hv_call.h>
+#include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_lp_event.h>
+
+#include "hvc_console.h"
+
+#define VTTY_PORTS 10
+
+static DEFINE_SPINLOCK(consolelock);
+static DEFINE_SPINLOCK(consoleloglock);
+
+static const char hvc_driver_name[] = "hvc_console";
+
+#define IN_BUF_SIZE 200
+
+/*
+ * Our port information.
+ */
+static struct port_info {
+ HvLpIndex lp;
+ u64 seq; /* sequence number of last HV send */
+ u64 ack; /* last ack from HV */
+ struct hvc_struct *hp;
+ int in_start;
+ int in_end;
+ unsigned char in_buf[IN_BUF_SIZE];
+} port_info[VTTY_PORTS] = {
+ [ 0 ... VTTY_PORTS - 1 ] = {
+ .lp = HvLpIndexInvalid
+ }
+};
+
+#define viochar_is_console(pi) ((pi) == &port_info[0])
+
+static struct vio_device_id hvc_driver_table[] __devinitdata = {
+ {"serial", "IBM,iSeries-vty"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, hvc_driver_table);
+
+static void hvlog(char *fmt, ...)
+{
+ int i;
+ unsigned long flags;
+ va_list args;
+ static char buf[256];
+
+ spin_lock_irqsave(&consoleloglock, flags);
+ va_start(args, fmt);
+ i = vscnprintf(buf, sizeof(buf) - 1, fmt, args);
+ va_end(args);
+ buf[i++] = '\r';
+ HvCall_writeLogBuffer(buf, i);
+ spin_unlock_irqrestore(&consoleloglock, flags);
+}
+
+/*
+ * Initialize the common fields in a charLpEvent
+ */
+static void init_data_event(struct viocharlpevent *viochar, HvLpIndex lp)
+{
+ struct HvLpEvent *hev = &viochar->event;
+
+ memset(viochar, 0, sizeof(struct viocharlpevent));
+
+ hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
+ HV_LP_EVENT_INT;
+ hev->xType = HvLpEvent_Type_VirtualIo;
+ hev->xSubtype = viomajorsubtype_chario | viochardata;
+ hev->xSourceLp = HvLpConfig_getLpIndex();
+ hev->xTargetLp = lp;
+ hev->xSizeMinus1 = sizeof(struct viocharlpevent);
+ hev->xSourceInstanceId = viopath_sourceinst(lp);
+ hev->xTargetInstanceId = viopath_targetinst(lp);
+}
+
+static int get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct port_info *pi;
+ int n = 0;
+ unsigned long flags;
+
+ if (vtermno >= VTTY_PORTS)
+ return -EINVAL;
+ if (count == 0)
+ return 0;
+
+ pi = &port_info[vtermno];
+ spin_lock_irqsave(&consolelock, flags);
+
+ if (pi->in_end == 0)
+ goto done;
+
+ n = pi->in_end - pi->in_start;
+ if (n > count)
+ n = count;
+ memcpy(buf, &pi->in_buf[pi->in_start], n);
+ pi->in_start += n;
+ if (pi->in_start == pi->in_end) {
+ pi->in_start = 0;
+ pi->in_end = 0;
+ }
+done:
+ spin_unlock_irqrestore(&consolelock, flags);
+ return n;
+}
+
+static int put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct viocharlpevent *viochar;
+ struct port_info *pi;
+ HvLpEvent_Rc hvrc;
+ unsigned long flags;
+ int sent = 0;
+
+ if (vtermno >= VTTY_PORTS)
+ return -EINVAL;
+
+ pi = &port_info[vtermno];
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ if (viochar_is_console(pi) && !viopath_isactive(pi->lp)) {
+ spin_lock_irqsave(&consoleloglock, flags);
+ HvCall_writeLogBuffer(buf, count);
+ spin_unlock_irqrestore(&consoleloglock, flags);
+ sent = count;
+ goto done;
+ }
+
+ viochar = vio_get_event_buffer(viomajorsubtype_chario);
+ if (viochar == NULL) {
+		hvlog("\n\rhvc: Can't get viochar buffer.");
+ goto done;
+ }
+
+ while ((count > 0) && ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
+ int len;
+
+ len = (count > VIOCHAR_MAX_DATA) ? VIOCHAR_MAX_DATA : count;
+
+ if (viochar_is_console(pi)) {
+ spin_lock_irqsave(&consoleloglock, flags);
+ HvCall_writeLogBuffer(buf, len);
+ spin_unlock_irqrestore(&consoleloglock, flags);
+ }
+
+ init_data_event(viochar, pi->lp);
+
+ viochar->len = len;
+ viochar->event.xCorrelationToken = pi->seq++;
+ viochar->event.xSizeMinus1 =
+ offsetof(struct viocharlpevent, data) + len;
+
+ memcpy(viochar->data, buf, len);
+
+ hvrc = HvCallEvent_signalLpEvent(&viochar->event);
+ if (hvrc)
+ hvlog("\n\rerror sending event! return code %d\n\r",
+ (int)hvrc);
+ sent += len;
+ count -= len;
+ buf += len;
+ }
+
+ vio_free_event_buffer(viomajorsubtype_chario, viochar);
+done:
+ spin_unlock_irqrestore(&consolelock, flags);
+ return sent;
+}
+
+static struct hv_ops hvc_get_put_ops = {
+ .get_chars = get_chars,
+ .put_chars = put_chars,
+};
+
+static int __devinit hvc_vio_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct hvc_struct *hp;
+ struct port_info *pi;
+
+ /* probed with invalid parameters. */
+ if (!vdev || !id)
+ return -EPERM;
+
+ if (vdev->unit_address >= VTTY_PORTS)
+ return -ENODEV;
+
+ pi = &port_info[vdev->unit_address];
+
+ hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
+ VIOCHAR_MAX_DATA);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+ pi->hp = hp;
+ dev_set_drvdata(&vdev->dev, pi);
+
+ return 0;
+}
+
+static int __devexit hvc_vio_remove(struct vio_dev *vdev)
+{
+ struct port_info *pi = dev_get_drvdata(&vdev->dev);
+ struct hvc_struct *hp = pi->hp;
+
+ return hvc_remove(hp);
+}
+
+static struct vio_driver hvc_vio_driver = {
+ .id_table = hvc_driver_table,
+ .probe = hvc_vio_probe,
+ .remove = hvc_vio_remove,
+ .driver = {
+ .name = hvc_driver_name,
+ .owner = THIS_MODULE,
+ }
+};
+
+static void hvc_open_event(struct HvLpEvent *event)
+{
+ unsigned long flags;
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ u8 port = cevent->virtual_device;
+ struct port_info *pi;
+ int reject = 0;
+
+ if (hvlpevent_is_ack(event)) {
+ if (port >= VTTY_PORTS)
+ return;
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ pi = &port_info[port];
+ if (event->xRc == HvLpEvent_Rc_Good) {
+ pi->seq = pi->ack = 0;
+			/*
+			 * This line allows connections from the primary
+			 * partition, but once the primary partition has
+			 * connected, nothing short of rebooting Linux
+			 * will allow the hosting partition to connect
+			 * again without a required iSeries fix.
+			 */
+ pi->lp = event->xTargetLp;
+ }
+
+ spin_unlock_irqrestore(&consolelock, flags);
+ if (event->xRc != HvLpEvent_Rc_Good)
+ printk(KERN_WARNING
+ "hvc: handle_open_event: event->xRc == (%d).\n",
+ event->xRc);
+
+ if (event->xCorrelationToken != 0) {
+			atomic_t *aptr = (atomic_t *)event->xCorrelationToken;
+ atomic_set(aptr, 1);
+ } else
+ printk(KERN_WARNING
+ "hvc: weird...got open ack without atomic\n");
+ return;
+ }
+
+ /* This had better require an ack, otherwise complain */
+ if (!hvlpevent_need_ack(event)) {
+ printk(KERN_WARNING "hvc: viocharopen without ack bit!\n");
+ return;
+ }
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ /* Make sure this is a good virtual tty */
+ if (port >= VTTY_PORTS) {
+ event->xRc = HvLpEvent_Rc_SubtypeError;
+ cevent->subtype_result_code = viorc_openRejected;
+ /*
+ * Flag state here since we can't printk while holding
+ * the consolelock spinlock.
+ */
+ reject = 1;
+ } else {
+ pi = &port_info[port];
+ if ((pi->lp != HvLpIndexInvalid) &&
+ (pi->lp != event->xSourceLp)) {
+ /*
+			 * If this tty is already connected to a different
+ * partition, fail.
+ */
+ event->xRc = HvLpEvent_Rc_SubtypeError;
+ cevent->subtype_result_code = viorc_openRejected;
+ reject = 2;
+ } else {
+ pi->lp = event->xSourceLp;
+ event->xRc = HvLpEvent_Rc_Good;
+ cevent->subtype_result_code = viorc_good;
+ pi->seq = pi->ack = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&consolelock, flags);
+
+ if (reject == 1)
+ printk(KERN_WARNING "hvc: open rejected: bad virtual tty.\n");
+ else if (reject == 2)
+ printk(KERN_WARNING "hvc: open rejected: console in exclusive "
+ "use by another partition.\n");
+
+ /* Return the acknowledgement */
+ HvCallEvent_ackLpEvent(event);
+}
+
+/*
+ * Handle a close charLpEvent. This should ONLY be an Interrupt because the
+ * virtual console should never actually issue a close event to the hypervisor
+ * because the virtual console never goes away. A close event coming from the
+ * hypervisor simply means that there are no client consoles connected to the
+ * virtual console.
+ */
+static void hvc_close_event(struct HvLpEvent *event)
+{
+ unsigned long flags;
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ u8 port = cevent->virtual_device;
+
+ if (!hvlpevent_is_int(event)) {
+ printk(KERN_WARNING
+		       "hvc: got unexpected close acknowledgement\n");
+ return;
+ }
+
+ if (port >= VTTY_PORTS) {
+ printk(KERN_WARNING
+ "hvc: close message from invalid virtual device.\n");
+ return;
+ }
+
+ /* For closes, just mark the console partition invalid */
+ spin_lock_irqsave(&consolelock, flags);
+
+ if (port_info[port].lp == event->xSourceLp)
+ port_info[port].lp = HvLpIndexInvalid;
+
+ spin_unlock_irqrestore(&consolelock, flags);
+}
+
+static void hvc_data_event(struct HvLpEvent *event)
+{
+ unsigned long flags;
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ struct port_info *pi;
+ int n;
+ u8 port = cevent->virtual_device;
+
+ if (port >= VTTY_PORTS) {
+ printk(KERN_WARNING "hvc: data on invalid virtual device %d\n",
+ port);
+ return;
+ }
+ if (cevent->len == 0)
+ return;
+
+ /*
+ * Change 05/01/2003 - Ryan Arnold: If a partition other than
+ * the current exclusive partition tries to send us data
+ * events then just drop them on the floor because we don't
+ * want his stinking data. He isn't authorized to receive
+ * data because he wasn't the first one to get the console,
+ * therefore he shouldn't be allowed to send data either.
+ * This will work without an iSeries fix.
+ */
+ pi = &port_info[port];
+ if (pi->lp != event->xSourceLp)
+ return;
+
+ spin_lock_irqsave(&consolelock, flags);
+
+ n = IN_BUF_SIZE - pi->in_end;
+ if (n > cevent->len)
+ n = cevent->len;
+ if (n > 0) {
+ memcpy(&pi->in_buf[pi->in_end], cevent->data, n);
+ pi->in_end += n;
+ }
+ spin_unlock_irqrestore(&consolelock, flags);
+ if (n == 0)
+ printk(KERN_WARNING "hvc: input buffer overflow\n");
+}
+
+static void hvc_ack_event(struct HvLpEvent *event)
+{
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+ unsigned long flags;
+ u8 port = cevent->virtual_device;
+
+ if (port >= VTTY_PORTS) {
+		printk(KERN_WARNING "hvc: ack on invalid virtual device\n");
+ return;
+ }
+
+ spin_lock_irqsave(&consolelock, flags);
+ port_info[port].ack = event->xCorrelationToken;
+ spin_unlock_irqrestore(&consolelock, flags);
+}
+
+static void hvc_config_event(struct HvLpEvent *event)
+{
+ struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+
+ if (cevent->data[0] == 0x01)
+ printk(KERN_INFO "hvc: window resized to %d: %d: %d: %d\n",
+ cevent->data[1], cevent->data[2],
+ cevent->data[3], cevent->data[4]);
+ else
+ printk(KERN_WARNING "hvc: unknown config event\n");
+}
+
+static void hvc_handle_event(struct HvLpEvent *event)
+{
+ int charminor;
+
+ if (event == NULL)
+ return;
+
+ charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
+ switch (charminor) {
+ case viocharopen:
+ hvc_open_event(event);
+ break;
+ case viocharclose:
+ hvc_close_event(event);
+ break;
+ case viochardata:
+ hvc_data_event(event);
+ break;
+ case viocharack:
+ hvc_ack_event(event);
+ break;
+ case viocharconfig:
+ hvc_config_event(event);
+ break;
+ default:
+ if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
+ event->xRc = HvLpEvent_Rc_InvalidSubtype;
+ HvCallEvent_ackLpEvent(event);
+ }
+ }
+}
+
+static int send_open(HvLpIndex remoteLp, void *sem)
+{
+ return HvCallEvent_signalLpEventFast(remoteLp,
+ HvLpEvent_Type_VirtualIo,
+ viomajorsubtype_chario | viocharopen,
+ HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+ viopath_sourceinst(remoteLp),
+ viopath_targetinst(remoteLp),
+ (u64)(unsigned long)sem, VIOVERSION << 16,
+ 0, 0, 0, 0);
+}
+
+static int hvc_vio_init(void)
+{
+ atomic_t wait_flag;
+ int rc;
+
+ /* +2 for fudge */
+ rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
+ viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
+ if (rc)
+ printk(KERN_WARNING "hvc: error opening to primary %d\n", rc);
+
+ if (viopath_hostLp == HvLpIndexInvalid)
+ vio_set_hostlp();
+
+ /*
+ * And if the primary is not the same as the hosting LP, open to the
+ * hosting lp
+ */
+ if ((viopath_hostLp != HvLpIndexInvalid) &&
+ (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) {
+ printk(KERN_INFO "hvc: open path to hosting (%d)\n",
+ viopath_hostLp);
+ rc = viopath_open(viopath_hostLp, viomajorsubtype_chario,
+ VIOCHAR_WINDOW + 2); /* +2 for fudge */
+ if (rc)
+ printk(KERN_WARNING
+ "error opening to partition %d: %d\n",
+ viopath_hostLp, rc);
+ }
+
+ if (vio_setHandler(viomajorsubtype_chario, hvc_handle_event) < 0)
+ printk(KERN_WARNING
+		       "hvc: error setting handler for console events!\n");
+
+ /*
+ * First, try to open the console to the hosting lp.
+ * Wait on a semaphore for the response.
+ */
+ atomic_set(&wait_flag, 0);
+ if ((viopath_isactive(viopath_hostLp)) &&
+ (send_open(viopath_hostLp, &wait_flag) == 0)) {
+ printk(KERN_INFO "hvc: hosting partition %d\n", viopath_hostLp);
+ while (atomic_read(&wait_flag) == 0)
+ mb();
+ atomic_set(&wait_flag, 0);
+ }
+
+ /*
+ * If we don't have an active console, try the primary
+ */
+ if ((!viopath_isactive(port_info[0].lp)) &&
+ (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) &&
+ (send_open(HvLpConfig_getPrimaryLpIndex(), &wait_flag) == 0)) {
+ printk(KERN_INFO "hvc: opening console to primary partition\n");
+ while (atomic_read(&wait_flag) == 0)
+ mb();
+ }
+
+ /* Register as a vio device to receive callbacks */
+ rc = vio_register_driver(&hvc_vio_driver);
+
+ return rc;
+}
+module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
+
+static void hvc_vio_exit(void)
+{
+ vio_unregister_driver(&hvc_vio_driver);
+}
+module_exit(hvc_vio_exit);
+
+/* the device tree order defines our numbering */
+static int hvc_find_vtys(void)
+{
+ struct device_node *vty;
+ int num_found = 0;
+
+ for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
+ vty = of_find_node_by_name(vty, "vty")) {
+ uint32_t *vtermno;
+
+ /* We have statically defined space for only a certain number
+ * of console adapters.
+ */
+ if ((num_found >= MAX_NR_HVC_CONSOLES) ||
+ (num_found >= VTTY_PORTS))
+ break;
+
+ vtermno = (uint32_t *)get_property(vty, "reg", NULL);
+ if (!vtermno)
+ continue;
+
+ if (!device_is_compatible(vty, "IBM,iSeries-vty"))
+ continue;
+
+ if (num_found == 0)
+ add_preferred_console("hvc", 0, NULL);
+ hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops);
+ ++num_found;
+ }
+
+ return num_found;
+}
+console_initcall(hvc_find_vtys);
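
put_chars() above throttles on a credit window: it keeps signalling events only while fewer than VIOCHAR_WINDOW of them are unacknowledged (pi->seq - pi->ack), and hvc_ack_event() later advances pi->ack from the ack's correlation token. A self-contained userspace model of that bookkeeping (the names mirror the driver, but nothing here is kernel code):

	#include <stdio.h>

	#define WINDOW 10	/* stands in for VIOCHAR_WINDOW */

	int main(void)
	{
		unsigned long long seq = 0, ack = 0;
		int sent = 0, stalled = 0;

		for (int i = 0; i < 30; i++) {
			if (seq - ack < WINDOW) {
				seq++;		/* signal one event */
				sent++;
			} else {
				stalled++;	/* put_chars() would stop sending here */
			}
			if (i == 14)
				ack = seq;	/* pretend one deferred ack arrives mid-run */
		}
		printf("sent=%d stalled=%d\n", sent, stalled);	/* sent=20 stalled=10 */
		return 0;
	}

Until an ack arrives, at most WINDOW events are in flight; everything past that is dropped on the floor by the driver, which is acceptable for console traffic.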
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index 57106e02fd2..4b97eaf1860 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -94,7 +94,7 @@ static int hvc_rtas_init(void)
/* Allocate an hvc_struct for the console device we instantiated
* earlier. Save off hp so that we can return it on exit */
- hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops);
+ hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops, 16);
if (IS_ERR(hp))
return PTR_ERR(hp);
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 9add81ceb44..cc95941148f 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -90,7 +90,8 @@ static int __devinit hvc_vio_probe(struct vio_dev *vdev,
if (!vdev || !id)
return -EPERM;
- hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops);
+ hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
+ MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
dev_set_drvdata(&vdev->dev, hp);
@@ -140,7 +141,7 @@ static int hvc_find_vtys(void)
for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
vty = of_find_node_by_name(vty, "vty")) {
- uint32_t *vtermno;
+ const uint32_t *vtermno;
/* We have statically defined space for only a certain number
* of console adapters.
@@ -148,7 +149,7 @@ static int hvc_find_vtys(void)
if (num_found >= MAX_NR_HVC_CONSOLES)
break;
- vtermno = (uint32_t *)get_property(vty, "reg", NULL);
+ vtermno = get_property(vty, "reg", NULL);
if (!vtermno)
continue;
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 017f755632a..a89a95fb5e4 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -1274,11 +1274,10 @@ static int __init hvsi_console_init(void)
vty != NULL;
vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
struct hvsi_struct *hp;
- uint32_t *vtermno;
- uint32_t *irq;
+ const uint32_t *vtermno, *irq;
- vtermno = (uint32_t *)get_property(vty, "reg", NULL);
- irq = (uint32_t *)get_property(vty, "interrupts", NULL);
+ vtermno = get_property(vty, "reg", NULL);
+ irq = get_property(vty, "interrupts", NULL);
if (!vtermno || !irq)
continue;
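
This hunk, like the hvc_vio.c and tpm_atmel.h hunks around it, drops casts so the callers match a const-returning get_property(). For reference, the prototype these callers appear to be adapting to is assumed to be roughly:

	/* Assumed powerpc prototype of this era (see <asm/prom.h>); shown
	 * for context only, not part of this patch. */
	extern const void *get_property(const struct device_node *node,
					const char *name, int *lenp);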
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 0aa5d608fe6..843d34c8627 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3428,6 +3428,7 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
if (rv) {
+ rv->user = NULL;
rv->done = free_recv_msg;
atomic_inc(&recv_msg_inuse_count);
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f57eba0bf25..abca98beac1 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -402,10 +402,10 @@ static void handle_flags(struct smi_info *smi_info)
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_EVENTS;
- } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
- if (smi_info->oem_data_avail_handler)
- if (smi_info->oem_data_avail_handler(smi_info))
- goto retry;
+ } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
+ smi_info->oem_data_avail_handler) {
+ if (smi_info->oem_data_avail_handler(smi_info))
+ goto retry;
} else {
smi_info->si_state = SI_NORMAL;
}
@@ -2481,6 +2481,7 @@ static __devinit int init_ipmi_si(void)
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
+ driver_unregister(&ipmi_driver);
printk("ipmi_si: Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
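
The init_ipmi_si() change plugs a leak on the no-interfaces path: the bus driver registered at the top of the function was previously left registered on this early return. The shape of the fix, with a hypothetical condition name since the full function is not shown:

	/* Sketch: unwind earlier registrations in reverse order on failure. */
	rc = driver_register(&ipmi_driver);
	if (rc)
		return rc;

	/* ... probe PCI, ACPI, DMI, hardcoded interfaces ... */

	if (no_interfaces_found) {	/* hypothetical name */
	#ifdef CONFIG_PCI
		pci_unregister_driver(&ipmi_pci_driver);
	#endif
		driver_unregister(&ipmi_driver);
		return -ENODEV;
	}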
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 84dfc427813..8c09997cc3d 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -3488,7 +3488,7 @@ static int stli_initecp(stlibrd_t *brdp)
*/
EBRDENABLE(brdp);
sigsp = (cdkecpsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
- memcpy(&sig, sigsp, sizeof(cdkecpsig_t));
+ memcpy_fromio(&sig, sigsp, sizeof(cdkecpsig_t));
EBRDDISABLE(brdp);
if (sig.magic != cpu_to_le32(ECP_MAGIC))
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index b2dbbdb1bf8..2f07b085536 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -391,8 +391,8 @@ static MGSL_PARAMS default_params = {
#define DESC_LIST_SIZE 4096
#define MASK_PARITY BIT1
-#define MASK_FRAMING BIT2
-#define MASK_BREAK BIT3
+#define MASK_FRAMING BIT0
+#define MASK_BREAK BIT14
#define MASK_OVERRUN BIT4
#define GSR 0x00 /* global status */
@@ -1800,17 +1800,17 @@ static void rx_async(struct slgt_info *info)
stat = 0;
- if ((status = *(p+1) & (BIT9 + BIT8))) {
- if (status & BIT9)
+ if ((status = *(p+1) & (BIT1 + BIT0))) {
+ if (status & BIT1)
icount->parity++;
- else if (status & BIT8)
+ else if (status & BIT0)
icount->frame++;
/* discard char if tty control flags say so */
if (status & info->ignore_status_mask)
continue;
- if (status & BIT9)
+ if (status & BIT1)
stat = TTY_PARITY;
- else if (status & BIT8)
+ else if (status & BIT0)
stat = TTY_FRAME;
}
if (tty) {
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
index 2e68eeb8a2c..aefd683c60b 100644
--- a/drivers/char/tpm/tpm_atmel.h
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -37,7 +37,7 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
{
struct device_node *dn;
unsigned long address, size;
- unsigned int *reg;
+ const unsigned int *reg;
int reglen;
int naddrc;
int nsizec;
@@ -52,7 +52,7 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
return NULL;
}
- reg = (unsigned int *) get_property(dn, "reg", &reglen);
+ reg = get_property(dn, "reg", &reglen);
naddrc = prom_n_addr_cells(dn);
nsizec = prom_n_size_cells(dn);
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 766f7864c6c..f3efeaf2826 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -43,7 +43,6 @@
#include <linux/sysrq.h>
#include <asm/iseries/vio.h>
-
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/hv_lp_config.h>
@@ -67,35 +66,6 @@ static int vio_sysrq_pressed;
extern int sysrq_enabled;
#endif
-/*
- * The structure of the events that flow between us and OS/400. You can't
- * mess with this unless the OS/400 side changes too
- */
-struct viocharlpevent {
- struct HvLpEvent event;
- u32 reserved;
- u16 version;
- u16 subtype_result_code;
- u8 virtual_device;
- u8 len;
- u8 data[VIOCHAR_MAX_DATA];
-};
-
-#define VIOCHAR_WINDOW 10
-#define VIOCHAR_HIGHWATERMARK 3
-
-enum viocharsubtype {
- viocharopen = 0x0001,
- viocharclose = 0x0002,
- viochardata = 0x0003,
- viocharack = 0x0004,
- viocharconfig = 0x0005
-};
-
-enum viochar_rc {
- viochar_rc_ebusy = 1
-};
-
#define VIOCHAR_NUM_BUF 16
/*
@@ -1183,6 +1153,7 @@ static int __init viocons_init(void)
port_info[i].magic = VIOTTY_MAGIC;
}
HvCall_setLogBufferFormatAndCodepage(HvCall_LogBuffer_ASCII, 437);
+ add_preferred_console("viocons", 0, NULL);
register_console(&viocons_early);
return 0;
}
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index b72b2049aaa..73c78bf75d7 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -940,7 +940,6 @@ static void vioHandleTapeEvent(struct HvLpEvent *event)
static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
- char tapename[32];
int i = vdev->unit_address;
int j;
@@ -956,10 +955,9 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
"iseries!vt%d", i);
class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80),
NULL, "iseries!nvt%d", i);
- sprintf(tapename, "iseries/vt%d", i);
- printk(VIOTAPE_KERN_INFO "tape %s is iSeries "
+ printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
"resource %10.10s type %4.4s, model %3.3s\n",
- tapename, viotape_unitinfo[i].rsrcname,
+ i, viotape_unitinfo[i].rsrcname,
viotape_unitinfo[i].type, viotape_unitinfo[i].model);
return 0;
}
diff --git a/drivers/char/watchdog/sbc8360.c b/drivers/char/watchdog/sbc8360.c
index 1035be5b501..41fc6f80c49 100644
--- a/drivers/char/watchdog/sbc8360.c
+++ b/drivers/char/watchdog/sbc8360.c
@@ -200,7 +200,7 @@ static int wd_margin = 0xB;
static int wd_multiplier = 2;
static int nowayout = WATCHDOG_NOWAYOUT;
-module_param(timeout, int, 27);
+module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Index into timeout table (0-63) (default=27 (60s))");
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
@@ -407,7 +407,7 @@ module_exit(sbc8360_exit);
MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>");
MODULE_DESCRIPTION("SBC8360 watchdog driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0");
+MODULE_VERSION("1.01");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
/* end of sbc8360.c */
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3df613ae4e..d35a9f06ab7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -32,7 +32,7 @@
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
/**
- * The "cpufreq driver" - the arch- or hardware-dependend low
+ * The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
* also protects the cpufreq_cpu_data array.
*/
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 52cf1f02182..bf8aa45d4f0 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -55,6 +55,10 @@ struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
struct work_struct work;
unsigned int enable;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_lo;
+ unsigned int freq_lo_jiffies;
+ unsigned int freq_hi_jiffies;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -72,15 +76,15 @@ static DEFINE_MUTEX(dbs_mutex);
static struct workqueue_struct *kondemand_wq;
-struct dbs_tuners {
+static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int up_threshold;
unsigned int ignore_nice;
-};
-
-static struct dbs_tuners dbs_tuners_ins = {
+ unsigned int powersave_bias;
+} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.ignore_nice = 0,
+ .powersave_bias = 0,
};
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
@@ -96,6 +100,70 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
return retval;
}
+/*
+ * Find the right freq to be set now with powersave_bias on.
+ * Returns the freq_hi to be used right now and sets freq_hi_jiffies,
+ * freq_lo, and freq_lo_jiffies in the per-CPU area so that the
+ * average frequency works out to the biased target.
+ */
+static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
+ unsigned int freq_next,
+ unsigned int relation)
+{
+ unsigned int freq_req, freq_reduc, freq_avg;
+ unsigned int freq_hi, freq_lo;
+ unsigned int index = 0;
+ unsigned int jiffies_total, jiffies_hi, jiffies_lo;
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+
+ if (!dbs_info->freq_table) {
+ dbs_info->freq_lo = 0;
+ dbs_info->freq_lo_jiffies = 0;
+ return freq_next;
+ }
+
+ cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
+ relation, &index);
+ freq_req = dbs_info->freq_table[index].frequency;
+ freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+ freq_avg = freq_req - freq_reduc;
+
+ /* Find freq bounds for freq_avg in freq_table */
+ index = 0;
+ cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+ CPUFREQ_RELATION_H, &index);
+ freq_lo = dbs_info->freq_table[index].frequency;
+ index = 0;
+ cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+ CPUFREQ_RELATION_L, &index);
+ freq_hi = dbs_info->freq_table[index].frequency;
+
+ /* Find out how long we have to be in hi and lo freqs */
+ if (freq_hi == freq_lo) {
+ dbs_info->freq_lo = 0;
+ dbs_info->freq_lo_jiffies = 0;
+ return freq_lo;
+ }
+ jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
+ jiffies_hi += ((freq_hi - freq_lo) / 2);
+ jiffies_hi /= (freq_hi - freq_lo);
+ jiffies_lo = jiffies_total - jiffies_hi;
+ dbs_info->freq_lo = freq_lo;
+ dbs_info->freq_lo_jiffies = jiffies_lo;
+ dbs_info->freq_hi_jiffies = jiffies_hi;
+ return freq_hi;
+}
+
+static void ondemand_powersave_bias_init(void)
+{
+ int i;
+ for_each_online_cpu(i) {
+ struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+ dbs_info->freq_table = cpufreq_frequency_get_table(i);
+ dbs_info->freq_lo = 0;
+ }
+}
+
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
@@ -124,6 +192,7 @@ static ssize_t show_##file_name \
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
+show_one(powersave_bias, powersave_bias);
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
const char *buf, size_t count)
@@ -198,6 +267,27 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
return count;
}
+static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1000)
+ input = 1000;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.powersave_bias = input;
+ ondemand_powersave_bias_init();
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
@@ -205,6 +295,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
+define_one_rw(powersave_bias);
static struct attribute * dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -212,6 +303,7 @@ static struct attribute * dbs_attributes[] = {
&sampling_rate.attr,
&up_threshold.attr,
&ignore_nice_load.attr,
+ &powersave_bias.attr,
NULL
};
@@ -234,6 +326,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
if (!this_dbs_info->enable)
return;
+ this_dbs_info->freq_lo = 0;
policy = this_dbs_info->cur_policy;
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
@@ -274,11 +367,18 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
/* Check for frequency increase */
if (load > dbs_tuners_ins.up_threshold) {
/* if we are already at full speed then break out early */
- if (policy->cur == policy->max)
- return;
-
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_H);
+ if (!dbs_tuners_ins.powersave_bias) {
+ if (policy->cur == policy->max)
+ return;
+
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+ } else {
+ int freq = powersave_bias_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, freq,
+ CPUFREQ_RELATION_L);
+ }
return;
}
@@ -293,37 +393,64 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
* policy. To be safe, we focus 10 points under the threshold.
*/
if (load < (dbs_tuners_ins.up_threshold - 10)) {
- unsigned int freq_next;
- freq_next = (policy->cur * load) /
+ unsigned int freq_next = (policy->cur * load) /
(dbs_tuners_ins.up_threshold - 10);
-
- __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+ if (!dbs_tuners_ins.powersave_bias) {
+ __cpufreq_driver_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ } else {
+ int freq = powersave_bias_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq,
+ CPUFREQ_RELATION_L);
+ }
}
}
+/* Sampling types */
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+
static void do_dbs_timer(void *data)
{
unsigned int cpu = smp_processor_id();
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	/* We want all CPUs to do sampling nearly on the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ delay -= jiffies % delay;
if (!dbs_info->enable)
return;
-
- lock_cpu_hotplug();
- dbs_check_cpu(dbs_info);
- unlock_cpu_hotplug();
- queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
- usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ /* Common NORMAL_SAMPLE setup */
+ INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+ if (!dbs_tuners_ins.powersave_bias ||
+ (unsigned long) data == DBS_NORMAL_SAMPLE) {
+ lock_cpu_hotplug();
+ dbs_check_cpu(dbs_info);
+ unlock_cpu_hotplug();
+ if (dbs_info->freq_lo) {
+ /* Setup timer for SUB_SAMPLE */
+ INIT_WORK(&dbs_info->work, do_dbs_timer,
+ (void *)DBS_SUB_SAMPLE);
+ delay = dbs_info->freq_hi_jiffies;
+ }
+ } else {
+ __cpufreq_driver_target(dbs_info->cur_policy,
+ dbs_info->freq_lo,
+ CPUFREQ_RELATION_H);
+ }
+ queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}
static inline void dbs_timer_init(unsigned int cpu)
{
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	/* We want all CPUs to do sampling nearly on the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ delay -= jiffies % delay;
- INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
- queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
- usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
- return;
+ ondemand_powersave_bias_init();
+ INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+ queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
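
A worked example of powersave_bias_target()'s integer arithmetic, runnable as plain userspace C. Suppose the frequency table has entries at 1600000 and 2000000 kHz, the governor requests 2000000, powersave_bias is 100 (the tunable is in units of 0.1%, clamped to 1000 by store_powersave_bias()), and one sampling interval is 20 jiffies; all numbers are illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned int freq_req = 2000000, freq_lo = 1600000, freq_hi = 2000000;
		unsigned int bias = 100;		/* 10%, in units of 0.1% */
		unsigned int jiffies_total = 20;	/* one sampling interval */

		unsigned int freq_reduc = freq_req * bias / 1000;	/* 200000 */
		unsigned int freq_avg = freq_req - freq_reduc;		/* 1800000 */

		/* Split the interval so the time-weighted average frequency
		 * equals freq_avg, rounding to the nearest jiffy as the
		 * driver does. */
		unsigned int jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
		jiffies_hi += (freq_hi - freq_lo) / 2;
		jiffies_hi /= (freq_hi - freq_lo);			/* 10 */
		unsigned int jiffies_lo = jiffies_total - jiffies_hi;	/* 10 */

		printf("run %u jiffies at %u kHz, %u jiffies at %u kHz\n",
		       jiffies_hi, freq_hi, jiffies_lo, freq_lo);
		return 0;
	}

Ten jiffies at each frequency averages to exactly 1800000 kHz, i.e. the requested speed minus the 10% bias. The same patch also keeps sampling synchronized: do_dbs_timer() computes delay -= jiffies % delay so every CPU's work item fires near the same jiffy boundary.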
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 25eee539420..c2ecc599dc5 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,12 +350,10 @@ __init cpufreq_stats_init(void)
}
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
- lock_cpu_hotplug();
for_each_online_cpu(cpu) {
cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
(void *)(long)cpu);
}
- unlock_cpu_hotplug();
return 0;
}
static void
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4263935443c..adb554153f6 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,22 +2,53 @@ menu "Hardware crypto devices"
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
- depends on CRYPTO && X86_32
+ depends on X86_32
+ select CRYPTO_ALGAPI
+ default m
help
Some VIA processors come with an integrated crypto engine
(so called VIA PadLock ACE, Advanced Cryptography Engine)
- that provides instructions for very fast {en,de}cryption
- with some algorithms.
+ that provides instructions for very fast cryptographic
+ operations with supported algorithms.
The instructions are used only when the CPU supports them.
- Otherwise software encryption is used. If you are unsure,
- say Y.
+ Otherwise software encryption is used.
+
+	  Selecting M for this option will compile a helper module
+	  padlock.ko that should autoload all the algorithms
+	  configured below. Don't worry if your hardware does not
+	  support some or all of them. In that case padlock.ko will
+	  simply write a single line to the kernel log noting the
+	  failure, but everything will keep working fine.
+
+	  If you are unsure, say M. The compiled module will be
+	  called padlock.ko.
config CRYPTO_DEV_PADLOCK_AES
- bool "Support for AES in VIA PadLock"
+ tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
- default y
+ select CRYPTO_BLKCIPHER
+ default m
help
Use VIA PadLock for AES algorithm.
+ Available in VIA C3 and newer CPUs.
+
+	  If unsure, say M. The compiled module will be
+	  called padlock-aes.ko.
+
+config CRYPTO_DEV_PADLOCK_SHA
+ tristate "PadLock driver for SHA1 and SHA256 algorithms"
+ depends on CRYPTO_DEV_PADLOCK
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ default m
+ help
+ Use VIA PadLock for SHA1/SHA256 algorithms.
+
+ Available in VIA C7 and newer processors.
+
+	  If unsure, say M. The compiled module will be
+	  called padlock-sha.ko.
+
endmenu
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 45426ca19a2..4c3d0ec1cf8 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,7 +1,3 @@
-
obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
-
-padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
-
-padlock-objs := padlock-generic.o $(padlock-objs-y)
-
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
+obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b643d71298a..d4501dc7e65 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -43,11 +43,11 @@
* ---------------------------------------------------------------------------
*/
+#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
@@ -59,6 +59,17 @@
#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
+/* Control word. */
+struct cword {
+ unsigned int __attribute__ ((__packed__))
+ rounds:4,
+ algo:3,
+ keygen:1,
+ interm:1,
+ encdec:1,
+ ksize:2;
+} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
+
/* Whenever making any changes to the following
* structure *make sure* you keep E, d_data
* and cword aligned on 16 Bytes boundaries!!! */
@@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
return 0;
}
-static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
- unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
+ unsigned long addr = (unsigned long)ctx;
unsigned long align = PADLOCK_ALIGNMENT;
if (align <= crypto_tfm_ctx_alignment())
@@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
return (struct aes_ctx *)ALIGN(addr, align);
}
+static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
+{
+ return aes_ctx_common(crypto_tfm_ctx(tfm));
+}
+
+static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
+{
+ return aes_ctx_common(crypto_blkcipher_ctx(tfm));
+}
+
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len, u32 *flags)
+ unsigned int key_len)
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
+ u32 *flags = &tfm->crt_flags;
uint32_t i, t, u, v, w;
uint32_t P[AES_EXTENDED_KEY_SIZE];
uint32_t rounds;
- if (key_len != 16 && key_len != 24 && key_len != 32) {
+ if (key_len % 8) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
@@ -430,80 +452,212 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
}
-static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aes_ctx),
+ .cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_set_key,
+ .cia_encrypt = aes_encrypt,
+ .cia_decrypt = aes_decrypt,
+ }
+ }
+};
+
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
- nbytes / AES_BLOCK_SIZE);
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+ ctx->E, &ctx->cword.encrypt,
+ nbytes / AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
}
-static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
- nbytes / AES_BLOCK_SIZE);
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
+ ctx->D, &ctx->cword.decrypt,
+ nbytes / AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
}
-static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
-{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- u8 *iv;
+static struct crypto_alg ecb_aes_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-padlock",
+ .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aes_ctx),
+ .cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
+ }
+ }
+};
- iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
- &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
- memcpy(desc->info, iv, AES_BLOCK_SIZE);
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
+ walk.dst.virt.addr, ctx->E,
+ walk.iv, &ctx->cword.encrypt,
+ nbytes / AES_BLOCK_SIZE);
+ memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ return err;
}
-static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
- const u8 *in, unsigned int nbytes)
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
{
- struct aes_ctx *ctx = aes_ctx(desc->tfm);
- padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
- nbytes / AES_BLOCK_SIZE);
- return nbytes & ~(AES_BLOCK_SIZE - 1);
+ struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
+ ctx->D, walk.iv, &ctx->cword.decrypt,
+ nbytes / AES_BLOCK_SIZE);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
}
-static struct crypto_alg aes_alg = {
- .cra_name = "aes",
- .cra_driver_name = "aes-padlock",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+static struct crypto_alg cbc_aes_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-padlock",
+ .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
+ .cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = aes_encrypt,
- .cia_decrypt = aes_decrypt,
- .cia_encrypt_ecb = aes_encrypt_ecb,
- .cia_decrypt_ecb = aes_decrypt_ecb,
- .cia_encrypt_cbc = aes_encrypt_cbc,
- .cia_decrypt_cbc = aes_decrypt_cbc,
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
}
}
};
-int __init padlock_init_aes(void)
+static int __init padlock_init(void)
{
- printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+ int ret;
+
+ if (!cpu_has_xcrypt) {
+ printk(KERN_ERR PFX "VIA PadLock not detected.\n");
+ return -ENODEV;
+ }
+
+ if (!cpu_has_xcrypt_enabled) {
+ printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+ return -ENODEV;
+ }
gen_tabs();
- return crypto_register_alg(&aes_alg);
+ if ((ret = crypto_register_alg(&aes_alg)))
+ goto aes_err;
+
+ if ((ret = crypto_register_alg(&ecb_aes_alg)))
+ goto ecb_aes_err;
+
+ if ((ret = crypto_register_alg(&cbc_aes_alg)))
+ goto cbc_aes_err;
+
+ printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+
+out:
+ return ret;
+
+cbc_aes_err:
+ crypto_unregister_alg(&ecb_aes_alg);
+ecb_aes_err:
+ crypto_unregister_alg(&aes_alg);
+aes_err:
+ printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
+ goto out;
}
-void __exit padlock_fini_aes(void)
+static void __exit padlock_fini(void)
{
+ crypto_unregister_alg(&cbc_aes_alg);
+ crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS("aes-padlock");
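
With the conversion from the old cia_*_ecb/cbc hooks to the blkcipher type, callers reach the PadLock path through the generic API. A hedged in-kernel sketch, assuming the blkcipher interface this patch series targets (error handling trimmed, key and IV sizes fixed at 16 bytes for brevity):

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Sketch only: encrypt buf in place with CBC-AES; "cbc(aes)"
	 * resolves to cbc-aes-padlock when its priority wins. */
	static int demo_cbc(void *buf, unsigned int len, const u8 *key, u8 *iv)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;

		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;
		crypto_blkcipher_setkey(tfm, key, 16);
		crypto_blkcipher_set_iv(tfm, iv, 16);

		sg_init_one(&sg, buf, len);	/* len must be a multiple of 16 */
		crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

		crypto_free_blkcipher(tfm);
		return 0;
	}

Note the priority split in padlock.h: the plain cipher registers at PADLOCK_CRA_PRIORITY (300) while the composite ecb/cbc modes register at PADLOCK_COMPOSITE_PRIORITY (400), so they beat generic templates wrapped around the bare cipher.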
diff --git a/drivers/crypto/padlock-generic.c b/drivers/crypto/padlock-generic.c
deleted file mode 100644
index 18cf0e8274a..00000000000
--- a/drivers/crypto/padlock-generic.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for VIA PadLock hardware crypto engine.
- *
- * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include <asm/byteorder.h>
-#include "padlock.h"
-
-static int __init
-padlock_init(void)
-{
- int ret = -ENOSYS;
-
- if (!cpu_has_xcrypt) {
- printk(KERN_ERR PFX "VIA PadLock not detected.\n");
- return -ENODEV;
- }
-
- if (!cpu_has_xcrypt_enabled) {
- printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
- return -ENODEV;
- }
-
-#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
- if ((ret = padlock_init_aes())) {
- printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
- return ret;
- }
-#endif
-
- if (ret == -ENOSYS)
- printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
-
- return ret;
-}
-
-static void __exit
-padlock_fini(void)
-{
-#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
- padlock_fini_aes();
-#endif
-}
-
-module_init(padlock_init);
-module_exit(padlock_fini);
-
-MODULE_DESCRIPTION("VIA PadLock crypto engine support.");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Michal Ludvig");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
new file mode 100644
index 00000000000..a781fd23b60
--- /dev/null
+++ b/drivers/crypto/padlock-sha.c
@@ -0,0 +1,318 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/cryptohash.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include "padlock.h"
+
+#define SHA1_DEFAULT_FALLBACK "sha1-generic"
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_HMAC_BLOCK_SIZE 64
+
+#define SHA256_DEFAULT_FALLBACK "sha256-generic"
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_HMAC_BLOCK_SIZE 64
+
+struct padlock_sha_ctx {
+ char *data;
+ size_t used;
+ int bypass;
+ void (*f_sha_padlock)(const char *in, char *out, int count);
+ struct hash_desc fallback;
+};
+
+static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
+{
+ return crypto_tfm_ctx(tfm);
+}
+
+/* We'll need aligned address on the stack */
+#define NEAREST_ALIGNED(ptr) \
+ ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
+
+static struct crypto_alg sha1_alg, sha256_alg;
+
+static void padlock_sha_bypass(struct crypto_tfm *tfm)
+{
+ if (ctx(tfm)->bypass)
+ return;
+
+ crypto_hash_init(&ctx(tfm)->fallback);
+ if (ctx(tfm)->data && ctx(tfm)->used) {
+ struct scatterlist sg;
+
+ sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
+ crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
+ }
+
+ ctx(tfm)->used = 0;
+ ctx(tfm)->bypass = 1;
+}
+
+static void padlock_sha_init(struct crypto_tfm *tfm)
+{
+ ctx(tfm)->used = 0;
+ ctx(tfm)->bypass = 0;
+}
+
+static void padlock_sha_update(struct crypto_tfm *tfm,
+ const uint8_t *data, unsigned int length)
+{
+ /* Our buffer is always one page. */
+ if (unlikely(!ctx(tfm)->bypass &&
+ (ctx(tfm)->used + length > PAGE_SIZE)))
+ padlock_sha_bypass(tfm);
+
+ if (unlikely(ctx(tfm)->bypass)) {
+ struct scatterlist sg;
+ sg_set_buf(&sg, (uint8_t *)data, length);
+ crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
+ return;
+ }
+
+ memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
+ ctx(tfm)->used += length;
+}
+
+static inline void padlock_output_block(uint32_t *src,
+ uint32_t *dst, size_t count)
+{
+ while (count--)
+ *dst++ = swab32(*src++);
+}
+
+static void padlock_do_sha1(const char *in, char *out, int count)
+{
+ /* We can't store directly to *out as it may be unaligned. */
+ /* BTW Don't reduce the buffer size below 128 Bytes!
+ * PadLock microcode needs it that big. */
+ char buf[128+16];
+ char *result = NEAREST_ALIGNED(buf);
+
+ ((uint32_t *)result)[0] = 0x67452301;
+ ((uint32_t *)result)[1] = 0xEFCDAB89;
+ ((uint32_t *)result)[2] = 0x98BADCFE;
+ ((uint32_t *)result)[3] = 0x10325476;
+ ((uint32_t *)result)[4] = 0xC3D2E1F0;
+
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
+ : "+S"(in), "+D"(result)
+ : "c"(count), "a"(0));
+
+ padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
+}
+
+static void padlock_do_sha256(const char *in, char *out, int count)
+{
+ /* We can't store directly to *out as it may be unaligned. */
+ /* BTW Don't reduce the buffer size below 128 Bytes!
+ * PadLock microcode needs it that big. */
+ char buf[128+16];
+ char *result = NEAREST_ALIGNED(buf);
+
+ ((uint32_t *)result)[0] = 0x6A09E667;
+ ((uint32_t *)result)[1] = 0xBB67AE85;
+ ((uint32_t *)result)[2] = 0x3C6EF372;
+ ((uint32_t *)result)[3] = 0xA54FF53A;
+ ((uint32_t *)result)[4] = 0x510E527F;
+ ((uint32_t *)result)[5] = 0x9B05688C;
+ ((uint32_t *)result)[6] = 0x1F83D9AB;
+ ((uint32_t *)result)[7] = 0x5BE0CD19;
+
+ asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
+ : "+S"(in), "+D"(result)
+ : "c"(count), "a"(0));
+
+ padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
+}
+
+static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
+{
+ if (unlikely(ctx(tfm)->bypass)) {
+ crypto_hash_final(&ctx(tfm)->fallback, out);
+ ctx(tfm)->bypass = 0;
+ return;
+ }
+
+ /* Pass the input buffer to PadLock microcode... */
+ ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
+
+ ctx(tfm)->used = 0;
+}
+
+static int padlock_cra_init(struct crypto_tfm *tfm)
+{
+ const char *fallback_driver_name = tfm->__crt_alg->cra_name;
+ struct crypto_hash *fallback_tfm;
+
+	/* For now we'll allocate one page. This
+	 * could be made configurable one day. */
+ ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
+ if (!ctx(tfm)->data)
+ return -ENOMEM;
+
+ /* Allocate a fallback and abort if it failed. */
+ fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback_tfm)) {
+ printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
+ fallback_driver_name);
+ free_page((unsigned long)(ctx(tfm)->data));
+ return PTR_ERR(fallback_tfm);
+ }
+
+ ctx(tfm)->fallback.tfm = fallback_tfm;
+ return 0;
+}
+
+static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ ctx(tfm)->f_sha_padlock = padlock_do_sha1;
+
+ return padlock_cra_init(tfm);
+}
+
+static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
+{
+ ctx(tfm)->f_sha_padlock = padlock_do_sha256;
+
+ return padlock_cra_init(tfm);
+}
+
+static void padlock_cra_exit(struct crypto_tfm *tfm)
+{
+ if (ctx(tfm)->data) {
+ free_page((unsigned long)(ctx(tfm)->data));
+ ctx(tfm)->data = NULL;
+ }
+
+ crypto_free_hash(ctx(tfm)->fallback.tfm);
+ ctx(tfm)->fallback.tfm = NULL;
+}
+
+static struct crypto_alg sha1_alg = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
+ .cra_init = padlock_sha1_cra_init,
+ .cra_exit = padlock_cra_exit,
+ .cra_u = {
+ .digest = {
+ .dia_digestsize = SHA1_DIGEST_SIZE,
+ .dia_init = padlock_sha_init,
+ .dia_update = padlock_sha_update,
+ .dia_final = padlock_sha_final,
+ }
+ }
+};
+
+static struct crypto_alg sha256_alg = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-padlock",
+ .cra_priority = PADLOCK_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct padlock_sha_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
+ .cra_init = padlock_sha256_cra_init,
+ .cra_exit = padlock_cra_exit,
+ .cra_u = {
+ .digest = {
+ .dia_digestsize = SHA256_DIGEST_SIZE,
+ .dia_init = padlock_sha_init,
+ .dia_update = padlock_sha_update,
+ .dia_final = padlock_sha_final,
+ }
+ }
+};
+
+static void __init padlock_sha_check_fallbacks(void)
+{
+ if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK))
+ printk(KERN_WARNING PFX
+ "Couldn't load fallback module for sha1.\n");
+
+ if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK))
+ printk(KERN_WARNING PFX
+ "Couldn't load fallback module for sha256.\n");
+}
+
+static int __init padlock_init(void)
+{
+ int rc = -ENODEV;
+
+ if (!cpu_has_phe) {
+ printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
+ return -ENODEV;
+ }
+
+ if (!cpu_has_phe_enabled) {
+ printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
+ return -ENODEV;
+ }
+
+ padlock_sha_check_fallbacks();
+
+ rc = crypto_register_alg(&sha1_alg);
+ if (rc)
+ goto out;
+
+ rc = crypto_register_alg(&sha256_alg);
+ if (rc)
+ goto out_unreg1;
+
+ printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
+
+ return 0;
+
+out_unreg1:
+ crypto_unregister_alg(&sha1_alg);
+out:
+ printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
+ return rc;
+}
+
+static void __exit padlock_fini(void)
+{
+ crypto_unregister_alg(&sha1_alg);
+ crypto_unregister_alg(&sha256_alg);
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
+MODULE_ALIAS("sha1-padlock");
+MODULE_ALIAS("sha256-padlock");
diff --git a/drivers/crypto/padlock.c b/drivers/crypto/padlock.c
new file mode 100644
index 00000000000..d6d7dd5bb98
--- /dev/null
+++ b/drivers/crypto/padlock.c
@@ -0,0 +1,58 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for VIA PadLock hardware crypto engine.
+ *
+ * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include "padlock.h"
+
+static int __init padlock_init(void)
+{
+ int success = 0;
+
+ if (crypto_has_cipher("aes-padlock", 0, 0))
+ success++;
+
+ if (crypto_has_hash("sha1-padlock", 0, 0))
+ success++;
+
+ if (crypto_has_hash("sha256-padlock", 0, 0))
+ success++;
+
+ if (!success) {
+ printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
+ return -ENODEV;
+ }
+
+ printk(KERN_NOTICE PFX "%d drivers are available.\n", success);
+
+ return 0;
+}
+
+static void __exit padlock_fini(void)
+{
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index b78489bc298..b728e4518bd 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -15,22 +15,9 @@
#define PADLOCK_ALIGNMENT 16
-/* Control word. */
-struct cword {
- unsigned int __attribute__ ((__packed__))
- rounds:4,
- algo:3,
- keygen:1,
- interm:1,
- encdec:1,
- ksize:2;
-} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
-
#define PFX "padlock: "
-#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
-int padlock_init_aes(void);
-void padlock_fini_aes(void);
-#endif
+#define PADLOCK_CRA_PRIORITY 300
+#define PADLOCK_COMPOSITE_PRIORITY 400
#endif /* _CRYPTO_PADLOCK_H */
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 53bb4359386..d658d910795 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -207,7 +207,8 @@ static int i2c_powermac_probe(struct device *dev)
struct pmac_i2c_bus *bus = dev->platform_data;
struct device_node *parent = NULL;
struct i2c_adapter *adapter;
- char name[32], *basename;
+ char name[32];
+ const char *basename;
int rc;
if (bus == NULL)
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index c12f1b71e93..41b74b13a00 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -376,6 +376,8 @@ static int proc_ide_read_media
break;
case ide_floppy:media = "floppy\n";
break;
+ case ide_optical:media = "optical\n";
+ break;
default: media = "UNKNOWN\n";
break;
}
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index a7c725f8bf6..f286079d233 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -425,12 +425,12 @@ static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_devi
return d->init_setup(dev, d);
}
-static const struct pci_device_id aec62xx_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF), 0 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860), 1 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R), 2 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865), 3 },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R), 4 },
+static struct pci_device_id aec62xx_pci_tbl[] = {
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP850UF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP860R, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_ATP865R, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, aec62xx_pci_tbl);
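For readers decoding the open-coded entries: the seven positional values fill struct pci_device_id in declaration order, and the final one lands in .driver_data, which the probe routine uses as an index into the driver's chipset array. The first entry, rewritten with designated initializers (illustrative sketch only):

	static const struct pci_device_id example_tbl[] = {
		{ .vendor      = PCI_VENDOR_ID_ARTOP,
		  .device      = PCI_DEVICE_ID_ARTOP_ATP850UF,
		  .subvendor   = PCI_ANY_ID,
		  .subdevice   = PCI_ANY_ID,
		  .class       = 0,
		  .class_mask  = 0,
		  .driver_data = 0 },	/* index into the chipset array */
		{ 0, },
	};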
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 351dab2fcac..d419e4bb54f 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -730,7 +730,7 @@ static unsigned int __devinit ata66_ali15x3 (ide_hwif_t *hwif)
if(m5229_revision <= 0x20)
tmpbyte = (tmpbyte & (~0x02)) | 0x01;
- else if (m5229_revision == 0xc7)
+ else if (m5229_revision == 0xc7 || m5229_revision == 0xc8)
tmpbyte |= 0x03;
else
tmpbyte |= 0x01;
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index 03677bff0d7..f063d954236 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -649,11 +649,11 @@ static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device
}
static struct pci_device_id svwks_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 1},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 3},
- { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 4},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
+ { PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index e125032bb40..d8a0d87df73 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -367,12 +367,13 @@ sgiioc4_INB(unsigned long port)
static void __devinit
ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
{
+ void __iomem *virt_dma_base;
int num_ports = sizeof (ioc4_dma_regs_t);
printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
dma_base, dma_base + num_ports - 1);
- if (!request_region(dma_base, num_ports, hwif->name)) {
+ if (!request_mem_region(dma_base, num_ports, hwif->name)) {
printk(KERN_ERR
"%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
"ALREADY in use\n",
@@ -381,13 +382,21 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
goto dma_alloc_failure;
}
- hwif->dma_base = dma_base;
+ virt_dma_base = ioremap(dma_base, num_ports);
+ if (virt_dma_base == NULL) {
+ printk(KERN_ERR
+ "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
+ __FUNCTION__, hwif->name, dma_base, dma_base + num_ports - 1);
+ goto dma_remap_failure;
+ }
+ hwif->dma_base = (unsigned long) virt_dma_base;
+
hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
&hwif->dmatable_dma);
if (!hwif->dmatable_cpu)
- goto dma_alloc_failure;
+ goto dma_pci_alloc_failure;
hwif->sg_max_nents = IOC4_PRD_ENTRIES;
@@ -411,6 +420,12 @@ dma_base2alloc_failure:
printk(KERN_INFO
"Changing from DMA to PIO mode for Drive %s\n", hwif->name);
+dma_pci_alloc_failure:
+ iounmap(virt_dma_base);
+
+dma_remap_failure:
+ release_mem_region(dma_base, num_ports);
+
dma_alloc_failure:
	/* Disable DMA because we could not allocate any DMA maps */
hwif->autodma = 0;
@@ -607,18 +622,15 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq;
hwif->ide_dma_timeout = &__ide_dma_timeout;
- /*
- * The IOC4 uses MMIO rather than Port IO.
- * It also needs special workarounds for INB.
- */
- default_hwif_mmiops(hwif);
hwif->INB = &sgiioc4_INB;
}
static int __devinit
sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
{
- unsigned long base, ctl, dma_base, irqport;
+ unsigned long cmd_base, dma_base, irqport;
+ unsigned long bar0, cmd_phys_base, ctl;
+ void __iomem *virt_base;
ide_hwif_t *hwif;
int h;
@@ -636,23 +648,32 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
}
/* Get the CmdBlk and CtrlBlk Base Registers */
- base = pci_resource_start(dev, 0) + IOC4_CMD_OFFSET;
- ctl = pci_resource_start(dev, 0) + IOC4_CTRL_OFFSET;
- irqport = pci_resource_start(dev, 0) + IOC4_INTR_OFFSET;
+ bar0 = pci_resource_start(dev, 0);
+ virt_base = ioremap(bar0, pci_resource_len(dev, 0));
+ if (virt_base == NULL) {
+ printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n",
+ d->name, bar0);
+ return -ENOMEM;
+ }
+ cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
+ ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
+ irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
- if (!request_region(base, IOC4_CMD_CTL_BLK_SIZE, hwif->name)) {
+ cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
+ if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
+ hwif->name)) {
printk(KERN_ERR
- "%s : %s -- ERROR, Port Addresses "
+ "%s : %s -- ERROR, Addresses "
"0x%p to 0x%p ALREADY in use\n",
- __FUNCTION__, hwif->name, (void *) base,
- (void *) base + IOC4_CMD_CTL_BLK_SIZE);
+ __FUNCTION__, hwif->name, (void *) cmd_phys_base,
+ (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
return -ENOMEM;
}
- if (hwif->io_ports[IDE_DATA_OFFSET] != base) {
+ if (hwif->io_ports[IDE_DATA_OFFSET] != cmd_base) {
/* Initialize the IO registers */
- sgiioc4_init_hwif_ports(&hwif->hw, base, ctl, irqport);
+ sgiioc4_init_hwif_ports(&hwif->hw, cmd_base, ctl, irqport);
memcpy(hwif->io_ports, hwif->hw.io_ports,
sizeof (hwif->io_ports));
hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
@@ -665,6 +686,9 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
hwif->cds = (struct ide_pci_device_s *) d;
hwif->gendev.parent = &dev->dev;/* setup proper ancestral information */
+ /* The IOC4 uses MMIO rather than Port IO. */
+ default_hwif_mmiops(hwif);
+
/* Initializing chipset IRQ Registers */
hwif->OUTL(0x03, irqport + IOC4_INTR_SET * 4);
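The sgiioc4 conversion above follows the usual MMIO resource discipline that replaces the old port-I/O calls: claim the physical range, map it, and unwind in reverse order on failure. Condensed into a sketch (phys_base, size, and name stand in for the driver's actual values):

	void __iomem *regs;

	if (!request_mem_region(phys_base, size, name))
		return -EBUSY;

	regs = ioremap(phys_base, size);
	if (!regs) {
		release_mem_region(phys_base, size);
		return -ENOMEM;
	}

	/* ... access the hardware through readl()/writel() on regs ... */

	iounmap(regs);
	release_mem_region(phys_base, size);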
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 25ceb4a39ed..20b392948f3 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -1082,10 +1082,10 @@ static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_devi
}
static struct pci_device_id siimage_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), 0},
+ { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#ifdef CONFIG_BLK_DEV_IDE_SATA
- { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_3112), 1},
- { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_1210SA), 2},
+ { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_1210SA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
#endif
{ 0, },
};
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 8a6c23ac8cc..f03196c5db3 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -86,6 +86,8 @@ static const struct {
u8 chipset_family;
u8 flags;
} SiSHostChipInfo[] = {
+ { "SiS968", PCI_DEVICE_ID_SI_968, ATA_133 },
+ { "SiS966", PCI_DEVICE_ID_SI_966, ATA_133 },
{ "SiS965", PCI_DEVICE_ID_SI_965, ATA_133 },
{ "SiS745", PCI_DEVICE_ID_SI_745, ATA_100 },
{ "SiS735", PCI_DEVICE_ID_SI_735, ATA_100 },
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index afdaee3c15c..9b7589e8e93 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -6,7 +6,7 @@
*
* vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
* vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
- * vt8235, vt8237
+ * vt8235, vt8237, vt8237a
*
* Copyright (c) 2000-2002 Vojtech Pavlik
*
@@ -81,6 +81,7 @@ static struct via_isa_bridge {
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
+ { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index ebf961f1718..996c694341b 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1154,7 +1154,7 @@ static int
pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
{
struct device_node *np = pmif->node;
- int *bidp;
+ const int *bidp;
pmif->cable_80 = 0;
pmif->broken_dma = pmif->broken_dma_warn = 0;
@@ -1176,14 +1176,14 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
pmif->broken_dma = 1;
}
- bidp = (int *)get_property(np, "AAPL,bus-id", NULL);
+ bidp = get_property(np, "AAPL,bus-id", NULL);
pmif->aapl_bus_id = bidp ? *bidp : 0;
/* Get cable type from device-tree */
if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6
|| pmif->kind == controller_k2_ata6
|| pmif->kind == controller_sh_ata6) {
- char* cable = get_property(np, "cable-type", NULL);
+ const char* cable = get_property(np, "cable-type", NULL);
if (cable && !strncmp(cable, "80-", 3))
pmif->cable_80 = 1;
}
@@ -1326,7 +1326,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_irq_count(mdev) == 0) {
printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n",
i, mdev->ofdev.node->full_name);
- irq = 13;
+ irq = irq_create_mapping(NULL, 13);
} else
irq = macio_irq(mdev, 0);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 69a53d476b5..9edfacee7d8 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -14,7 +14,7 @@ config INFINIBAND_USER_MAD
---help---
Userspace InfiniBand Management Datagram (MAD) support. This
is the kernel side of the userspace MAD support, which allows
- userspace processes to send and receive MADs. You will also
+ userspace processes to send and receive MADs. You will also
need libibumad from <http://www.openib.org>.
config INFINIBAND_USER_ACCESS
@@ -36,6 +36,8 @@ config INFINIBAND_ADDR_TRANS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/ehca/Kconfig"
+source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index c7ff58c1d0e..2b5d1098ef4 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
-obj-$(CONFIG_IPATH_CORE) += hw/ipath/
+obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
+obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
+obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 68e73ec2d1f..163d991eb8c 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,7 +1,7 @@
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
- ib_cm.o $(infiniband-y)
+ ib_cm.o iw_cm.o $(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
@@ -14,6 +14,8 @@ ib_sa-y := sa_query.o
ib_cm-y := cm.o
+iw_cm-y := iwcm.o
+
rdma_cm-y := cma.o
ib_addr-y := addr.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1205e802782..9cbf09e2052 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,12 +61,15 @@ static LIST_HEAD(req_list);
static DECLARE_WORK(work, process_req, NULL);
static struct workqueue_struct *addr_wq;
-static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
- unsigned char *dst_dev_addr)
+int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
+ const unsigned char *dst_dev_addr)
{
switch (dev->type) {
case ARPHRD_INFINIBAND:
- dev_addr->dev_type = IB_NODE_CA;
+ dev_addr->dev_type = RDMA_NODE_IB_CA;
+ break;
+ case ARPHRD_ETHER:
+ dev_addr->dev_type = RDMA_NODE_RNIC;
break;
default:
return -EADDRNOTAVAIL;
@@ -78,6 +81,7 @@ static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
return 0;
}
+EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
@@ -89,7 +93,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
if (!dev)
return -EADDRNOTAVAIL;
- ret = copy_addr(dev_addr, dev, NULL);
+ ret = rdma_copy_addr(dev_addr, dev, NULL);
dev_put(dev);
return ret;
}
@@ -161,7 +165,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
/* If the device does ARP internally, return 'done' */
if (rt->idev->dev->flags & IFF_NOARP) {
- copy_addr(addr, rt->idev->dev, NULL);
+ rdma_copy_addr(addr, rt->idev->dev, NULL);
goto put;
}
@@ -181,7 +185,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
src_in->sin_addr.s_addr = rt->rt_src;
}
- ret = copy_addr(addr, neigh->dev, neigh->ha);
+ ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
neigh_release(neigh);
put:
@@ -245,7 +249,7 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
if (ZERONET(src_ip)) {
src_in->sin_family = dst_in->sin_family;
src_in->sin_addr.s_addr = dst_ip;
- ret = copy_addr(addr, dev, dev->dev_addr);
+ ret = rdma_copy_addr(addr, dev, dev->dev_addr);
} else if (LOOPBACK(src_ip)) {
ret = rdma_translate_ip((struct sockaddr *)dst_in, addr);
if (!ret)
@@ -327,10 +331,10 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
}
EXPORT_SYMBOL(rdma_addr_cancel);
-static int netevent_callback(struct notifier_block *self, unsigned long event,
+static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
- if (event == NETEVENT_NEIGH_UPDATE) {
+ if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
if (neigh->dev->type == ARPHRD_INFINIBAND &&
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 75313ade2e0..20e9f64e67a 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -62,12 +62,13 @@ struct ib_update_work {
static inline int start_port(struct ib_device *device)
{
- return device->node_type == IB_NODE_SWITCH ? 0 : 1;
+ return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}
static inline int end_port(struct ib_device *device)
{
- return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
+ return (device->node_type == RDMA_NODE_IB_SWITCH) ?
+ 0 : device->phys_port_cnt;
}
int ib_get_cached_gid(struct ib_device *device,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0de335b7bfc..f35fcc4c063 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -73,6 +74,7 @@ static struct ib_cm {
struct rb_root remote_id_table;
struct rb_root remote_sidr_table;
struct idr local_id_table;
+ __be32 random_id_operand;
struct workqueue_struct *wq;
} cm;
@@ -177,7 +179,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
if (IS_ERR(ah))
return PTR_ERR(ah);
- m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
+ m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC);
@@ -299,15 +301,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
- int ret;
+ int ret, id;
static int next_id;
do {
spin_lock_irqsave(&cm.lock, flags);
- ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
- (__force int *) &cm_id_priv->id.local_id);
+ ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
+ next_id++, &id);
spin_unlock_irqrestore(&cm.lock, flags);
} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+
+ cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
return ret;
}
@@ -316,7 +320,8 @@ static void cm_free_id(__be32 local_id)
unsigned long flags;
spin_lock_irqsave(&cm.lock, flags);
- idr_remove(&cm.local_id_table, (__force int) local_id);
+ idr_remove(&cm.local_id_table,
+ (__force int) (local_id ^ cm.random_id_operand));
spin_unlock_irqrestore(&cm.lock, flags);
}
@@ -324,7 +329,8 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
- cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
+ cm_id_priv = idr_find(&cm.local_id_table,
+ (__force int) (local_id ^ cm.random_id_operand));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
@@ -679,6 +685,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
int wait_time;
+ cm_cleanup_timewait(cm_id_priv->timewait_info);
+
/*
* The cm_id could be destroyed by the user before we exit timewait.
* To protect against this, we search for the cm_id after exiting
@@ -1354,7 +1362,7 @@ static int cm_req_handler(struct cm_work *work)
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
- goto error1;
+ goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
@@ -1363,7 +1371,8 @@ static int cm_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
ret = -EINVAL;
- goto error2;
+ kfree(cm_id_priv->timewait_info);
+ goto destroy;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1373,12 +1382,22 @@ static int cm_req_handler(struct cm_work *work)
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
- if (ret)
- goto error3;
+ if (ret) {
+ ib_get_cached_gid(work->port->cm_dev->device,
+ work->port->port_num, 0, &work->path[0].sgid);
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ &work->path[0].sgid, sizeof work->path[0].sgid,
+ NULL, 0);
+ goto rejected;
+ }
if (req_msg->alt_local_lid) {
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
- if (ret)
- goto error3;
+ if (ret) {
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
+ &work->path[0].sgid,
+ sizeof work->path[0].sgid, NULL, 0);
+ goto rejected;
+ }
}
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->timeout_ms = cm_convert_to_ms(
@@ -1400,12 +1419,11 @@ static int cm_req_handler(struct cm_work *work)
cm_deref_id(listen_cm_id_priv);
return 0;
-error3: atomic_dec(&cm_id_priv->refcount);
+rejected:
+ atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
-error2: kfree(cm_id_priv->timewait_info);
- cm_id_priv->timewait_info = NULL;
-error1: ib_destroy_cm_id(&cm_id_priv->id);
+destroy:
+ ib_destroy_cm_id(cm_id);
return ret;
}
@@ -2072,8 +2090,9 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
spin_unlock_irqrestore(&cm.lock, flags);
return NULL;
}
- cm_id_priv = idr_find(&cm.local_id_table,
- (__force int) timewait_info->work.local_id);
+ cm_id_priv = idr_find(&cm.local_id_table, (__force int)
+ (timewait_info->work.local_id ^
+ cm.random_id_operand));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
@@ -3125,7 +3144,8 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE;
if (cm_id_priv->responder_resources)
- qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_ATOMIC;
qp_attr->pkey_index = cm_id_priv->av.pkey_index;
qp_attr->port_num = cm_id_priv->av.port->port_num;
ret = 0;
@@ -3262,6 +3282,9 @@ static void cm_add_one(struct ib_device *device)
int ret;
u8 i;
+ if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ return;
+
cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
@@ -3349,6 +3372,7 @@ static int __init ib_cm_init(void)
cm.remote_qp_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
+ get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
idr_pre_get(&cm.local_id_table, GFP_KERNEL);
cm.wq = create_workqueue("ib_cm");
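The effect of random_id_operand in the hunks above: the idr still allocates small sequential ids internally, but the externally visible local_id is XOR-masked with a boot-time random value so remote peers cannot predict the next id. Since XOR is its own inverse, lookup and removal simply reapply the mask:

	/* allocation: mask the sequential idr id */
	cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);

	/* lookup: unmask to recover the idr id */
	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));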
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d6f99d5720f..1178bd434d1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -35,6 +35,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
+#include <linux/inetdevice.h>
#include <net/tcp.h>
@@ -43,13 +44,14 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
+#include <rdma/iw_cm.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
-#define CMA_MAX_CM_RETRIES 3
+#define CMA_MAX_CM_RETRIES 15
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);
@@ -60,6 +62,7 @@ static struct ib_client cma_client = {
.remove = cma_remove_one
};
+static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
@@ -124,6 +127,7 @@ struct rdma_id_private {
int query_id;
union {
struct ib_cm_id *ib;
+ struct iw_cm_id *iw;
} cm_id;
u32 seq_num;
@@ -259,15 +263,24 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
id_priv->cma_dev = NULL;
}
-static int cma_acquire_ib_dev(struct rdma_id_private *id_priv)
+static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
+ enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
struct cma_device *cma_dev;
union ib_gid gid;
int ret = -ENODEV;
- ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid),
+ switch (rdma_node_get_transport(dev_type)) {
+ case RDMA_TRANSPORT_IB:
+ ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ break;
+ case RDMA_TRANSPORT_IWARP:
+ iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ break;
+ default:
+ return -ENODEV;
+ }
- mutex_lock(&lock);
list_for_each_entry(cma_dev, &dev_list, list) {
ret = ib_find_cached_gid(cma_dev->device, &gid,
&id_priv->id.port_num, NULL);
@@ -276,20 +289,9 @@ static int cma_acquire_ib_dev(struct rdma_id_private *id_priv)
break;
}
}
- mutex_unlock(&lock);
return ret;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv)
-{
- switch (id_priv->id.route.addr.dev_addr.dev_type) {
- case IB_NODE_CA:
- return cma_acquire_ib_dev(id_priv);
- default:
- return -ENODEV;
- }
-}
-
static void cma_deref_id(struct rdma_id_private *id_priv)
{
if (atomic_dec_and_test(&id_priv->refcount))
@@ -347,6 +349,16 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
IB_QP_PKEY_INDEX | IB_QP_PORT);
}
+static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+
+ return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
+}
+
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
@@ -362,10 +374,13 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
if (IS_ERR(qp))
return PTR_ERR(qp);
- switch (id->device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
ret = cma_init_ib_qp(id_priv, qp);
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = cma_init_iw_qp(id_priv, qp);
+ break;
default:
ret = -ENOSYS;
break;
@@ -451,13 +466,17 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- switch (id_priv->id.device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
+ case RDMA_TRANSPORT_IB:
ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
qp_attr_mask);
if (qp_attr->qp_state == IB_QPS_RTR)
qp_attr->rq_psn = id_priv->seq_num;
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
+ qp_attr_mask);
+ break;
default:
ret = -ENOSYS;
break;
@@ -590,8 +609,8 @@ static int cma_notify_user(struct rdma_id_private *id_priv,
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
- switch (id_priv->id.device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
+ case RDMA_TRANSPORT_IB:
if (id_priv->query)
ib_sa_cancel_query(id_priv->query_id, id_priv->query);
break;
@@ -611,11 +630,15 @@ static void cma_destroy_listen(struct rdma_id_private *id_priv)
cma_exch(id_priv, CMA_DESTROYING);
if (id_priv->cma_dev) {
- switch (id_priv->id.device->node_type) {
- case IB_NODE_CA:
- if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+ switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
+ case RDMA_TRANSPORT_IWARP:
+ if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
+ iw_destroy_cm_id(id_priv->cm_id.iw);
+ break;
default:
break;
}
@@ -689,19 +712,25 @@ void rdma_destroy_id(struct rdma_cm_id *id)
state = cma_exch(id_priv, CMA_DESTROYING);
cma_cancel_operation(id_priv, state);
+ mutex_lock(&lock);
if (id_priv->cma_dev) {
- switch (id->device->node_type) {
- case IB_NODE_CA:
- if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+ mutex_unlock(&lock);
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
+ case RDMA_TRANSPORT_IWARP:
+ if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
+ iw_destroy_cm_id(id_priv->cm_id.iw);
+ break;
default:
break;
}
- mutex_lock(&lock);
+ mutex_lock(&lock);
cma_detach_from_dev(id_priv);
- mutex_unlock(&lock);
}
+ mutex_unlock(&lock);
cma_release_port(id_priv);
cma_deref_id(id_priv);
@@ -869,7 +898,7 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
- rt->addr.dev_addr.dev_type = IB_NODE_CA;
+ rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;
id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT;
@@ -898,7 +927,9 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
}
atomic_inc(&conn_id->dev_remove);
- ret = cma_acquire_ib_dev(conn_id);
+ mutex_lock(&lock);
+ ret = cma_acquire_dev(conn_id);
+ mutex_unlock(&lock);
if (ret) {
ret = -ENODEV;
cma_release_remove(conn_id);
@@ -982,6 +1013,130 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
}
}
+static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
+{
+ struct rdma_id_private *id_priv = iw_id->context;
+ enum rdma_cm_event_type event = 0;
+ struct sockaddr_in *sin;
+ int ret = 0;
+
+ atomic_inc(&id_priv->dev_remove);
+
+ switch (iw_event->event) {
+ case IW_CM_EVENT_CLOSE:
+ event = RDMA_CM_EVENT_DISCONNECTED;
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
+ *sin = iw_event->local_addr;
+ sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
+ *sin = iw_event->remote_addr;
+ if (iw_event->status)
+ event = RDMA_CM_EVENT_REJECTED;
+ else
+ event = RDMA_CM_EVENT_ESTABLISHED;
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ event = RDMA_CM_EVENT_ESTABLISHED;
+ break;
+ default:
+		BUG();
+ }
+
+ ret = cma_notify_user(id_priv, event, iw_event->status,
+ iw_event->private_data,
+ iw_event->private_data_len);
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.iw = NULL;
+ cma_exch(id_priv, CMA_DESTROYING);
+ cma_release_remove(id_priv);
+ rdma_destroy_id(&id_priv->id);
+ return ret;
+ }
+
+ cma_release_remove(id_priv);
+ return ret;
+}
+
+static int iw_conn_req_handler(struct iw_cm_id *cm_id,
+ struct iw_cm_event *iw_event)
+{
+ struct rdma_cm_id *new_cm_id;
+ struct rdma_id_private *listen_id, *conn_id;
+ struct sockaddr_in *sin;
+ struct net_device *dev = NULL;
+ int ret;
+
+ listen_id = cm_id->context;
+ atomic_inc(&listen_id->dev_remove);
+ if (!cma_comp(listen_id, CMA_LISTEN)) {
+ ret = -ECONNABORTED;
+ goto out;
+ }
+
+ /* Create a new RDMA id for the new IW CM ID */
+ new_cm_id = rdma_create_id(listen_id->id.event_handler,
+ listen_id->id.context,
+ RDMA_PS_TCP);
+ if (!new_cm_id) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ conn_id = container_of(new_cm_id, struct rdma_id_private, id);
+ atomic_inc(&conn_id->dev_remove);
+ conn_id->state = CMA_CONNECT;
+
+ dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
+ if (!dev) {
+ ret = -EADDRNOTAVAIL;
+ cma_release_remove(conn_id);
+ rdma_destroy_id(new_cm_id);
+ goto out;
+ }
+ ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
+ if (ret) {
+ cma_release_remove(conn_id);
+ rdma_destroy_id(new_cm_id);
+ goto out;
+ }
+
+ mutex_lock(&lock);
+ ret = cma_acquire_dev(conn_id);
+ mutex_unlock(&lock);
+ if (ret) {
+ cma_release_remove(conn_id);
+ rdma_destroy_id(new_cm_id);
+ goto out;
+ }
+
+ conn_id->cm_id.iw = cm_id;
+ cm_id->context = conn_id;
+ cm_id->cm_handler = cma_iw_handler;
+
+ sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
+ *sin = iw_event->local_addr;
+ sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
+ *sin = iw_event->remote_addr;
+
+ ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
+ iw_event->private_data,
+ iw_event->private_data_len);
+ if (ret) {
+ /* User wants to destroy the CM ID */
+ conn_id->cm_id.iw = NULL;
+ cma_exch(conn_id, CMA_DESTROYING);
+ cma_release_remove(conn_id);
+ rdma_destroy_id(&conn_id->id);
+ }
+
+out:
+ if (dev)
+ dev_put(dev);
+ cma_release_remove(listen_id);
+ return ret;
+}
+
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
struct ib_cm_compare_data compare_data;
@@ -1011,6 +1166,30 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
return ret;
}
+static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
+{
+ int ret;
+ struct sockaddr_in *sin;
+
+ id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
+ iw_conn_req_handler,
+ id_priv);
+ if (IS_ERR(id_priv->cm_id.iw))
+ return PTR_ERR(id_priv->cm_id.iw);
+
+ sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
+ id_priv->cm_id.iw->local_addr = *sin;
+
+ ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
+
+ if (ret) {
+ iw_destroy_cm_id(id_priv->cm_id.iw);
+ id_priv->cm_id.iw = NULL;
+ }
+
+ return ret;
+}
+
static int cma_listen_handler(struct rdma_cm_id *id,
struct rdma_cm_event *event)
{
@@ -1087,12 +1266,17 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
id_priv->backlog = backlog;
if (id->device) {
- switch (id->device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
ret = cma_ib_listen(id_priv);
if (ret)
goto err;
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = cma_iw_listen(id_priv, backlog);
+ if (ret)
+ goto err;
+ break;
default:
ret = -ENOSYS;
goto err;
@@ -1140,7 +1324,7 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
path_rec.numb_path = 1;
- id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device,
+ id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
id_priv->id.port_num, &path_rec,
IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
@@ -1231,6 +1415,23 @@ err:
}
EXPORT_SYMBOL(rdma_set_ib_paths);
+static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+{
+ struct cma_work *work;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler, work);
+ work->old_state = CMA_ROUTE_QUERY;
+ work->new_state = CMA_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ queue_work(cma_wq, &work->work);
+ return 0;
+}
+
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
struct rdma_id_private *id_priv;
@@ -1241,10 +1442,13 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
return -EINVAL;
atomic_inc(&id_priv->refcount);
- switch (id->device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
ret = cma_resolve_ib_route(id_priv, timeout_ms);
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = cma_resolve_iw_route(id_priv, timeout_ms);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1309,16 +1513,26 @@ static void addr_handler(int status, struct sockaddr *src_addr,
enum rdma_cm_event_type event;
atomic_inc(&id_priv->dev_remove);
- if (!id_priv->cma_dev && !status)
+
+ /*
+ * Grab mutex to block rdma_destroy_id() from removing the device while
+ * we're trying to acquire it.
+ */
+ mutex_lock(&lock);
+ if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
+ mutex_unlock(&lock);
+ goto out;
+ }
+
+ if (!status && !id_priv->cma_dev)
status = cma_acquire_dev(id_priv);
+ mutex_unlock(&lock);
if (status) {
- if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND))
+ if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
goto out;
event = RDMA_CM_EVENT_ADDR_ERROR;
} else {
- if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
- goto out;
memcpy(&id_priv->id.route.addr.src_addr, src_addr,
ip_addr_size(src_addr));
event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@ -1492,7 +1706,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
if (cma_any_addr(&cur_id->id.route.addr.src_addr))
return -EADDRNOTAVAIL;
-
+
cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
return -EADDRINUSE;
@@ -1542,8 +1756,11 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (!cma_any_addr(addr)) {
ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
- if (!ret)
+ if (!ret) {
+ mutex_lock(&lock);
ret = cma_acquire_dev(id_priv);
+ mutex_unlock(&lock);
+ }
if (ret)
goto err;
}
@@ -1649,6 +1866,47 @@ out:
return ret;
}
+static int cma_connect_iw(struct rdma_id_private *id_priv,
+ struct rdma_conn_param *conn_param)
+{
+ struct iw_cm_id *cm_id;
+	struct sockaddr_in *sin;
+ int ret;
+ struct iw_cm_conn_param iw_param;
+
+ cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
+ goto out;
+ }
+
+ id_priv->cm_id.iw = cm_id;
+
+	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
+	cm_id->local_addr = *sin;
+
+	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
+ cm_id->remote_addr = *sin;
+
+ ret = cma_modify_qp_rtr(&id_priv->id);
+ if (ret) {
+ iw_destroy_cm_id(cm_id);
+ return ret;
+ }
+
+ iw_param.ord = conn_param->initiator_depth;
+ iw_param.ird = conn_param->responder_resources;
+ iw_param.private_data = conn_param->private_data;
+ iw_param.private_data_len = conn_param->private_data_len;
+ if (id_priv->id.qp)
+ iw_param.qpn = id_priv->qp_num;
+ else
+ iw_param.qpn = conn_param->qp_num;
+ ret = iw_cm_connect(cm_id, &iw_param);
+out:
+ return ret;
+}
+
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
struct rdma_id_private *id_priv;
@@ -1664,10 +1922,13 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
id_priv->srq = conn_param->srq;
}
- switch (id->device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
ret = cma_connect_ib(id_priv, conn_param);
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = cma_connect_iw(id_priv, conn_param);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1708,6 +1969,28 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
}
+static int cma_accept_iw(struct rdma_id_private *id_priv,
+ struct rdma_conn_param *conn_param)
+{
+ struct iw_cm_conn_param iw_param;
+ int ret;
+
+ ret = cma_modify_qp_rtr(&id_priv->id);
+ if (ret)
+ return ret;
+
+ iw_param.ord = conn_param->initiator_depth;
+ iw_param.ird = conn_param->responder_resources;
+ iw_param.private_data = conn_param->private_data;
+ iw_param.private_data_len = conn_param->private_data_len;
+ if (id_priv->id.qp) {
+ iw_param.qpn = id_priv->qp_num;
+ } else
+ iw_param.qpn = conn_param->qp_num;
+
+ return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
+}
+
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
struct rdma_id_private *id_priv;
@@ -1723,13 +2006,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
id_priv->srq = conn_param->srq;
}
- switch (id->device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
if (conn_param)
ret = cma_accept_ib(id_priv, conn_param);
else
ret = cma_rep_recv(id_priv);
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = cma_accept_iw(id_priv, conn_param);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1756,12 +2042,16 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
if (!cma_comp(id_priv, CMA_CONNECT))
return -EINVAL;
- switch (id->device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
ret = ib_send_cm_rej(id_priv->cm_id.ib,
IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
private_data, private_data_len);
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = iw_cm_reject(id_priv->cm_id.iw,
+ private_data, private_data_len);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1780,17 +2070,20 @@ int rdma_disconnect(struct rdma_cm_id *id)
!cma_comp(id_priv, CMA_DISCONNECT))
return -EINVAL;
- ret = cma_modify_qp_err(id);
- if (ret)
- goto out;
-
- switch (id->device->node_type) {
- case IB_NODE_CA:
+ switch (rdma_node_get_transport(id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ ret = cma_modify_qp_err(id);
+ if (ret)
+ goto out;
/* Initiate or respond to a disconnect. */
if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
break;
+ case RDMA_TRANSPORT_IWARP:
+ ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
+ break;
default:
+ ret = -EINVAL;
break;
}
out:
@@ -1907,12 +2200,15 @@ static int cma_init(void)
if (!cma_wq)
return -ENOMEM;
+ ib_sa_register_client(&sa_client);
+
ret = ib_register_client(&cma_client);
if (ret)
goto err;
return 0;
err:
+ ib_sa_unregister_client(&sa_client);
destroy_workqueue(cma_wq);
return ret;
}
@@ -1920,6 +2216,7 @@ err:
static void cma_cleanup(void)
{
ib_unregister_client(&cma_client);
+ ib_sa_unregister_client(&sa_client);
destroy_workqueue(cma_wq);
idr_destroy(&sdp_ps);
idr_destroy(&tcp_ps);
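One locking pattern recurs throughout the cma.c hunks above: cma_acquire_dev() no longer takes the global mutex itself, so every call site now brackets it explicitly, which also lets addr_handler() hold the lock across its state check and the device lookup:

	mutex_lock(&lock);
	ret = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);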
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index b2f3cb91d9b..63d2a39fb82 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -385,7 +385,7 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
EXPORT_SYMBOL(ib_get_client_data);
/**
- * ib_set_client_data - Get IB client context
+ * ib_set_client_data - Set IB client context
* @device:Device to set context for
* @client:Client to set context for
* @data:Context to set
@@ -505,7 +505,7 @@ int ib_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
- if (device->node_type == IB_NODE_SWITCH) {
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
if (port_num)
return -EINVAL;
} else if (port_num < 1 || port_num > device->phys_port_cnt)
@@ -580,7 +580,7 @@ int ib_modify_port(struct ib_device *device,
u8 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
- if (device->node_type == IB_NODE_SWITCH) {
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
if (port_num)
return -EINVAL;
} else if (port_num < 1 || port_num > device->phys_port_cnt)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
new file mode 100644
index 00000000000..c3fb304a4e8
--- /dev/null
+++ b/drivers/infiniband/core/iwcm.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_addr.h>
+
+#include "iwcm.h"
+
+MODULE_AUTHOR("Tom Tucker");
+MODULE_DESCRIPTION("iWARP CM");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct workqueue_struct *iwcm_wq;
+struct iwcm_work {
+ struct work_struct work;
+ struct iwcm_id_private *cm_id;
+ struct list_head list;
+ struct iw_cm_event event;
+ struct list_head free_list;
+};
+
+/*
+ * The following services provide a mechanism for pre-allocating iwcm_work
+ * elements. The design pre-allocates them based on the cm_id type:
+ * LISTENING IDS: Get enough elements preallocated to handle the
+ * listen backlog.
+ * ACTIVE IDS: 4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
+ * PASSIVE IDS: 3: ESTABLISHED, DISCONNECT, CLOSE
+ *
+ * Allocating them in connect and listen avoids having to deal
+ * with allocation failures on the event upcall from the provider (which
+ * is called in the interrupt context).
+ *
+ * One exception is when creating the cm_id for incoming connection requests.
+ * There are two cases:
+ * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
+ * the backlog is exceeded, then no more connection request events will
+ * be processed. cm_event_handler() returns -ENOMEM in this case. It's up
+ * to the provider to reject the connection request.
+ * 2) in the connection request workqueue handler, cm_conn_req_handler().
+ * If work elements cannot be allocated for the new connect request cm_id,
+ * then IWCM will call the provider reject method. This is ok since
+ * cm_conn_req_handler() runs in the workqueue thread context.
+ */
+
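+/*
+ * Example flow (sketch; not part of the patch): a provider event
+ * upcall takes a pre-allocated element and queues it, and the
+ * workqueue handler returns it to the free list once the client
+ * callback has run:
+ *
+ *	work = get_work(cm_id_priv);	(NULL means the backlog is spent)
+ *	work->event = *iw_event;
+ *	queue_work(iwcm_wq, &work->work);
+ *	...
+ *	put_work(work);			(from the work handler)
+ */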
+static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
+{
+ struct iwcm_work *work;
+
+ if (list_empty(&cm_id_priv->work_free_list))
+ return NULL;
+ work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
+ free_list);
+ list_del_init(&work->free_list);
+ return work;
+}
+
+static void put_work(struct iwcm_work *work)
+{
+ list_add(&work->free_list, &work->cm_id->work_free_list);
+}
+
+static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
+{
+ struct list_head *e, *tmp;
+
+ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
+ kfree(list_entry(e, struct iwcm_work, free_list));
+}
+
+static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
+{
+ struct iwcm_work *work;
+
+ BUG_ON(!list_empty(&cm_id_priv->work_free_list));
+ while (count--) {
+ work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
+ if (!work) {
+ dealloc_work_entries(cm_id_priv);
+ return -ENOMEM;
+ }
+ work->cm_id = cm_id_priv;
+ INIT_LIST_HEAD(&work->list);
+ put_work(work);
+ }
+ return 0;
+}
+
+/*
+ * Save private data from incoming connection requests in the
+ * cm_id_priv so the low-level driver doesn't have to. Adjust
+ * the event pointer to point to the local copy.
+ */
+static int copy_private_data(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *event)
+{
+ void *p;
+
+ p = kmalloc(event->private_data_len, GFP_ATOMIC);
+ if (!p)
+ return -ENOMEM;
+ memcpy(p, event->private_data, event->private_data_len);
+ event->private_data = p;
+ return 0;
+}
+
+/*
+ * Release a reference on cm_id. If the last reference is being removed
+ * and iw_destroy_cm_id is waiting, wake up the waiting thread.
+ */
+static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
+{
+ int ret = 0;
+
+	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
+ if (atomic_dec_and_test(&cm_id_priv->refcount)) {
+ BUG_ON(!list_empty(&cm_id_priv->work_list));
+ if (waitqueue_active(&cm_id_priv->destroy_comp.wait)) {
+ BUG_ON(cm_id_priv->state != IW_CM_STATE_DESTROYING);
+ BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY,
+ &cm_id_priv->flags));
+ ret = 1;
+ }
+ complete(&cm_id_priv->destroy_comp);
+ }
+
+ return ret;
+}
+
+static void add_ref(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *cm_id_priv;
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ atomic_inc(&cm_id_priv->refcount);
+}
+
+static void rem_ref(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *cm_id_priv;
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ iwcm_deref_id(cm_id_priv);
+}
+
+static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
+
+struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
+ iw_cm_handler cm_handler,
+ void *context)
+{
+ struct iwcm_id_private *cm_id_priv;
+
+ cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
+ if (!cm_id_priv)
+ return ERR_PTR(-ENOMEM);
+
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ cm_id_priv->id.device = device;
+ cm_id_priv->id.cm_handler = cm_handler;
+ cm_id_priv->id.context = context;
+ cm_id_priv->id.event_handler = cm_event_handler;
+ cm_id_priv->id.add_ref = add_ref;
+ cm_id_priv->id.rem_ref = rem_ref;
+ spin_lock_init(&cm_id_priv->lock);
+ atomic_set(&cm_id_priv->refcount, 1);
+ init_waitqueue_head(&cm_id_priv->connect_wait);
+ init_completion(&cm_id_priv->destroy_comp);
+ INIT_LIST_HEAD(&cm_id_priv->work_list);
+ INIT_LIST_HEAD(&cm_id_priv->work_free_list);
+
+ return &cm_id_priv->id;
+}
+EXPORT_SYMBOL(iw_create_cm_id);
+
+
+static int iwcm_modify_qp_err(struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+
+ if (!qp)
+ return -EINVAL;
+
+ qp_attr.qp_state = IB_QPS_ERR;
+ return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+}
+
+/*
+ * This is really the RDMAC CLOSING state. It is most similar to the
+ * IB SQD QP state.
+ */
+static int iwcm_modify_qp_sqd(struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+
+ BUG_ON(qp == NULL);
+ qp_attr.qp_state = IB_QPS_SQD;
+ return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+}
+
+/*
+ * CM_ID <-- CLOSING
+ *
+ * Block if a passive or active connection is currently being processed. Then
+ * process the event as follows:
+ * - If we are ESTABLISHED, move to CLOSING and modify the QP state
+ * based on the abrupt flag
+ * - If the connection is already in the CLOSING or IDLE state, the peer is
+ * disconnecting concurrently with us and we've already seen the
+ * DISCONNECT event -- ignore the request and return 0
+ * - Disconnect on a listening endpoint returns -EINVAL
+ */
+int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
+{
+ struct iwcm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+ struct ib_qp *qp = NULL;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ /* Wait if we're currently in a connect or accept downcall */
+ wait_event(cm_id_priv->connect_wait,
+ !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_ESTABLISHED:
+ cm_id_priv->state = IW_CM_STATE_CLOSING;
+
+		/* QP could be NULL for a user-mode client */
+ if (cm_id_priv->qp)
+ qp = cm_id_priv->qp;
+ else
+ ret = -EINVAL;
+ break;
+ case IW_CM_STATE_LISTEN:
+ ret = -EINVAL;
+ break;
+ case IW_CM_STATE_CLOSING:
+ /* remote peer closed first */
+ case IW_CM_STATE_IDLE:
+ /* accept or connect returned !0 */
+ break;
+ case IW_CM_STATE_CONN_RECV:
+ /*
+		 * App called disconnect before/without calling accept after
+		 * the connect_request event was delivered.
+ */
+ break;
+ case IW_CM_STATE_CONN_SENT:
+ /* Can only get here if wait above fails */
+ default:
+ BUG();
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ if (qp) {
+ if (abrupt)
+ ret = iwcm_modify_qp_err(qp);
+ else
+ ret = iwcm_modify_qp_sqd(qp);
+
+ /*
+		 * If both sides are disconnecting, the QP could
+		 * already be in the ERR or SQD state.
+ */
+ ret = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_disconnect);
+
+/*
+ * CM_ID <-- DESTROYING
+ *
+ * Clean up all resources associated with the connection and release
+ * the initial reference taken by iw_create_cm_id.
+ */
+static void destroy_cm_id(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ /*
+ * Wait if we're currently in a connect or accept downcall. A
+ * listening endpoint should never block here.
+ */
+ wait_event(cm_id_priv->connect_wait,
+ !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_LISTEN:
+ cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ /* destroy the listening endpoint */
+ ret = cm_id->device->iwcm->destroy_listen(cm_id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ break;
+ case IW_CM_STATE_ESTABLISHED:
+ cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ /* Abrupt close of the connection */
+ (void)iwcm_modify_qp_err(cm_id_priv->qp);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ break;
+ case IW_CM_STATE_IDLE:
+ case IW_CM_STATE_CLOSING:
+ cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ break;
+ case IW_CM_STATE_CONN_RECV:
+ /*
+		 * App called destroy before/without calling accept after
+		 * receiving the connection request event notification.
+ */
+ cm_id_priv->state = IW_CM_STATE_DESTROYING;
+ break;
+ case IW_CM_STATE_CONN_SENT:
+ case IW_CM_STATE_DESTROYING:
+ default:
+ BUG();
+ break;
+ }
+ if (cm_id_priv->qp) {
+ cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+ cm_id_priv->qp = NULL;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ (void)iwcm_deref_id(cm_id_priv);
+}
+
+/*
+ * This function is only called by the application thread and cannot
+ * be called by the event thread. The function will wait for all
+ * references to be released on the cm_id and then kfree the cm_id
+ * object.
+ */
+void iw_destroy_cm_id(struct iw_cm_id *cm_id)
+{
+ struct iwcm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
+
+ destroy_cm_id(cm_id);
+
+ wait_for_completion(&cm_id_priv->destroy_comp);
+
+ dealloc_work_entries(cm_id_priv);
+
+ kfree(cm_id_priv);
+}
+EXPORT_SYMBOL(iw_destroy_cm_id);
+
+/*
+ * CM_ID <-- LISTEN
+ *
+ * Start listening for connect requests. Generates one CONNECT_REQUEST
+ * event for each inbound connect request.
+ */
+int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct iwcm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ ret = alloc_work_entries(cm_id_priv, backlog);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_IDLE:
+ cm_id_priv->state = IW_CM_STATE_LISTEN;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+ if (ret)
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_listen);
+
+/*
+ * CM_ID <-- IDLE
+ *
+ * Rejects an inbound connection request. No events are generated.
+ */
+int iw_cm_reject(struct iw_cm_id *cm_id,
+ const void *private_data,
+ u8 private_data_len)
+{
+ struct iwcm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+ return -EINVAL;
+ }
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ ret = cm_id->device->iwcm->reject(cm_id, private_data,
+ private_data_len);
+
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_reject);
+
+/*
+ * CM_ID <-- ESTABLISHED
+ *
+ * Accepts an inbound connection request and generates an ESTABLISHED
+ * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
+ * until the ESTABLISHED event is received from the provider.
+ */
+int iw_cm_accept(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *iw_param)
+{
+ struct iwcm_id_private *cm_id_priv;
+ struct ib_qp *qp;
+ unsigned long flags;
+ int ret;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+ return -EINVAL;
+ }
+ /* Get the ib_qp given the QPN */
+ qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
+ if (!qp) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return -EINVAL;
+ }
+ cm_id->device->iwcm->add_ref(qp);
+ cm_id_priv->qp = qp;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ ret = cm_id->device->iwcm->accept(cm_id, iw_param);
+ if (ret) {
+ /* An error on accept precludes provider events */
+ BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->qp) {
+ cm_id->device->iwcm->rem_ref(qp);
+ cm_id_priv->qp = NULL;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_accept);
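+/*
+ * Usage sketch: accepting from the client handler on a
+ * CONNECT_REQUEST event. The ord/ird/private_data members of
+ * struct iw_cm_conn_param are assumed from iw_cm.h; qpn must name a
+ * QP the provider can resolve through its get_qp method.
+ *
+ *	struct iw_cm_conn_param param = {
+ *		.qpn		  = my_qp->qp_num,
+ *		.ord		  = 1,
+ *		.ird		  = 1,
+ *		.private_data	  = NULL,
+ *		.private_data_len = 0,
+ *	};
+ *
+ *	ret = iw_cm_accept(cm_id, &param);
+ */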
+
+/*
+ * Active Side: CM_ID <-- CONN_SENT
+ *
+ * If successful, results in the generation of a CONNECT_REPLY
+ * event. iw_cm_disconnect and iw_cm_destroy will block until the
+ * CONNECT_REPLY event is received from the provider.
+ */
+int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
+{
+ struct iwcm_id_private *cm_id_priv;
+ int ret = 0;
+ unsigned long flags;
+ struct ib_qp *qp;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ ret = alloc_work_entries(cm_id_priv, 4);
+ if (ret)
+ return ret;
+
+ set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+ if (cm_id_priv->state != IW_CM_STATE_IDLE) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+ return -EINVAL;
+ }
+
+ /* Get the ib_qp given the QPN */
+ qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
+ if (!qp) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return -EINVAL;
+ }
+ cm_id->device->iwcm->add_ref(qp);
+ cm_id_priv->qp = qp;
+ cm_id_priv->state = IW_CM_STATE_CONN_SENT;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ ret = cm_id->device->iwcm->connect(cm_id, iw_param);
+ if (ret) {
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->qp) {
+ cm_id->device->iwcm->rem_ref(qp);
+ cm_id_priv->qp = NULL;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_connect);
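+/*
+ * Usage sketch: an active-side connect on an IDLE cm_id whose
+ * local_addr/remote_addr have been filled in. Members other than qpn
+ * are assumed from iw_cm.h.
+ *
+ *	param.qpn = my_qp->qp_num;
+ *	param.ord = param.ird = 1;
+ *	param.private_data = NULL;
+ *	param.private_data_len = 0;
+ *	ret = iw_cm_connect(cm_id, &param);
+ */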
+
+/*
+ * Passive Side: new CM_ID <-- CONN_RECV
+ *
+ * Handles an inbound connect request. The function creates a new
+ * iw_cm_id to represent the new connection and inherits the client
+ * callback function and other attributes from the listening parent.
+ *
+ * The work item contains a pointer to the listen_cm_id and the event. The
+ * listen_cm_id contains the client cm_handler, context and
+ * device. These are copied when the cm_id is cloned. The event
+ * contains the new four-tuple.
+ *
+ * An error on the child should not affect the parent, so this
+ * function does not return a value.
+ */
+static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+ struct iw_cm_id *cm_id;
+ struct iwcm_id_private *cm_id_priv;
+ int ret;
+
+ /*
+ * The provider should never generate a connection request
+ * event with a bad status.
+ */
+ BUG_ON(iw_event->status);
+
+ /*
+ * We could be destroying the listening id. If so, ignore this
+ * upcall.
+ */
+ spin_lock_irqsave(&listen_id_priv->lock, flags);
+ if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
+ spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+
+ cm_id = iw_create_cm_id(listen_id_priv->id.device,
+ listen_id_priv->id.cm_handler,
+ listen_id_priv->id.context);
+ /* If the cm_id could not be created, ignore the request */
+ if (IS_ERR(cm_id))
+ return;
+
+ cm_id->provider_data = iw_event->provider_data;
+ cm_id->local_addr = iw_event->local_addr;
+ cm_id->remote_addr = iw_event->remote_addr;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ cm_id_priv->state = IW_CM_STATE_CONN_RECV;
+
+ ret = alloc_work_entries(cm_id_priv, 3);
+ if (ret) {
+ iw_cm_reject(cm_id, NULL, 0);
+ iw_destroy_cm_id(cm_id);
+ return;
+ }
+
+ /* Call the client CM handler */
+ ret = cm_id->cm_handler(cm_id, iw_event);
+ if (ret) {
+ set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ destroy_cm_id(cm_id);
+ if (atomic_read(&cm_id_priv->refcount) == 0)
+ kfree(cm_id);
+ }
+
+ if (iw_event->private_data_len)
+ kfree(iw_event->private_data);
+}
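+/*
+ * Usage sketch: a client declines an inbound request simply by
+ * returning non-zero from its handler; as above, the IWCM then
+ * destroys the child id itself. want_connection() is a hypothetical
+ * policy check.
+ *
+ *	static int my_cm_handler(struct iw_cm_id *cm_id,
+ *				 struct iw_cm_event *event)
+ *	{
+ *		if (event->event == IW_CM_EVENT_CONNECT_REQUEST &&
+ *		    !want_connection(event))
+ *			return -ECONNREFUSED;
+ *		return 0;
+ *	}
+ */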
+
+/*
+ * Passive Side: CM_ID <-- ESTABLISHED
+ *
+ * The provider generated an ESTABLISHED event which means that
+ * the MPA negotiation has completed successfully and we are now in MPA
+ * FPDU mode.
+ *
+ * This event can only be received in the CONN_RECV state. If the
+ * remote peer closed, the ESTABLISHED event would be received followed
+ * by the CLOSE event. If the app closes, it will block until we wake
+ * it up after processing this event.
+ */
+static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+ /*
+ * We clear the CONNECT_WAIT bit here to allow the callback
+ * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
+ * from a callback handler is not allowed.
+ */
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
+ cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+ wake_up_all(&cm_id_priv->connect_wait);
+
+ return ret;
+}
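+/*
+ * Usage sketch: because CONNECT_WAIT is cleared before the upcall,
+ * an ESTABLISHED handler may disconnect inline but must never call
+ * iw_destroy_cm_id. The iw_cm_disconnect signature is assumed from
+ * iw_cm.h; shutting_down is a hypothetical client flag.
+ *
+ *	case IW_CM_EVENT_ESTABLISHED:
+ *		if (shutting_down)
+ *			ret = iw_cm_disconnect(cm_id, 0);
+ *		break;
+ */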
+
+/*
+ * Active Side: CM_ID <-- ESTABLISHED
+ *
+ * The app has called connect and is waiting for the established event so
+ * it can post its requests to the server. This event will wake up anyone
+ * blocked in iw_cm_disconnect or iw_destroy_cm_id.
+ */
+static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ /*
+ * Clear the connect wait bit so a callback function calling
+ * iw_cm_disconnect will not wait and deadlock this thread
+ */
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
+ if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
+ cm_id_priv->id.local_addr = iw_event->local_addr;
+ cm_id_priv->id.remote_addr = iw_event->remote_addr;
+ cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
+ } else {
+ /* REJECTED or RESET */
+ cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+ cm_id_priv->qp = NULL;
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+
+ if (iw_event->private_data_len)
+ kfree(iw_event->private_data);
+
+ /* Wake up waiters on connect complete */
+ wake_up_all(&cm_id_priv->connect_wait);
+
+ return ret;
+}
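+/*
+ * Usage sketch: the active side learns the outcome of its connect
+ * from the CONNECT_REPLY status, mirroring the check above:
+ *
+ *	case IW_CM_EVENT_CONNECT_REPLY:
+ *		established = (event->status ==
+ *			       IW_CM_EVENT_STATUS_ACCEPTED);
+ *		break;
+ */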
+
+/*
+ * CM_ID <-- CLOSING
+ *
+ * If in the ESTABLISHED state, move to CLOSING.
+ */
+static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
+ cm_id_priv->state = IW_CM_STATE_CLOSING;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+}
+
+/*
+ * CM_ID <-- IDLE
+ *
+ * If in the ESTABLISHED or CLOSING states, the QP will have been
+ * moved by the provider to the ERR state. Disassociate the CM_ID from
+ * the QP, move to IDLE, and remove the 'connected' reference.
+ *
+ * If in some other state, the cm_id was destroyed asynchronously.
+ * This is the last reference that will result in waking up
+ * the app thread blocked in iw_destroy_cm_id.
+ */
+static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ unsigned long flags;
+ int ret = 0;
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+ if (cm_id_priv->qp) {
+ cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+ cm_id_priv->qp = NULL;
+ }
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_ESTABLISHED:
+ case IW_CM_STATE_CLOSING:
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ break;
+ case IW_CM_STATE_DESTROYING:
+ break;
+ default:
+ BUG();
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+ return ret;
+}
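+/*
+ * Usage sketch: by the time the client sees CLOSE, the QP reference
+ * has already been dropped above, so the handler only releases its
+ * own state. cleanup_conn() is a hypothetical client helper.
+ *
+ *	case IW_CM_EVENT_CLOSE:
+ *		cleanup_conn(cm_id->context);
+ *		break;
+ */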
+
+static int process_event(struct iwcm_id_private *cm_id_priv,
+ struct iw_cm_event *iw_event)
+{
+ int ret = 0;
+
+ switch (iw_event->event) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ cm_conn_req_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ ret = cm_conn_rep_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ ret = cm_conn_est_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ cm_disconnect_handler(cm_id_priv, iw_event);
+ break;
+ case IW_CM_EVENT_CLOSE:
+ ret = cm_close_handler(cm_id_priv, iw_event);
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+/*
+ * Process events on the work_list for the cm_id. If the callback
+ * function requests that the cm_id be deleted, a flag is set in the
+ * cm_id flags to indicate that when the last reference is
+ * removed, the cm_id is to be destroyed. This is necessary to
+ * distinguish between an object that will be destroyed by the app
+ * thread asleep on the destroy_comp list vs. an object destroyed
+ * here synchronously when the last reference is removed.
+ */
+static void cm_work_handler(void *arg)
+{
+ struct iwcm_work *work = arg, lwork;
+ struct iwcm_id_private *cm_id_priv = work->cm_id;
+ unsigned long flags;
+ int empty;
+ int ret = 0;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ empty = list_empty(&cm_id_priv->work_list);
+ while (!empty) {
+ work = list_entry(cm_id_priv->work_list.next,
+ struct iwcm_work, list);
+ list_del_init(&work->list);
+ empty = list_empty(&cm_id_priv->work_list);
+ lwork = *work;
+ put_work(work);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
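+ /* put_work() returned "work" to the free list; use the local copy */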
+ ret = process_event(cm_id_priv, &lwork.event);
+ if (ret) {
+ set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ destroy_cm_id(&cm_id_priv->id);
+ }
+ BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
+ if (iwcm_deref_id(cm_id_priv))
+ return;
+
+ if (atomic_read(&cm_id_priv->refcount) == 0 &&
+ test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+ dealloc_work_entries(cm_id_priv);
+ kfree(cm_id_priv);
+ return;
+ }
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+}
+
+/*
+ * This function is called on interrupt context. Schedule events on
+ * the iwcm_wq thread to allow callback functions to downcall into
+ * the CM and/or block. Events are queued to a per-CM_ID
+ * work_list. If this is the first event on the work_list, the work
+ * element is also queued on the iwcm_wq thread.
+ *
+ * Each event holds a reference on the cm_id. Until the last posted
+ * event has been delivered and processed, the cm_id cannot be
+ * deleted.
+ *
+ * Returns:
+ * 0 - the event was handled.
+ * -ENOMEM - the event was not handled due to lack of resources.
+ */
+static int cm_event_handler(struct iw_cm_id *cm_id,
+ struct iw_cm_event *iw_event)
+{
+ struct iwcm_work *work;
+ struct iwcm_id_private *cm_id_priv;
+ unsigned long flags;
+ int ret = 0;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ work = get_work(cm_id_priv);
+ if (!work) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_WORK(&work->work, cm_work_handler, work);
+ work->cm_id = cm_id_priv;
+ work->event = *iw_event;
+
+ if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
+ work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
+ work->event.private_data_len) {
+ ret = copy_private_data(cm_id_priv, &work->event);
+ if (ret) {
+ put_work(work);
+ goto out;
+ }
+ }
+
+ atomic_inc(&cm_id_priv->refcount);
+ if (list_empty(&cm_id_priv->work_list)) {
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ queue_work(iwcm_wq, &work->work);
+ } else
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+out:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return ret;
+}
+
+static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
+ struct ib_qp_attr *qp_attr,
+ int *qp_attr_mask)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_IDLE:
+ case IW_CM_STATE_CONN_SENT:
+ case IW_CM_STATE_CONN_RECV:
+ case IW_CM_STATE_ESTABLISHED:
+ *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
+ qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return ret;
+}
+
+static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
+ struct ib_qp_attr *qp_attr,
+ int *qp_attr_mask)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ switch (cm_id_priv->state) {
+ case IW_CM_STATE_IDLE:
+ case IW_CM_STATE_CONN_SENT:
+ case IW_CM_STATE_CONN_RECV:
+ case IW_CM_STATE_ESTABLISHED:
+ *qp_attr_mask = 0;
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ return ret;
+}
+
+int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
+ struct ib_qp_attr *qp_attr,
+ int *qp_attr_mask)
+{
+ struct iwcm_id_private *cm_id_priv;
+ int ret;
+
+ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ switch (qp_attr->qp_state) {
+ case IB_QPS_INIT:
+ case IB_QPS_RTR:
+ ret = iwcm_init_qp_init_attr(cm_id_priv,
+ qp_attr, qp_attr_mask);
+ break;
+ case IB_QPS_RTS:
+ ret = iwcm_init_qp_rts_attr(cm_id_priv,
+ qp_attr, qp_attr_mask);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(iw_cm_init_qp_attr);
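+/*
+ * Usage sketch: the attributes are fed to the standard verbs call,
+ * e.g. when moving a QP to RTS:
+ *
+ *	qp_attr.qp_state = IB_QPS_RTS;
+ *	ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
+ *	if (!ret)
+ *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
+ */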
+
+static int __init iw_cm_init(void)
+{
+ iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
+ if (!iwcm_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit iw_cm_cleanup(void)
+{
+ destroy_workqueue(iwcm_wq);
+}
+
+module_init(iw_cm_init);
+module_exit(iw_cm_cleanup);
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
new file mode 100644
index 00000000000..3f6cc82564c
--- /dev/null
+++ b/drivers/infiniband/core/iwcm.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef IWCM_H
+#define IWCM_H
+
+enum iw_cm_state {
+ IW_CM_STATE_IDLE, /* unbound, inactive */
+ IW_CM_STATE_LISTEN, /* listen waiting for connect */
+ IW_CM_STATE_CONN_RECV, /* inbound waiting for user accept */
+ IW_CM_STATE_CONN_SENT, /* outbound waiting for peer accept */
+ IW_CM_STATE_ESTABLISHED, /* established */
+ IW_CM_STATE_CLOSING, /* disconnect */
+ IW_CM_STATE_DESTROYING /* object being deleted */
+};
+
+struct iwcm_id_private {
+ struct iw_cm_id id;
+ enum iw_cm_state state;
+ unsigned long flags;
+ struct ib_qp *qp;
+ struct completion destroy_comp;
+ wait_queue_head_t connect_wait;
+ struct list_head work_list;
+ spinlock_t lock;
+ atomic_t refcount;
+ struct list_head work_free_list;
+};
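+
+/*
+ * The iw_cm_id handed to clients is embedded in this structure; the
+ * core recovers its private state with container_of, as in:
+ *
+ *	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ */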
+
+#define IWCM_F_CALLBACK_DESTROY 1
+#define IWCM_F_CONNECT_WAIT 2
+
+#endif /* IWCM_H */
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1c3cfbbe6a9..082f03c158f 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1246,8 +1246,8 @@ static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
int i;
for (i = 0; i < MAX_MGMT_OUI; i++)
- /* Is there matching OUI for this vendor class ? */
- if (!memcmp(vendor_class->oui[i], oui, 3))
+ /* Is there matching OUI for this vendor class ? */
+ if (!memcmp(vendor_class->oui[i], oui, 3))
return i;
return -1;
@@ -2237,7 +2237,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
&mad_agent_priv->send_list, agent_list) {
if (mad_send_wr->status == IB_WC_SUCCESS) {
- mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
+ mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
}
}
@@ -2528,10 +2528,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
}
}
sg_list.addr = dma_map_single(qp_info->port_priv->
- device->dma_device,
+ device->dma_device,
&mad_priv->grh,
sizeof *mad_priv -
- sizeof mad_priv->header,
+ sizeof mad_priv->header,
DMA_FROM_DEVICE);
pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
@@ -2606,7 +2606,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
struct ib_qp *qp;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
- if (!attr) {
+ if (!attr) {
printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
return -ENOMEM;
}
@@ -2876,7 +2876,10 @@ static void ib_mad_init_device(struct ib_device *device)
{
int start, end, i;
- if (device->node_type == IB_NODE_SWITCH) {
+ if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ return;
+
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
start = 0;
end = 0;
} else {
@@ -2923,7 +2926,7 @@ static void ib_mad_remove_device(struct ib_device *device)
{
int i, num_ports, cur_port;
- if (device->node_type == IB_NODE_SWITCH) {
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
num_ports = 1;
cur_port = 0;
} else {
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d147f3bad2c..1da9adbccae 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -39,7 +39,6 @@
#include <linux/completion.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index ebcd5b18177..1ef79d015a1 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -33,8 +33,6 @@
* $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
*/
-#include <linux/dma-mapping.h>
-
#include "mad_priv.h"
#include "mad_rmpp.h"
@@ -60,6 +58,7 @@ struct mad_rmpp_recv {
int last_ack;
int seg_num;
int newwin;
+ int repwin;
__be64 tid;
u32 src_qp;
@@ -170,6 +169,32 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
return msg;
}
+static void ack_ds_ack(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *recv_wc)
+{
+ struct ib_mad_send_buf *msg;
+ struct ib_rmpp_mad *rmpp_mad;
+ int ret;
+
+ msg = alloc_response_msg(&agent->agent, recv_wc);
+ if (IS_ERR(msg))
+ return;
+
+ rmpp_mad = msg->mad;
+ memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);
+
+ rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+ ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+ rmpp_mad->rmpp_hdr.seg_num = 0;
+ rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);
+
+ ret = ib_post_send_mad(msg, NULL);
+ if (ret) {
+ ib_destroy_ah(msg->ah);
+ ib_free_send_mad(msg);
+ }
+}
+
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
@@ -271,6 +296,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
rmpp_recv->newwin = 1;
rmpp_recv->seg_num = 1;
rmpp_recv->last_ack = 0;
+ rmpp_recv->repwin = 1;
mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
rmpp_recv->tid = mad_hdr->tid;
@@ -365,7 +391,7 @@ static inline int window_size(struct ib_mad_agent_private *agent)
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
int seg_num)
{
- struct ib_mad_recv_buf *seg_buf;
+ struct ib_mad_recv_buf *seg_buf;
int cur_seg_num;
list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
@@ -591,6 +617,16 @@ static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
break;
}
+static void process_ds_ack(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *mad_recv_wc, int newwin)
+{
+ struct mad_rmpp_recv *rmpp_recv;
+
+ rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
+ if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
+ rmpp_recv->repwin = newwin;
+}
+
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
@@ -616,8 +652,18 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
spin_lock_irqsave(&agent->lock, flags);
mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
- if (!mad_send_wr)
- goto out; /* Unmatched ACK */
+ if (!mad_send_wr) {
+ if (!seg_num)
+ process_ds_ack(agent, mad_recv_wc, newwin);
+ goto out; /* Unmatched or DS RMPP ACK */
+ }
+
+ if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
+ (mad_send_wr->timeout)) {
+ spin_unlock_irqrestore(&agent->lock, flags);
+ ack_ds_ack(agent, mad_recv_wc);
+ return; /* Repeated ACK for DS RMPP transaction */
+ }
if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
(!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
@@ -656,6 +702,9 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
if (mad_send_wr->refcount == 1)
ib_reset_mad_timeout(mad_send_wr,
mad_send_wr->send_buf.timeout_ms);
+ spin_unlock_irqrestore(&agent->lock, flags);
+ ack_ds_ack(agent, mad_recv_wc);
+ return;
} else if (mad_send_wr->refcount == 1 &&
mad_send_wr->seg_num < mad_send_wr->newwin &&
mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
@@ -772,6 +821,39 @@ out:
return NULL;
}
+static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
+{
+ struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
+ struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
+ struct mad_rmpp_recv *rmpp_recv;
+ struct ib_ah_attr ah_attr;
+ unsigned long flags;
+ int newwin = 1;
+
+ if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
+ goto out;
+
+ spin_lock_irqsave(&agent->lock, flags);
+ list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
+ if (rmpp_recv->tid != mad_hdr->tid ||
+ rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
+ rmpp_recv->class_version != mad_hdr->class_version ||
+ (rmpp_recv->method & IB_MGMT_METHOD_RESP))
+ continue;
+
+ if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
+ continue;
+
+ if (rmpp_recv->slid == ah_attr.dlid) {
+ newwin = rmpp_recv->repwin;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&agent->lock, flags);
+out:
+ return newwin;
+}
+
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_rmpp_mad *rmpp_mad;
@@ -787,7 +869,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
return IB_RMPP_RESULT_INTERNAL;
}
- mad_send_wr->newwin = 1;
+ mad_send_wr->newwin = init_newwin(mad_send_wr);
/* We need to wait for the final ACK even if there isn't a response */
mad_send_wr->refcount += (mad_send_wr->timeout == 0);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index d6b84226bba..1706d3c7e95 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
+ * Copyright (c) 2006 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -75,6 +76,7 @@ struct ib_sa_device {
struct ib_sa_query {
void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
void (*release)(struct ib_sa_query *);
+ struct ib_sa_client *client;
struct ib_sa_port *port;
struct ib_mad_send_buf *mad_buf;
struct ib_sa_sm_ah *sm_ah;
@@ -415,6 +417,31 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
}
}
+void ib_sa_register_client(struct ib_sa_client *client)
+{
+ atomic_set(&client->users, 1);
+ init_completion(&client->comp);
+}
+EXPORT_SYMBOL(ib_sa_register_client);
+
+static inline void ib_sa_client_get(struct ib_sa_client *client)
+{
+ atomic_inc(&client->users);
+}
+
+static inline void ib_sa_client_put(struct ib_sa_client *client)
+{
+ if (atomic_dec_and_test(&client->users))
+ complete(&client->comp);
+}
+
+void ib_sa_unregister_client(struct ib_sa_client *client)
+{
+ ib_sa_client_put(client);
+ wait_for_completion(&client->comp);
+}
+EXPORT_SYMBOL(ib_sa_unregister_client);
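+/*
+ * Usage sketch: a client registers once, passes the client to each
+ * query (e.g. ib_sa_path_rec_get elsewhere in this file takes it as
+ * its first argument), and unregisters at cleanup; unregistration
+ * blocks until outstanding queries have released their references.
+ *
+ *	static struct ib_sa_client sa_client;
+ *
+ *	ib_sa_register_client(&sa_client);
+ *	...
+ *	ib_sa_unregister_client(&sa_client);
+ */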
+
/**
* ib_sa_cancel_query - try to cancel an SA query
* @id:ID of query to cancel
@@ -557,6 +584,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
/**
* ib_sa_path_rec_get - Start a Path get query
+ * @client:SA client
* @device:device to send query on
* @port_num: port number to send query on
* @rec:Path Record to send in query
@@ -579,7 +607,8 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
* error code. Otherwise it is a query ID that can be used to cancel
* the query.
*/
-int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
+int ib_sa_path_rec_get(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
int timeout_ms, gfp_t gfp_mask,
@@ -614,8 +643,10 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
goto err1;
}
- query->callback = callback;
- query->context = context;
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
mad = query->sa_query.mad_buf->mad;
init_mad(mad, agent);
@@ -639,6 +670,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
err2:
*sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
ib_free_send_mad(query->sa_query.mad_buf);
err1:
@@ -671,6 +703,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
/**
* ib_sa_service_rec_query - Start Service Record operation
+ * @client:SA client
* @device:device to send request on
* @port_num: port number to send request on
* @method:SA method - should be get, set, or delete
@@ -695,7 +728,8 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
* error code. Otherwise it is a request ID that can be used to cancel
* the query.
*/
-int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
+int ib_sa_service_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
int timeout_ms, gfp_t gfp_mask,
@@ -735,8 +769,10 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
goto err1;
}
- query->callback = callback;
- query->context = context;
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
mad = query->sa_query.mad_buf->mad;
init_mad(mad, agent);
@@ -761,6 +797,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
err2:
*sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
ib_free_send_mad(query->sa_query.mad_buf);
err1:
@@ -791,7 +828,8 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
-int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
+int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
@@ -827,8 +865,10 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
goto err1;
}
- query->callback = callback;
- query->context = context;
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
mad = query->sa_query.mad_buf->mad;
init_mad(mad, agent);
@@ -853,6 +893,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
err2:
*sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
ib_free_send_mad(query->sa_query.mad_buf);
err1:
@@ -887,8 +928,9 @@ static void send_handler(struct ib_mad_agent *agent,
idr_remove(&query_idr, query->id);
spin_unlock_irqrestore(&idr_lock, flags);
- ib_free_send_mad(mad_send_wc->send_buf);
+ ib_free_send_mad(mad_send_wc->send_buf);
kref_put(&query->sm_ah->ref, free_sm_ah);
+ ib_sa_client_put(query->client);
query->release(query);
}
@@ -919,7 +961,10 @@ static void ib_sa_add_one(struct ib_device *device)
struct ib_sa_device *sa_dev;
int s, e, i;
- if (device->node_type == IB_NODE_SWITCH)
+ if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ return;
+
+ if (device->node_type == RDMA_NODE_IB_SWITCH)
s = e = 0;
else {
s = 1;
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 35852e794e2..54b81e17ad5 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -64,7 +64,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
/* C14-9:2 */
if (hop_ptr && hop_ptr < hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
+ if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
/* smp->return_path set when received */
@@ -77,7 +77,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
if (hop_ptr == hop_cnt) {
/* smp->return_path set when received */
smp->hop_ptr++;
- return (node_type == IB_NODE_SWITCH ||
+ return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_dlid == IB_LID_PERMISSIVE);
}
@@ -95,7 +95,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
+ if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
smp->hop_ptr--;
@@ -107,7 +107,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
if (hop_ptr == 1) {
smp->hop_ptr--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
- return (node_type == IB_NODE_SWITCH ||
+ return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_slid == IB_LID_PERMISSIVE);
}
@@ -142,7 +142,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
/* C14-9:2 -- intermediate hop */
if (hop_ptr && hop_ptr < hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
+ if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
smp->return_path[hop_ptr] = port_num;
@@ -156,7 +156,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
smp->return_path[hop_ptr] = port_num;
/* smp->hop_ptr updated when sending */
- return (node_type == IB_NODE_SWITCH ||
+ return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_dlid == IB_LID_PERMISSIVE);
}
@@ -175,7 +175,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
- if (node_type != IB_NODE_SWITCH)
+ if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
/* smp->hop_ptr updated when sending */
@@ -190,7 +190,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
return 1;
}
/* smp->hop_ptr updated when sending */
- return (node_type == IB_NODE_SWITCH);
+ return (node_type == RDMA_NODE_IB_SWITCH);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 21f9282c1b2..709323c14c5 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -68,7 +68,7 @@ struct port_table_attribute {
int index;
};
-static inline int ibdev_is_alive(const struct ib_device *dev)
+static inline int ibdev_is_alive(const struct ib_device *dev)
{
return dev->reg_state == IB_DEV_REGISTERED;
}
@@ -589,10 +589,11 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf)
return -ENODEV;
switch (dev->node_type) {
- case IB_NODE_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
- case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
- case IB_NODE_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
- default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
+ case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
+ case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
+ case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
+ case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
+ default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
}
}
@@ -708,7 +709,7 @@ int ib_device_register_sysfs(struct ib_device *device)
if (ret)
goto err_put;
- if (device->node_type == IB_NODE_SWITCH) {
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
ret = add_port(device, 0);
if (ret)
goto err_put;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index c1c6fda9452..ad4f4d5c292 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -309,9 +309,9 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
info = evt->param.apr_rcvd.apr_info;
break;
case IB_CM_SIDR_REQ_RECEIVED:
- uvt->resp.u.sidr_req_resp.pkey =
+ uvt->resp.u.sidr_req_resp.pkey =
evt->param.sidr_req_rcvd.pkey;
- uvt->resp.u.sidr_req_resp.port =
+ uvt->resp.u.sidr_req_resp.port =
evt->param.sidr_req_rcvd.port;
uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
break;
@@ -1237,7 +1237,7 @@ static struct class ucm_class = {
static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
struct ib_ucm_device *dev;
-
+
dev = container_of(class_dev, struct ib_ucm_device, class_dev);
return sprintf(buf, "%s\n", dev->ib_dev->name);
}
@@ -1247,7 +1247,8 @@ static void ib_ucm_add_one(struct ib_device *device)
{
struct ib_ucm_device *ucm_dev;
- if (!device->alloc_ucontext)
+ if (!device->alloc_ucontext ||
+ rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 1273f8807e8..807fbd6b841 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -1032,7 +1032,10 @@ static void ib_umad_add_one(struct ib_device *device)
struct ib_umad_device *umad_dev;
int s, e, i;
- if (device->node_type == IB_NODE_SWITCH)
+ if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ return;
+
+ if (device->node_type == RDMA_NODE_IB_SWITCH)
s = e = 0;
else {
s = 1;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 30923eb68ec..b72c7f69ca9 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -155,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
}
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
- struct ib_ucontext *context)
+ struct ib_ucontext *context, int nested)
{
struct ib_uobject *uobj;
@@ -163,7 +163,10 @@ static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
if (!uobj)
return NULL;
- down_read(&uobj->mutex);
+ if (nested)
+ down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
+ else
+ down_read(&uobj->mutex);
if (!uobj->live) {
put_uobj_read(uobj);
return NULL;
@@ -190,17 +193,18 @@ static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
return uobj;
}
-static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context)
+static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
+ int nested)
{
struct ib_uobject *uobj;
- uobj = idr_read_uobj(idr, id, context);
+ uobj = idr_read_uobj(idr, id, context, nested);
return uobj ? uobj->object : NULL;
}
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
- return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context);
+ return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}
static void put_pd_read(struct ib_pd *pd)
@@ -208,9 +212,9 @@ static void put_pd_read(struct ib_pd *pd)
put_uobj_read(pd->uobject);
}
-static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context)
+static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
- return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context);
+ return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}
static void put_cq_read(struct ib_cq *cq)
@@ -220,7 +224,7 @@ static void put_cq_read(struct ib_cq *cq)
static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
- return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context);
+ return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}
static void put_ah_read(struct ib_ah *ah)
@@ -230,7 +234,7 @@ static void put_ah_read(struct ib_ah *ah)
static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
- return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context);
+ return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
static void put_qp_read(struct ib_qp *qp)
@@ -240,7 +244,7 @@ static void put_qp_read(struct ib_qp *qp)
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
- return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context);
+ return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}
static void put_srq_read(struct ib_srq *srq)
@@ -837,7 +841,6 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
err_copy:
idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
-
err_free:
ib_destroy_cq(cq);
@@ -867,7 +870,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
- cq = idr_read_cq(cmd.cq_handle, file->ucontext);
+ cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
if (!cq)
return -EINVAL;
@@ -875,11 +878,10 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
if (ret)
goto out;
- memset(&resp, 0, sizeof resp);
resp.cqe = cq->cqe;
if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ &resp, sizeof resp.cqe))
ret = -EFAULT;
out:
@@ -894,7 +896,6 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
{
struct ib_uverbs_poll_cq cmd;
struct ib_uverbs_poll_cq_resp *resp;
- struct ib_uobject *uobj;
struct ib_cq *cq;
struct ib_wc *wc;
int ret = 0;
@@ -915,16 +916,15 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
goto out_wc;
}
- uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
- if (!uobj) {
+ cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+ if (!cq) {
ret = -EINVAL;
goto out;
}
- cq = uobj->object;
resp->count = ib_poll_cq(cq, cmd.ne, wc);
- put_uobj_read(uobj);
+ put_cq_read(cq);
for (i = 0; i < resp->count; i++) {
resp->wc[i].wr_id = wc[i].wr_id;
@@ -959,21 +959,19 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_req_notify_cq cmd;
- struct ib_uobject *uobj;
struct ib_cq *cq;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
- if (!uobj)
+ cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+ if (!cq)
return -EINVAL;
- cq = uobj->object;
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
- put_uobj_read(uobj);
+ put_cq_read(cq);
return in_len;
}
@@ -1064,9 +1062,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
- scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
+ scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
- scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
+ scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
ret = -EINVAL;
@@ -1274,6 +1272,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_modify_qp cmd;
+ struct ib_udata udata;
struct ib_qp *qp;
struct ib_qp_attr *attr;
int ret;
@@ -1281,6 +1280,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
+ out_len);
+
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr)
return -ENOMEM;
@@ -1337,7 +1339,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
- ret = ib_modify_qp(qp, attr, cmd.attr_mask);
+ ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);
put_qp_read(qp);
@@ -1674,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
break;
}
-
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
@@ -1724,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
break;
}
-
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
@@ -2055,6 +2055,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_modify_srq cmd;
+ struct ib_udata udata;
struct ib_srq *srq;
struct ib_srq_attr attr;
int ret;
@@ -2062,6 +2063,9 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
+ out_len);
+
srq = idr_read_srq(cmd.srq_handle, file->ucontext);
if (!srq)
return -EINVAL;
@@ -2069,7 +2073,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
- ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
+ ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
put_srq_read(srq);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 468999c3880..8b5dd3649bb 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -79,6 +79,23 @@ enum ib_rate mult_to_ib_rate(int mult)
}
EXPORT_SYMBOL(mult_to_ib_rate);
+enum rdma_transport_type
+rdma_node_get_transport(enum rdma_node_type node_type)
+{
+ switch (node_type) {
+ case RDMA_NODE_IB_CA:
+ case RDMA_NODE_IB_SWITCH:
+ case RDMA_NODE_IB_ROUTER:
+ return RDMA_TRANSPORT_IB;
+ case RDMA_NODE_RNIC:
+ return RDMA_TRANSPORT_IWARP;
+ default:
+ BUG();
+ return 0;
+ }
+}
+EXPORT_SYMBOL(rdma_node_get_transport);
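+/*
+ * Usage sketch: IB-only consumers use this helper to skip iWARP
+ * devices, as the MAD, SA and ucm changes in this patch do:
+ *
+ *	if (rdma_node_get_transport(device->node_type) !=
+ *	    RDMA_TRANSPORT_IB)
+ *		return;
+ */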
+
/* Protection domains */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
@@ -231,7 +248,7 @@ int ib_modify_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask)
{
- return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
+ return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_srq);
@@ -547,7 +564,7 @@ int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
+ return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/infiniband/hw/amso1100/Kbuild
new file mode 100644
index 00000000000..06964c4af84
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/Kbuild
@@ -0,0 +1,8 @@
+ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
+
+iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
+ c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/infiniband/hw/amso1100/Kconfig b/drivers/infiniband/hw/amso1100/Kconfig
new file mode 100644
index 00000000000..809cb14ac6d
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/Kconfig
@@ -0,0 +1,15 @@
+config INFINIBAND_AMSO1100
+ tristate "Ammasso 1100 HCA support"
+ depends on PCI && INET && INFINIBAND
+ ---help---
+ This is a low-level driver for the Ammasso 1100 host
+ channel adapter (HCA).
+
+config INFINIBAND_AMSO1100_DEBUG
+ bool "Verbose debugging output"
+ depends on INFINIBAND_AMSO1100
+ default n
+ ---help---
+ This option causes the amso1100 driver to produce a bunch of
+ debug messages. Select this if you are developing the driver
+ or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
new file mode 100644
index 00000000000..9e9120f3601
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -0,0 +1,1255 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include <rdma/ib_smi.h>
+#include "c2.h"
+#include "c2_provider.h"
+
+MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
+MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int c2_up(struct net_device *netdev);
+static int c2_down(struct net_device *netdev);
+static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+static void c2_tx_interrupt(struct net_device *netdev);
+static void c2_rx_interrupt(struct net_device *netdev);
+static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static void c2_tx_timeout(struct net_device *netdev);
+static int c2_change_mtu(struct net_device *netdev, int new_mtu);
+static void c2_reset(struct c2_port *c2_port);
+static struct net_device_stats *c2_get_stats(struct net_device *netdev);
+
+static struct pci_device_id c2_pci_table[] = {
+ { PCI_DEVICE(0x18b8, 0xb001) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, c2_pci_table);
+
+static void c2_print_macaddr(struct net_device *netdev)
+{
+ pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
+ "IRQ %u\n", netdev->name,
+ netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+ netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
+ netdev->irq);
+}
+
+static void c2_set_rxbufsize(struct c2_port *c2_port)
+{
+ struct net_device *netdev = c2_port->netdev;
+
+ if (netdev->mtu > RX_BUF_SIZE)
+ c2_port->rx_buf_size =
+ netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
+ NET_IP_ALIGN;
+ else
+ c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
+}
+
+/*
+ * Allocate TX ring elements and chain them together.
+ * One-to-one association of adapter descriptors with ring elements.
+ */
+static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
+ dma_addr_t base, void __iomem * mmio_txp_ring)
+{
+ struct c2_tx_desc *tx_desc;
+ struct c2_txp_desc __iomem *txp_desc;
+ struct c2_element *elem;
+ int i;
+
+ tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
+ if (!tx_ring->start)
+ return -ENOMEM;
+
+ elem = tx_ring->start;
+ tx_desc = vaddr;
+ txp_desc = mmio_txp_ring;
+ for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
+ tx_desc->len = 0;
+ tx_desc->status = 0;
+
+ /* Set TXP_HTXD_UNINIT */
+ __raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+ (void __iomem *) txp_desc + C2_TXP_ADDR);
+ __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
+ __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+ (void __iomem *) txp_desc + C2_TXP_FLAGS);
+
+ elem->skb = NULL;
+ elem->ht_desc = tx_desc;
+ elem->hw_desc = txp_desc;
+
+ if (i == tx_ring->count - 1) {
+ elem->next = tx_ring->start;
+ tx_desc->next_offset = base;
+ } else {
+ elem->next = elem + 1;
+ tx_desc->next_offset =
+ base + (i + 1) * sizeof(*tx_desc);
+ }
+ }
+
+ tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
+
+ return 0;
+}
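+/*
+ * Usage sketch: since the final element links back to ring->start,
+ * a full traversal of either ring is a do/while over elem->next, as
+ * the fill and clean routines below do; process() stands in for the
+ * per-element work.
+ *
+ *	elem = ring->start;
+ *	do {
+ *		process(elem);
+ *	} while ((elem = elem->next) != ring->start);
+ */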
+
+/*
+ * Allocate RX ring elements and chain them together.
+ * One-to-one association of adapter descriptors with ring elements.
+ */
+static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
+ dma_addr_t base, void __iomem * mmio_rxp_ring)
+{
+ struct c2_rx_desc *rx_desc;
+ struct c2_rxp_desc __iomem *rxp_desc;
+ struct c2_element *elem;
+ int i;
+
+ rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
+ if (!rx_ring->start)
+ return -ENOMEM;
+
+ elem = rx_ring->start;
+ rx_desc = vaddr;
+ rxp_desc = mmio_rxp_ring;
+ for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
+ rx_desc->len = 0;
+ rx_desc->status = 0;
+
+ /* Set RXP_HRXD_UNINIT */
+ __raw_writew(cpu_to_be16(RXP_HRXD_OK),
+ (void __iomem *) rxp_desc + C2_RXP_STATUS);
+ __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
+ __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
+ __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+ (void __iomem *) rxp_desc + C2_RXP_ADDR);
+ __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+ (void __iomem *) rxp_desc + C2_RXP_FLAGS);
+
+ elem->skb = NULL;
+ elem->ht_desc = rx_desc;
+ elem->hw_desc = rxp_desc;
+
+ if (i == rx_ring->count - 1) {
+ elem->next = rx_ring->start;
+ rx_desc->next_offset = base;
+ } else {
+ elem->next = elem + 1;
+ rx_desc->next_offset =
+ base + (i + 1) * sizeof(*rx_desc);
+ }
+ }
+
+ rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
+
+ return 0;
+}
+
+/* Setup buffer for receiving */
+static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
+{
+ struct c2_dev *c2dev = c2_port->c2dev;
+ struct c2_rx_desc *rx_desc = elem->ht_desc;
+ struct sk_buff *skb;
+ dma_addr_t mapaddr;
+ u32 maplen;
+ struct c2_rxp_hdr *rxp_hdr;
+
+ skb = dev_alloc_skb(c2_port->rx_buf_size);
+ if (unlikely(!skb)) {
+ pr_debug("%s: out of memory for receive\n",
+ c2_port->netdev->name);
+ return -ENOMEM;
+ }
+
+ /* Zero out the rxp hdr in the sk_buff */
+ memset(skb->data, 0, sizeof(*rxp_hdr));
+
+ skb->dev = c2_port->netdev;
+
+ maplen = c2_port->rx_buf_size;
+ mapaddr =
+ pci_map_single(c2dev->pcidev, skb->data, maplen,
+ PCI_DMA_FROMDEVICE);
+
+ /* Set the sk_buff RXP_header to RXP_HRXD_READY */
+ rxp_hdr = (struct c2_rxp_hdr *) skb->data;
+ rxp_hdr->flags = RXP_HRXD_READY;
+
+ __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
+ __raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
+ elem->hw_desc + C2_RXP_LEN);
+ __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
+ __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+
+ elem->skb = skb;
+ elem->mapaddr = mapaddr;
+ elem->maplen = maplen;
+ rx_desc->len = maplen;
+
+ return 0;
+}
+
+/*
+ * Allocate buffers for the Rx ring
+ * For receive: rx_ring.to_clean is next received frame
+ */
+static int c2_rx_fill(struct c2_port *c2_port)
+{
+ struct c2_ring *rx_ring = &c2_port->rx_ring;
+ struct c2_element *elem;
+ int ret = 0;
+
+ elem = rx_ring->start;
+ do {
+ if (c2_rx_alloc(c2_port, elem)) {
+ ret = 1;
+ break;
+ }
+ } while ((elem = elem->next) != rx_ring->start);
+
+ rx_ring->to_clean = rx_ring->start;
+ return ret;
+}
+
+/* Free all buffers in RX ring, assumes receiver stopped */
+static void c2_rx_clean(struct c2_port *c2_port)
+{
+ struct c2_dev *c2dev = c2_port->c2dev;
+ struct c2_ring *rx_ring = &c2_port->rx_ring;
+ struct c2_element *elem;
+ struct c2_rx_desc *rx_desc;
+
+ elem = rx_ring->start;
+ do {
+ rx_desc = elem->ht_desc;
+ rx_desc->len = 0;
+
+ __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
+ __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
+ __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
+ __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
+ elem->hw_desc + C2_RXP_ADDR);
+ __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
+ elem->hw_desc + C2_RXP_FLAGS);
+
+ if (elem->skb) {
+ pci_unmap_single(c2dev->pcidev, elem->mapaddr,
+ elem->maplen, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(elem->skb);
+ elem->skb = NULL;
+ }
+ } while ((elem = elem->next) != rx_ring->start);
+}
+
+static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
+{
+ struct c2_tx_desc *tx_desc = elem->ht_desc;
+
+ tx_desc->len = 0;
+
+ pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
+ PCI_DMA_TODEVICE);
+
+ if (elem->skb) {
+ dev_kfree_skb_any(elem->skb);
+ elem->skb = NULL;
+ }
+
+ return 0;
+}
+
+/* Free all buffers in TX ring, assumes transmitter stopped */
+static void c2_tx_clean(struct c2_port *c2_port)
+{
+ struct c2_ring *tx_ring = &c2_port->tx_ring;
+ struct c2_element *elem;
+ struct c2_txp_desc txp_htxd;
+ int retry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c2_port->tx_lock, flags);
+
+ elem = tx_ring->start;
+
+ do {
+ retry = 0;
+ do {
+ txp_htxd.flags =
+ readw(elem->hw_desc + C2_TXP_FLAGS);
+
+ if (txp_htxd.flags == TXP_HTXD_READY) {
+ retry = 1;
+ __raw_writew(0,
+ elem->hw_desc + C2_TXP_LEN);
+ __raw_writeq(0,
+ elem->hw_desc + C2_TXP_ADDR);
+ __raw_writew(cpu_to_be16(TXP_HTXD_DONE),
+ elem->hw_desc + C2_TXP_FLAGS);
+ c2_port->netstats.tx_dropped++;
+ break;
+ } else {
+ __raw_writew(0,
+ elem->hw_desc + C2_TXP_LEN);
+ __raw_writeq(cpu_to_be64(0x1122334455667788ULL),
+ elem->hw_desc + C2_TXP_ADDR);
+ __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
+ elem->hw_desc + C2_TXP_FLAGS);
+ }
+
+ c2_tx_free(c2_port->c2dev, elem);
+
+ } while ((elem = elem->next) != tx_ring->start);
+ } while (retry);
+
+ c2_port->tx_avail = c2_port->tx_ring.count - 1;
+ c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
+
+ if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
+ netif_wake_queue(c2_port->netdev);
+
+ spin_unlock_irqrestore(&c2_port->tx_lock, flags);
+}
+
+/*
+ * Process transmit descriptors marked 'DONE' by the firmware,
+ * freeing up their unneeded sk_buffs.
+ */
+static void c2_tx_interrupt(struct net_device *netdev)
+{
+ struct c2_port *c2_port = netdev_priv(netdev);
+ struct c2_dev *c2dev = c2_port->c2dev;
+ struct c2_ring *tx_ring = &c2_port->tx_ring;
+ struct c2_element *elem;
+ struct c2_txp_desc txp_htxd;
+
+ spin_lock(&c2_port->tx_lock);
+
+ for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
+ elem = elem->next) {
+ txp_htxd.flags =
+ be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
+
+ if (txp_htxd.flags != TXP_HTXD_DONE)
+ break;
+
+ if (netif_msg_tx_done(c2_port)) {
+ /* PCI reads are expensive in fast path */
+ txp_htxd.len =
+ be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
+ pr_debug("%s: tx done slot %3Zu status 0x%x len "
+ "%5u bytes\n",
+ netdev->name, elem - tx_ring->start,
+ txp_htxd.flags, txp_htxd.len);
+ }
+
+ c2_tx_free(c2dev, elem);
+ ++(c2_port->tx_avail);
+ }
+
+ tx_ring->to_clean = elem;
+
+ if (netif_queue_stopped(netdev)
+ && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
+ netif_wake_queue(netdev);
+
+ spin_unlock(&c2_port->tx_lock);
+}
+
+static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
+{
+ struct c2_rx_desc *rx_desc = elem->ht_desc;
+ struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
+
+ if (rxp_hdr->status != RXP_HRXD_OK ||
+ rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
+ pr_debug("BAD RXP_HRXD\n");
+ pr_debug(" rx_desc : %p\n", rx_desc);
+ pr_debug(" index : %Zu\n",
+ elem - c2_port->rx_ring.start);
+ pr_debug(" len : %u\n", rx_desc->len);
+ pr_debug(" rxp_hdr : %p [PA %p]\n", rxp_hdr,
+ (void *) __pa((unsigned long) rxp_hdr));
+ pr_debug(" flags : 0x%x\n", rxp_hdr->flags);
+ pr_debug(" status: 0x%x\n", rxp_hdr->status);
+ pr_debug(" len : %u\n", rxp_hdr->len);
+ pr_debug(" rsvd : 0x%x\n", rxp_hdr->rsvd);
+ }
+
+ /* Set up the skb for reuse since we're dropping this packet */
+ elem->skb->tail = elem->skb->data = elem->skb->head;
+
+ /* Zero out the rxp hdr in the sk_buff */
+ memset(elem->skb->data, 0, sizeof(*rxp_hdr));
+
+ /* Write the descriptor to the adapter's rx ring */
+ __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
+ __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
+ __raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
+ elem->hw_desc + C2_RXP_LEN);
+ __raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
+ __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
+
+ pr_debug("packet dropped\n");
+ c2_port->netstats.rx_dropped++;
+}
+
+static void c2_rx_interrupt(struct net_device *netdev)
+{
+ struct c2_port *c2_port = netdev_priv(netdev);
+ struct c2_dev *c2dev = c2_port->c2dev;
+ struct c2_ring *rx_ring = &c2_port->rx_ring;
+ struct c2_element *elem;
+ struct c2_rx_desc *rx_desc;
+ struct c2_rxp_hdr *rxp_hdr;
+ struct sk_buff *skb;
+ dma_addr_t mapaddr;
+ u32 maplen, buflen;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c2dev->lock, flags);
+
+ /* Begin where we left off */
+ rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
+
+ for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
+ elem = elem->next) {
+ rx_desc = elem->ht_desc;
+ mapaddr = elem->mapaddr;
+ maplen = elem->maplen;
+ skb = elem->skb;
+ rxp_hdr = (struct c2_rxp_hdr *) skb->data;
+
+ if (rxp_hdr->flags != RXP_HRXD_DONE)
+ break;
+ buflen = rxp_hdr->len;
+
+ /* Sanity check the RXP header */
+ if (rxp_hdr->status != RXP_HRXD_OK ||
+ buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
+ c2_rx_error(c2_port, elem);
+ continue;
+ }
+
+ /*
+ * Allocate and map a new skb for replenishing the host
+ * RX desc
+ */
+ if (c2_rx_alloc(c2_port, elem)) {
+ c2_rx_error(c2_port, elem);
+ continue;
+ }
+
+ /* Unmap the old skb */
+ pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
+ PCI_DMA_FROMDEVICE);
+
+ prefetch(skb->data);
+
+ /*
+ * Skip past the leading 8 bytes comprising of the
+ * "struct c2_rxp_hdr", prepended by the adapter
+ * to the usual Ethernet header ("struct ethhdr"),
+ * to the start of the raw Ethernet packet.
+ *
+ * Fix up the various fields in the sk_buff before
+ * passing it up to netif_rx(). The transfer size
+ * (in bytes) specified by the adapter len field of
+ * the "struct rxp_hdr_t" does NOT include the
+ * "sizeof(struct c2_rxp_hdr)".
+ */
+ skb->data += sizeof(*rxp_hdr);
+ skb->tail = skb->data + buflen;
+ skb->len = buflen;
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netif_rx(skb);
+
+ netdev->last_rx = jiffies;
+ c2_port->netstats.rx_packets++;
+ c2_port->netstats.rx_bytes += buflen;
+ }
+
+ /* Save where we left off */
+ rx_ring->to_clean = elem;
+ c2dev->cur_rx = elem - rx_ring->start;
+ C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
+
+ spin_unlock_irqrestore(&c2dev->lock, flags);
+}
+
+/*
+ * Top-level interrupt handler: services the netisr0 TX & RX
+ * interrupts as well as the RNIC (DMA) interrupts.
+ */
+static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned int netisr0, dmaisr;
+ int handled = 0;
+ struct c2_dev *c2dev = (struct c2_dev *) dev_id;
+
+ /* Process CCILNET interrupts */
+ netisr0 = readl(c2dev->regs + C2_NISR0);
+ if (netisr0) {
+
+ /*
+ * There is an issue with the firmware that always
+ * provides the status of RX for both TX & RX
+ * interrupts. So process both queues here.
+ */
+ c2_rx_interrupt(c2dev->netdev);
+ c2_tx_interrupt(c2dev->netdev);
+
+ /* Clear the interrupt */
+ writel(netisr0, c2dev->regs + C2_NISR0);
+ handled++;
+ }
+
+ /* Process RNIC interrupts */
+ dmaisr = readl(c2dev->regs + C2_DISR);
+ if (dmaisr) {
+ writel(dmaisr, c2dev->regs + C2_DISR);
+ c2_rnic_interrupt(c2dev);
+ handled++;
+ }
+
+ if (handled) {
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+static int c2_up(struct net_device *netdev)
+{
+ struct c2_port *c2_port = netdev_priv(netdev);
+ struct c2_dev *c2dev = c2_port->c2dev;
+ struct c2_element *elem;
+ struct c2_rxp_hdr *rxp_hdr;
+ struct in_device *in_dev;
+ size_t rx_size, tx_size;
+ int ret, i;
+ unsigned int netimr0;
+
+ if (netif_msg_ifup(c2_port))
+ pr_debug("%s: enabling interface\n", netdev->name);
+
+ /* Set the Rx buffer size based on MTU */
+ c2_set_rxbufsize(c2_port);
+
+ /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
+ rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
+ tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
+
+ c2_port->mem_size = tx_size + rx_size;
+ c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
+ &c2_port->dma);
+ if (c2_port->mem == NULL) {
+ pr_debug("Unable to allocate memory for "
+ "host descriptor rings\n");
+ return -ENOMEM;
+ }
+
+ memset(c2_port->mem, 0, c2_port->mem_size);
+
+ /* Create the Rx host descriptor ring */
+ if ((ret =
+ c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
+ c2dev->mmio_rxp_ring))) {
+ pr_debug("Unable to create RX ring\n");
+ goto bail0;
+ }
+
+ /* Allocate Rx buffers for the host descriptor ring */
+ if (c2_rx_fill(c2_port)) {
+ pr_debug("Unable to fill RX ring\n");
+ goto bail1;
+ }
+
+ /* Create the Tx host descriptor ring */
+ if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
+ c2_port->dma + rx_size,
+ c2dev->mmio_txp_ring))) {
+ pr_debug("Unable to create TX ring\n");
+ goto bail1;
+ }
+
+ /* Set the TX pointer to where we left off */
+ c2_port->tx_avail = c2_port->tx_ring.count - 1;
+ c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
+ c2_port->tx_ring.start + c2dev->cur_tx;
+
+ /* missing: Initialize MAC */
+
+ BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
+
+ /* Reset the adapter, ensures the driver is in sync with the RXP */
+ c2_reset(c2_port);
+
+ /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
+ for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
+ i++, elem++) {
+ rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
+ rxp_hdr->flags = 0;
+ __raw_writew(cpu_to_be16(RXP_HRXD_READY),
+ elem->hw_desc + C2_RXP_FLAGS);
+ }
+
+ /* Enable network packets */
+ netif_start_queue(netdev);
+
+ /* Enable IRQ */
+ writel(0, c2dev->regs + C2_IDIS);
+ netimr0 = readl(c2dev->regs + C2_NIMR0);
+ netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
+ writel(netimr0, c2dev->regs + C2_NIMR0);
+
+ /* Tell the stack to ignore arp requests for ipaddrs bound to
+ * other interfaces. This is needed to prevent the host stack
+ * from responding to arp requests to the ipaddr bound on the
+ * rdma interface.
+ */
+ in_dev = in_dev_get(netdev);
+ in_dev->cnf.arp_ignore = 1;
+ in_dev_put(in_dev);
+
+ return 0;
+
+ bail1:
+ c2_rx_clean(c2_port);
+ kfree(c2_port->rx_ring.start);
+
+ bail0:
+ pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
+ c2_port->dma);
+
+ return ret;
+}
+
+static int c2_down(struct net_device *netdev)
+{
+ struct c2_port *c2_port = netdev_priv(netdev);
+ struct c2_dev *c2dev = c2_port->c2dev;
+
+ if (netif_msg_ifdown(c2_port))
+ pr_debug("%s: disabling interface\n",
+ netdev->name);
+
+ /* Reap any transmits that have already completed */
+ c2_tx_interrupt(netdev);
+
+ /* Disable network packets */
+ netif_stop_queue(netdev);
+
+ /* Disable IRQs by clearing the interrupt mask */
+ writel(1, c2dev->regs + C2_IDIS);
+ writel(0, c2dev->regs + C2_NIMR0);
+
+ /* missing: Stop transmitter */
+
+ /* missing: Stop receiver */
+
+ /* Reset the adapter, ensures the driver is in sync with the RXP */
+ c2_reset(c2_port);
+
+ /* missing: Turn off LEDs here */
+
+ /* Free all buffers in the host descriptor rings */
+ c2_tx_clean(c2_port);
+ c2_rx_clean(c2_port);
+
+ /* Free the host descriptor rings */
+ kfree(c2_port->rx_ring.start);
+ kfree(c2_port->tx_ring.start);
+ pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
+ c2_port->dma);
+
+ return 0;
+}
+
+static void c2_reset(struct c2_port *c2_port)
+{
+ struct c2_dev *c2dev = c2_port->c2dev;
+ unsigned int cur_rx = c2dev->cur_rx;
+
+ /* Tell the hardware to quiesce */
+ C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
+
+ /*
+ * The hardware will reset the C2_PCI_HRX_QUI bit once
+ * the RXP is quiesced. Wait 2 seconds for this.
+ */
+ ssleep(2);
+
+ cur_rx = C2_GET_CUR_RX(c2dev);
+
+ if (cur_rx & C2_PCI_HRX_QUI)
+ pr_debug("c2_reset: failed to quiesce the hardware!\n");
+
+ cur_rx &= ~C2_PCI_HRX_QUI;
+
+ c2dev->cur_rx = cur_rx;
+
+ pr_debug("Current RX: %u\n", c2dev->cur_rx);
+}
+
+static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct c2_port *c2_port = netdev_priv(netdev);
+ struct c2_dev *c2dev = c2_port->c2dev;
+ struct c2_ring *tx_ring = &c2_port->tx_ring;
+ struct c2_element *elem;
+ dma_addr_t mapaddr;
+ u32 maplen;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&c2_port->tx_lock, flags);
+
+ if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&c2_port->tx_lock, flags);
+
+ pr_debug("%s: Tx ring full when queue awake!\n",
+ netdev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ maplen = skb_headlen(skb);
+ mapaddr =
+ pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
+
+ elem = tx_ring->to_use;
+ elem->skb = skb;
+ elem->mapaddr = mapaddr;
+ elem->maplen = maplen;
+
+ /* Tell HW to xmit */
+ __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
+ __raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
+ __raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
+
+ c2_port->netstats.tx_packets++;
+ c2_port->netstats.tx_bytes += maplen;
+
+ /* Loop thru additional data fragments and queue them */
+ if (skb_shinfo(skb)->nr_frags) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ maplen = frag->size;
+ mapaddr =
+ pci_map_page(c2dev->pcidev, frag->page,
+ frag->page_offset, maplen,
+ PCI_DMA_TODEVICE);
+
+ elem = elem->next;
+ elem->skb = NULL;
+ elem->mapaddr = mapaddr;
+ elem->maplen = maplen;
+
+ /* Tell HW to xmit */
+ __raw_writeq(cpu_to_be64(mapaddr),
+ elem->hw_desc + C2_TXP_ADDR);
+ __raw_writew(cpu_to_be16(maplen),
+ elem->hw_desc + C2_TXP_LEN);
+ __raw_writew(cpu_to_be16(TXP_HTXD_READY),
+ elem->hw_desc + C2_TXP_FLAGS);
+
+ c2_port->netstats.tx_packets++;
+ c2_port->netstats.tx_bytes += maplen;
+ }
+ }
+
+ tx_ring->to_use = elem->next;
+ c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
+
+ if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
+ netif_stop_queue(netdev);
+ if (netif_msg_tx_queued(c2_port))
+ pr_debug("%s: transmit queue full\n",
+ netdev->name);
+ }
+
+ spin_unlock_irqrestore(&c2_port->tx_lock, flags);
+
+ netdev->trans_start = jiffies;
+
+ return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *c2_get_stats(struct net_device *netdev)
+{
+ struct c2_port *c2_port = netdev_priv(netdev);
+
+ return &c2_port->netstats;
+}
+
+static void c2_tx_timeout(struct net_device *netdev)
+{
+ struct c2_port *c2_port = netdev_priv(netdev);
+
+ if (netif_msg_timer(c2_port))
+ pr_debug("%s: tx timeout\n", netdev->name);
+
+ c2_tx_clean(c2_port);
+}
+
+static int c2_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int ret = 0;
+
+ if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+ return -EINVAL;
+
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev)) {
+ c2_down(netdev);
+
+ c2_up(netdev);
+ }
+
+ return ret;
+}
+
+/* Initialize network device */
+static struct net_device *c2_devinit(struct c2_dev *c2dev,
+ void __iomem * mmio_addr)
+{
+ struct c2_port *c2_port = NULL;
+ struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
+
+ if (!netdev) {
+ pr_debug("c2_port etherdev alloc failed");
+ return NULL;
+ }
+
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
+
+ netdev->open = c2_up;
+ netdev->stop = c2_down;
+ netdev->hard_start_xmit = c2_xmit_frame;
+ netdev->get_stats = c2_get_stats;
+ netdev->tx_timeout = c2_tx_timeout;
+ netdev->change_mtu = c2_change_mtu;
+ netdev->watchdog_timeo = C2_TX_TIMEOUT;
+ netdev->irq = c2dev->pcidev->irq;
+
+ c2_port = netdev_priv(netdev);
+ c2_port->netdev = netdev;
+ c2_port->c2dev = c2dev;
+ c2_port->msg_enable = netif_msg_init(debug, default_msg);
+ c2_port->tx_ring.count = C2_NUM_TX_DESC;
+ c2_port->rx_ring.count = C2_NUM_RX_DESC;
+
+ spin_lock_init(&c2_port->tx_lock);
+
+ /* Copy our 48-bit ethernet hardware address */
+ memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
+
+ /* Validate the MAC address */
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ pr_debug("Invalid MAC Address\n");
+ c2_print_macaddr(netdev);
+ free_netdev(netdev);
+ return NULL;
+ }
+
+ c2dev->netdev = netdev;
+
+ return netdev;
+}
+
+static int __devinit c2_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *ent)
+{
+ int ret = 0, i;
+ unsigned long reg0_start, reg0_flags, reg0_len;
+ unsigned long reg2_start, reg2_flags, reg2_len;
+ unsigned long reg4_start, reg4_flags, reg4_len;
+ unsigned kva_map_size;
+ struct net_device *netdev = NULL;
+ struct c2_dev *c2dev = NULL;
+ void __iomem *mmio_regs = NULL;
+
+ printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
+ DRV_VERSION);
+
+ /* Enable PCI device */
+ ret = pci_enable_device(pcidev);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
+ pci_name(pcidev));
+ goto bail0;
+ }
+
+ reg0_start = pci_resource_start(pcidev, BAR_0);
+ reg0_len = pci_resource_len(pcidev, BAR_0);
+ reg0_flags = pci_resource_flags(pcidev, BAR_0);
+
+ reg2_start = pci_resource_start(pcidev, BAR_2);
+ reg2_len = pci_resource_len(pcidev, BAR_2);
+ reg2_flags = pci_resource_flags(pcidev, BAR_2);
+
+ reg4_start = pci_resource_start(pcidev, BAR_4);
+ reg4_len = pci_resource_len(pcidev, BAR_4);
+ reg4_flags = pci_resource_flags(pcidev, BAR_4);
+
+ pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
+ pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
+ pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
+
+ /* Make sure PCI base addr are MMIO */
+ if (!(reg0_flags & IORESOURCE_MEM) ||
+ !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
+ ret = -ENODEV;
+ goto bail1;
+ }
+
+ /* Check for weird/broken PCI region reporting */
+ if ((reg0_len < C2_REG0_SIZE) ||
+ (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
+ printk(KERN_ERR PFX "Invalid PCI region sizes\n");
+ ret = -ENODEV;
+ goto bail1;
+ }
+
+ /* Reserve PCI I/O and memory resources */
+ ret = pci_request_regions(pcidev, DRV_NAME);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: Unable to request regions\n",
+ pci_name(pcidev));
+ goto bail1;
+ }
+
+ if (sizeof(dma_addr_t) > 4) {
+ ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "64b DMA configuration failed\n");
+ goto bail2;
+ }
+ } else {
+ ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "32b DMA configuration failed\n");
+ goto bail2;
+ }
+ }
+
+ /* Enables bus-mastering on the device */
+ pci_set_master(pcidev);
+
+ /* Remap the adapter PCI registers in BAR4 */
+ mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
+ sizeof(struct c2_adapter_pci_regs));
+ if (!mmio_regs) {
+ printk(KERN_ERR PFX
+ "Unable to remap adapter PCI registers in BAR4\n");
+ ret = -EIO;
+ goto bail2;
+ }
+
+ /* Validate PCI regs magic */
+ for (i = 0; i < sizeof(c2_magic); i++) {
+ if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
+ printk(KERN_ERR PFX "Downlevel Firmware boot loader "
+ "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
+ "utility to update your boot loader\n",
+ i + 1, sizeof(c2_magic),
+ readb(mmio_regs + C2_REGS_MAGIC + i),
+ c2_magic[i]);
+ printk(KERN_ERR PFX "Adapter not claimed\n");
+ iounmap(mmio_regs);
+ ret = -EIO;
+ goto bail2;
+ }
+ }
+
+ /* Validate the adapter version */
+ if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
+ printk(KERN_ERR PFX "Version mismatch "
+ "[fw=%u, c2=%u], Adapter not claimed\n",
+ be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
+ C2_VERSION);
+ ret = -EINVAL;
+ iounmap(mmio_regs);
+ goto bail2;
+ }
+
+ /* Validate the adapter IVN */
+ if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
+ printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
+ "the OpenIB device support kit. "
+ "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
+ be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
+ C2_IVN);
+ ret = -EINVAL;
+ iounmap(mmio_regs);
+ goto bail2;
+ }
+
+ /* Allocate hardware structure */
+ c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
+ if (!c2dev) {
+ printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
+ pci_name(pcidev));
+ ret = -ENOMEM;
+ iounmap(mmio_regs);
+ goto bail2;
+ }
+
+ memset(c2dev, 0, sizeof(*c2dev));
+ spin_lock_init(&c2dev->lock);
+ c2dev->pcidev = pcidev;
+ c2dev->cur_tx = 0;
+
+ /* Get the last RX index */
+ c2dev->cur_rx =
+ (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
+ 0xffffc000) / sizeof(struct c2_rxp_desc);
+
+ /* Request an interrupt line for the driver */
+ ret = request_irq(pcidev->irq, c2_interrupt, SA_SHIRQ, DRV_NAME, c2dev);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
+ pci_name(pcidev), pcidev->irq);
+ iounmap(mmio_regs);
+ goto bail3;
+ }
+
+ /* Set driver specific data */
+ pci_set_drvdata(pcidev, c2dev);
+
+ /* Initialize network device */
+ if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
+ iounmap(mmio_regs);
+ goto bail4;
+ }
+
+ /* Save off the actual size prior to unmapping mmio_regs */
+ kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
+
+ /* Unmap the adapter PCI registers in BAR4 */
+ iounmap(mmio_regs);
+
+ /* Register network device */
+ ret = register_netdev(netdev);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
+ ret);
+ goto bail5;
+ }
+
+ /* Disable network packets */
+ netif_stop_queue(netdev);
+
+ /* Remap the adapter HRXDQ PA space to kernel VA space */
+ c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
+ C2_RXP_HRXDQ_SIZE);
+ if (!c2dev->mmio_rxp_ring) {
+ printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
+ ret = -EIO;
+ goto bail6;
+ }
+
+ /* Remap the adapter HTXDQ PA space to kernel VA space */
+ c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
+ C2_TXP_HTXDQ_SIZE);
+ if (!c2dev->mmio_txp_ring) {
+ printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
+ ret = -EIO;
+ goto bail7;
+ }
+
+ /* Save off the current RX index in the last 4 bytes of the TXP Ring */
+ C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
+
+ /* Remap the PCI registers in adapter BAR0 to kernel VA space */
+ c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
+ if (!c2dev->regs) {
+ printk(KERN_ERR PFX "Unable to remap BAR0\n");
+ ret = -EIO;
+ goto bail8;
+ }
+
+ /* Remap the PCI registers in adapter BAR4 to kernel VA space */
+ c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
+ c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
+ kva_map_size);
+ if (!c2dev->kva) {
+ printk(KERN_ERR PFX "Unable to remap BAR4\n");
+ ret = -EIO;
+ goto bail9;
+ }
+
+ /* Print out the MAC address */
+ c2_print_macaddr(netdev);
+
+ ret = c2_rnic_init(c2dev);
+ if (ret) {
+ printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
+ goto bail10;
+ }
+
+ c2_register_device(c2dev);
+
+ return 0;
+
+ bail10:
+ iounmap(c2dev->kva);
+
+ bail9:
+ iounmap(c2dev->regs);
+
+ bail8:
+ iounmap(c2dev->mmio_txp_ring);
+
+ bail7:
+ iounmap(c2dev->mmio_rxp_ring);
+
+ bail6:
+ unregister_netdev(netdev);
+
+ bail5:
+ free_netdev(netdev);
+
+ bail4:
+ free_irq(pcidev->irq, c2dev);
+
+ bail3:
+ ib_dealloc_device(&c2dev->ibdev);
+
+ bail2:
+ pci_release_regions(pcidev);
+
+ bail1:
+ pci_disable_device(pcidev);
+
+ bail0:
+ return ret;
+}
+
+static void __devexit c2_remove(struct pci_dev *pcidev)
+{
+ struct c2_dev *c2dev = pci_get_drvdata(pcidev);
+ struct net_device *netdev = c2dev->netdev;
+
+ /* Unregister with OpenIB */
+ c2_unregister_device(c2dev);
+
+ /* Clean up the RNIC resources */
+ c2_rnic_term(c2dev);
+
+ /* Remove network device from the kernel */
+ unregister_netdev(netdev);
+
+ /* Free network device */
+ free_netdev(netdev);
+
+ /* Free the interrupt line */
+ free_irq(pcidev->irq, c2dev);
+
+ /* missing: Turn LEDs off here */
+
+ /* Unmap adapter PA space */
+ iounmap(c2dev->kva);
+ iounmap(c2dev->regs);
+ iounmap(c2dev->mmio_txp_ring);
+ iounmap(c2dev->mmio_rxp_ring);
+
+ /* Free the hardware structure */
+ ib_dealloc_device(&c2dev->ibdev);
+
+ /* Release reserved PCI I/O and memory resources */
+ pci_release_regions(pcidev);
+
+ /* Disable PCI device */
+ pci_disable_device(pcidev);
+
+ /* Clear driver specific data */
+ pci_set_drvdata(pcidev, NULL);
+}
+
+static struct pci_driver c2_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = c2_pci_table,
+ .probe = c2_probe,
+ .remove = __devexit_p(c2_remove),
+};
+
+static int __init c2_init_module(void)
+{
+ return pci_module_init(&c2_pci_driver);
+}
+
+static void __exit c2_exit_module(void)
+{
+ pci_unregister_driver(&c2_pci_driver);
+}
+
+module_init(c2_init_module);
+module_exit(c2_exit_module);
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
new file mode 100644
index 00000000000..1b17dcdd050
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __C2_H
+#define __C2_H
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <asm/semaphore.h>
+
+#include "c2_provider.h"
+#include "c2_mq.h"
+#include "c2_status.h"
+
+#define DRV_NAME "c2"
+#define DRV_VERSION "1.1"
+#define PFX DRV_NAME ": "
+
+#define BAR_0 0
+#define BAR_2 2
+#define BAR_4 4
+
+#define RX_BUF_SIZE (1536 + 8)
+#define ETH_JUMBO_MTU 9000
+#define C2_MAGIC "CEPHEUS"
+#define C2_VERSION 4
+#define C2_IVN (18 & 0x7fffffff)
+
+#define C2_REG0_SIZE (16 * 1024)
+#define C2_REG2_SIZE (2 * 1024 * 1024)
+#define C2_REG4_SIZE (256 * 1024 * 1024)
+#define C2_NUM_TX_DESC 341
+#define C2_NUM_RX_DESC 256
+#define C2_PCI_REGS_OFFSET (0x10000)
+#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
+#define C2_RXP_HRXDQ_SIZE (4096)
+#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
+#define C2_TXP_HTXDQ_SIZE (4096)
+#define C2_TX_TIMEOUT (6*HZ)
+
+/* "CEPHEUS" in ASCII; must match the C2_MAGIC string above */
+static const u8 c2_magic[] = {
+ 0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
+};
+
+enum adapter_pci_regs {
+ C2_REGS_MAGIC = 0x0000,
+ C2_REGS_VERS = 0x0008,
+ C2_REGS_IVN = 0x000C,
+ C2_REGS_PCI_WINSIZE = 0x0010,
+ C2_REGS_Q0_QSIZE = 0x0014,
+ C2_REGS_Q0_MSGSIZE = 0x0018,
+ C2_REGS_Q0_POOLSTART = 0x001C,
+ C2_REGS_Q0_SHARED = 0x0020,
+ C2_REGS_Q1_QSIZE = 0x0024,
+ C2_REGS_Q1_MSGSIZE = 0x0028,
+ C2_REGS_Q1_SHARED = 0x0030,
+ C2_REGS_Q2_QSIZE = 0x0034,
+ C2_REGS_Q2_MSGSIZE = 0x0038,
+ C2_REGS_Q2_SHARED = 0x0040,
+ C2_REGS_ENADDR = 0x004C,
+ C2_REGS_RDMA_ENADDR = 0x0054,
+ C2_REGS_HRX_CUR = 0x006C,
+};
+
+struct c2_adapter_pci_regs {
+ char reg_magic[8];
+ u32 version;
+ u32 ivn;
+ u32 pci_window_size;
+ u32 q0_q_size;
+ u32 q0_msg_size;
+ u32 q0_pool_start;
+ u32 q0_shared;
+ u32 q1_q_size;
+ u32 q1_msg_size;
+ u32 q1_pool_start;
+ u32 q1_shared;
+ u32 q2_q_size;
+ u32 q2_msg_size;
+ u32 q2_pool_start;
+ u32 q2_shared;
+ u32 log_start;
+ u32 log_size;
+ u8 host_enaddr[8];
+ u8 rdma_enaddr[8];
+ u32 crash_entry;
+ u32 crash_ready[2];
+ u32 fw_txd_cur;
+ u32 fw_hrxd_cur;
+ u32 fw_rxd_cur;
+};
+
+enum pci_regs {
+ C2_HISR = 0x0000,
+ C2_DISR = 0x0004,
+ C2_HIMR = 0x0008,
+ C2_DIMR = 0x000C,
+ C2_NISR0 = 0x0010,
+ C2_NISR1 = 0x0014,
+ C2_NIMR0 = 0x0018,
+ C2_NIMR1 = 0x001C,
+ C2_IDIS = 0x0020,
+};
+
+enum {
+ C2_PCI_HRX_INT = 1 << 8,
+ C2_PCI_HTX_INT = 1 << 17,
+ C2_PCI_HRX_QUI = 1 << 31,
+};
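+
+/*
+ * Note: C2_PCI_HRX_QUI doubles as the quiesce handshake bit in the
+ * current-RX word kept at the tail of the TXP ring (see C2_SET_CUR_RX()
+ * below): c2_reset() sets it and the firmware clears it once the RXP
+ * has quiesced.
+ */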
+
+/*
+ * Cepheus registers in BAR0.
+ */
+struct c2_pci_regs {
+ u32 hostisr;
+ u32 dmaisr;
+ u32 hostimr;
+ u32 dmaimr;
+ u32 netisr0;
+ u32 netisr1;
+ u32 netimr0;
+ u32 netimr1;
+ u32 int_disable;
+};
+
+/* TXP flags */
+enum c2_txp_flags {
+ TXP_HTXD_DONE = 0,
+ TXP_HTXD_READY = 1 << 0,
+ TXP_HTXD_UNINIT = 1 << 1,
+};
+
+/* RXP flags */
+enum c2_rxp_flags {
+ RXP_HRXD_UNINIT = 0,
+ RXP_HRXD_READY = 1 << 0,
+ RXP_HRXD_DONE = 1 << 1,
+};
+
+/* RXP status */
+enum c2_rxp_status {
+ RXP_HRXD_ZERO = 0,
+ RXP_HRXD_OK = 1 << 0,
+ RXP_HRXD_BUF_OV = 1 << 1,
+};
+
+/* TXP descriptor fields */
+enum txp_desc {
+ C2_TXP_FLAGS = 0x0000,
+ C2_TXP_LEN = 0x0002,
+ C2_TXP_ADDR = 0x0004,
+};
+
+/* RXP descriptor fields */
+enum rxp_desc {
+ C2_RXP_FLAGS = 0x0000,
+ C2_RXP_STATUS = 0x0002,
+ C2_RXP_COUNT = 0x0004,
+ C2_RXP_LEN = 0x0006,
+ C2_RXP_ADDR = 0x0008,
+};
+
+struct c2_txp_desc {
+ u16 flags;
+ u16 len;
+ u64 addr;
+} __attribute__ ((packed));
+
+struct c2_rxp_desc {
+ u16 flags;
+ u16 status;
+ u16 count;
+ u16 len;
+ u64 addr;
+} __attribute__ ((packed));
+
+struct c2_rxp_hdr {
+ u16 flags;
+ u16 status;
+ u16 len;
+ u16 rsvd;
+} __attribute__ ((packed));
+
+struct c2_tx_desc {
+ u32 len;
+ u32 status;
+ dma_addr_t next_offset;
+};
+
+struct c2_rx_desc {
+ u32 len;
+ u32 status;
+ dma_addr_t next_offset;
+};
+
+struct c2_alloc {
+ u32 last;
+ u32 max;
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+struct c2_array {
+ struct {
+ void **page;
+ int used;
+ } *page_list;
+};
+
+/*
+ * The MQ shared pointer pool is organized as a linked list of
+ * chunks. Each chunk contains a linked list of free shared pointers
+ * that can be allocated to a given user mode client.
+ *
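+ * Each chunk occupies one page; shared_ptr[] holds u16 indices that
+ * form the free list, "head" names the first free slot, and 0xFFFF
+ * terminates the list (see c2_alloc.c).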
+ */
+struct sp_chunk {
+ struct sp_chunk *next;
+ dma_addr_t dma_addr;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ u16 head;
+ u16 shared_ptr[0];
+};
+
+struct c2_pd_table {
+ u32 last;
+ u32 max;
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+struct c2_qp_table {
+ struct idr idr;
+ spinlock_t lock;
+ int last;
+};
+
+struct c2_element {
+ struct c2_element *next;
+ void *ht_desc; /* host descriptor */
+ void __iomem *hw_desc; /* hardware descriptor */
+ struct sk_buff *skb;
+ dma_addr_t mapaddr;
+ u32 maplen;
+};
+
+struct c2_ring {
+ struct c2_element *to_clean;
+ struct c2_element *to_use;
+ struct c2_element *start;
+ unsigned long count;
+};
+
+struct c2_dev {
+ struct ib_device ibdev;
+ void __iomem *regs;
+ void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
+ void __iomem *mmio_rxp_ring;
+ spinlock_t lock;
+ struct pci_dev *pcidev;
+ struct net_device *netdev;
+ struct net_device *pseudo_netdev;
+ unsigned int cur_tx;
+ unsigned int cur_rx;
+ u32 adapter_handle;
+ int device_cap_flags;
+ void __iomem *kva; /* KVA device memory */
+ unsigned long pa; /* PA device memory */
+ void **qptr_array;
+
+ kmem_cache_t *host_msg_cache;
+
+ struct list_head cca_link; /* adapter list */
+ struct list_head eh_wakeup_list; /* event wakeup list */
+ wait_queue_head_t req_vq_wo;
+
+ /* Cached RNIC properties */
+ struct ib_device_attr props;
+
+ struct c2_pd_table pd_table;
+ struct c2_qp_table qp_table;
+ int ports; /* num of GigE ports */
+ int devnum;
+ spinlock_t vqlock; /* sync vbs req MQ */
+
+ /* Verbs Queues */
+ struct c2_mq req_vq; /* Verbs Request MQ */
+ struct c2_mq rep_vq; /* Verbs Reply MQ */
+ struct c2_mq aeq; /* Async Events MQ */
+
+ /* Kernel client MQs */
+ struct sp_chunk *kern_mqsp_pool;
+
+ /* Device updates these values when posting messages to a host
+ * target queue */
+ u16 req_vq_shared;
+ u16 rep_vq_shared;
+ u16 aeq_shared;
+ u16 irq_claimed;
+
+ /*
+ * Shared host target pages for user-accessible MQs.
+ */
+ int hthead; /* index of first free entry */
+ void *htpages; /* kernel vaddr */
+ int htlen; /* length of htpages memory */
+ void *htuva; /* user mapped vaddr */
+ spinlock_t htlock; /* serialize allocation */
+
+ u64 adapter_hint_uva; /* access to the activity FIFO */
+
+ // spinlock_t aeq_lock;
+ // spinlock_t rnic_lock;
+
+ u16 *hint_count;
+ dma_addr_t hint_count_dma;
+ u16 hints_read;
+
+ int init; /* TRUE if it's ready */
+ char ae_cache_name[16];
+ char vq_cache_name[16];
+};
+
+struct c2_port {
+ u32 msg_enable;
+ struct c2_dev *c2dev;
+ struct net_device *netdev;
+
+ spinlock_t tx_lock;
+ u32 tx_avail;
+ struct c2_ring tx_ring;
+ struct c2_ring rx_ring;
+
+ void *mem; /* PCI memory for host rings */
+ dma_addr_t dma;
+ unsigned long mem_size;
+
+ u32 rx_buf_size;
+
+ struct net_device_stats netstats;
+};
+
+/*
+ * Activity FIFO registers in BAR0.
+ */
+#define PCI_BAR0_HOST_HINT 0x100
+#define PCI_BAR0_ADAPTER_HINT 0x2000
+
+/*
+ * CQ arm and notification state flags.
+ */
+#define CQ_ARMED 0x01
+#define CQ_WAIT_FOR_DMA 0x80
+
+/*
+ * The format of a hint is as follows:
+ * Lower 16 bits are the count of hints for the queue.
+ * Next 15 bits are the qp_index
+ * Upper most bit depends on who reads it:
+ * If read by producer, then it means Full (1) or Not-Full (0)
+ * If read by consumer, then it means Empty (1) or Not-Empty (0)
+ */
+#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
+#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
+#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
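+
+/*
+ * Illustrative example: C2_HINT_MAKE(3, 2) yields 0x00030002;
+ * C2_HINT_GET_INDEX(0x00030002) == 3 and C2_HINT_GET_COUNT(0x00030002) == 2.
+ */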
+
+
+/*
+ * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t
+ * struct.
+ */
+#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
+
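+/*
+ * Fallback 64-bit MMIO accessors for architectures without native
+ * readq/writeq; note that the two 32-bit halves are not issued
+ * atomically with respect to the device.
+ */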
+#ifndef readq
+static inline u64 readq(const void __iomem * addr)
+{
+ u64 ret = readl(addr + 4);
+ ret <<= 32;
+ ret |= readl(addr);
+
+ return ret;
+}
+#endif
+
+#ifndef writeq
+static inline void __raw_writeq(u64 val, void __iomem * addr)
+{
+ __raw_writel((u32) (val), addr);
+ __raw_writel((u32) (val >> 32), (addr + 4));
+}
+#endif
+
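+/*
+ * The current RX index lives in the last four bytes (offset 4092) of
+ * the 4 KB TXP ring window (C2_TXP_HTXDQ_SIZE above); c2_probe() saves
+ * it there and c2_reset() reads it back.
+ */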
+#define C2_SET_CUR_RX(c2dev, cur_rx) \
+ __raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
+
+#define C2_GET_CUR_RX(c2dev) \
+ be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
+
+static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct c2_dev, ibdev);
+}
+
+static inline int c2_errno(void *reply)
+{
+ switch (c2_wr_get_result(reply)) {
+ case C2_OK:
+ return 0;
+ case CCERR_NO_BUFS:
+ case CCERR_INSUFFICIENT_RESOURCES:
+ case CCERR_ZERO_RDMA_READ_RESOURCES:
+ return -ENOMEM;
+ case CCERR_MR_IN_USE:
+ case CCERR_QP_IN_USE:
+ return -EBUSY;
+ case CCERR_ADDR_IN_USE:
+ return -EADDRINUSE;
+ case CCERR_ADDR_NOT_AVAIL:
+ return -EADDRNOTAVAIL;
+ case CCERR_CONN_RESET:
+ return -ECONNRESET;
+ case CCERR_NOT_IMPLEMENTED:
+ case CCERR_INVALID_WQE:
+ return -ENOSYS;
+ case CCERR_QP_NOT_PRIVILEGED:
+ return -EPERM;
+ case CCERR_STACK_ERROR:
+ return -EPROTO;
+ case CCERR_ACCESS_VIOLATION:
+ case CCERR_BASE_AND_BOUNDS_VIOLATION:
+ return -EFAULT;
+ case CCERR_STAG_STATE_NOT_INVALID:
+ case CCERR_INVALID_ADDRESS:
+ case CCERR_INVALID_CQ:
+ case CCERR_INVALID_EP:
+ case CCERR_INVALID_MODIFIER:
+ case CCERR_INVALID_MTU:
+ case CCERR_INVALID_PD_ID:
+ case CCERR_INVALID_QP:
+ case CCERR_INVALID_RNIC:
+ case CCERR_INVALID_STAG:
+ return -EINVAL;
+ default:
+ return -EAGAIN;
+ }
+}
+
+/* Device */
+extern int c2_register_device(struct c2_dev *c2dev);
+extern void c2_unregister_device(struct c2_dev *c2dev);
+extern int c2_rnic_init(struct c2_dev *c2dev);
+extern void c2_rnic_term(struct c2_dev *c2dev);
+extern void c2_rnic_interrupt(struct c2_dev *c2dev);
+extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
+extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
+
+/* QPs */
+extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
+ struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
+extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
+extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
+extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
+ struct ib_qp_attr *attr, int attr_mask);
+extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
+ int ord, int ird);
+extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ struct ib_send_wr **bad_wr);
+extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
+ struct ib_recv_wr **bad_wr);
+extern void __devinit c2_init_qp_table(struct c2_dev *c2dev);
+extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev);
+extern void c2_set_qp_state(struct c2_qp *, int);
+extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
+
+/* PDs */
+extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
+extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
+extern int __devinit c2_init_pd_table(struct c2_dev *c2dev);
+extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev);
+
+/* CQs */
+extern int c2_init_cq(struct c2_dev *c2dev, int entries,
+ struct c2_ucontext *ctx, struct c2_cq *cq);
+extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
+extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
+extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
+extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+
+/* CM */
+extern int c2_llp_connect(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *iw_param);
+extern int c2_llp_accept(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *iw_param);
+extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
+ u8 pdata_len);
+extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
+extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);
+
+/* MM */
+extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
+ int page_size, int pbl_depth, u32 length,
+ u32 off, u64 *va, enum c2_acf acf,
+ struct c2_mr *mr);
+extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
+
+/* AE */
+extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
+
+/* MQSP Allocator */
+extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
+ struct sp_chunk **root);
+extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
+extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+ dma_addr_t *dma_addr, gfp_t gfp_mask);
+extern void c2_free_mqsp(u16 * mqsp);
+#endif
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
new file mode 100644
index 00000000000..08f46c83a3a
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "c2.h"
+#include <rdma/iw_cm.h>
+#include "c2_status.h"
+#include "c2_ae.h"
+
+static int c2_convert_cm_status(u32 c2_status)
+{
+ switch (c2_status) {
+ case C2_CONN_STATUS_SUCCESS:
+ return 0;
+ case C2_CONN_STATUS_REJECTED:
+ return -ENETRESET;
+ case C2_CONN_STATUS_REFUSED:
+ return -ECONNREFUSED;
+ case C2_CONN_STATUS_TIMEDOUT:
+ return -ETIMEDOUT;
+ case C2_CONN_STATUS_NETUNREACH:
+ return -ENETUNREACH;
+ case C2_CONN_STATUS_HOSTUNREACH:
+ return -EHOSTUNREACH;
+ case C2_CONN_STATUS_INVALID_RNIC:
+ return -EINVAL;
+ case C2_CONN_STATUS_INVALID_QP:
+ return -EINVAL;
+ case C2_CONN_STATUS_INVALID_QP_STATE:
+ return -EINVAL;
+ case C2_CONN_STATUS_ADDR_NOT_AVAIL:
+ return -EADDRNOTAVAIL;
+ default:
+ printk(KERN_ERR PFX
+ "%s - Unable to convert CM status: %d\n",
+ __FUNCTION__, c2_status);
+ return -EIO;
+ }
+}
+
+#ifdef DEBUG
+static const char* to_event_str(int event)
+{
+ static const char* event_str[] = {
+ "CCAE_REMOTE_SHUTDOWN",
+ "CCAE_ACTIVE_CONNECT_RESULTS",
+ "CCAE_CONNECTION_REQUEST",
+ "CCAE_LLP_CLOSE_COMPLETE",
+ "CCAE_TERMINATE_MESSAGE_RECEIVED",
+ "CCAE_LLP_CONNECTION_RESET",
+ "CCAE_LLP_CONNECTION_LOST",
+ "CCAE_LLP_SEGMENT_SIZE_INVALID",
+ "CCAE_LLP_INVALID_CRC",
+ "CCAE_LLP_BAD_FPDU",
+ "CCAE_INVALID_DDP_VERSION",
+ "CCAE_INVALID_RDMA_VERSION",
+ "CCAE_UNEXPECTED_OPCODE",
+ "CCAE_INVALID_DDP_QUEUE_NUMBER",
+ "CCAE_RDMA_READ_NOT_ENABLED",
+ "CCAE_RDMA_WRITE_NOT_ENABLED",
+ "CCAE_RDMA_READ_TOO_SMALL",
+ "CCAE_NO_L_BIT",
+ "CCAE_TAGGED_INVALID_STAG",
+ "CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
+ "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
+ "CCAE_TAGGED_INVALID_PD",
+ "CCAE_WRAP_ERROR",
+ "CCAE_BAD_CLOSE",
+ "CCAE_BAD_LLP_CLOSE",
+ "CCAE_INVALID_MSN_RANGE",
+ "CCAE_INVALID_MSN_GAP",
+ "CCAE_IRRQ_OVERFLOW",
+ "CCAE_IRRQ_MSN_GAP",
+ "CCAE_IRRQ_MSN_RANGE",
+ "CCAE_IRRQ_INVALID_STAG",
+ "CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
+ "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
+ "CCAE_IRRQ_INVALID_PD",
+ "CCAE_IRRQ_WRAP_ERROR",
+ "CCAE_CQ_SQ_COMPLETION_OVERFLOW",
+ "CCAE_CQ_RQ_COMPLETION_ERROR",
+ "CCAE_QP_SRQ_WQE_ERROR",
+ "CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
+ "CCAE_CQ_OVERFLOW",
+ "CCAE_CQ_OPERATION_ERROR",
+ "CCAE_SRQ_LIMIT_REACHED",
+ "CCAE_QP_RQ_LIMIT_REACHED",
+ "CCAE_SRQ_CATASTROPHIC_ERROR",
+ "CCAE_RNIC_CATASTROPHIC_ERROR"
+ };
+
+ if (event < CCAE_REMOTE_SHUTDOWN ||
+ event > CCAE_RNIC_CATASTROPHIC_ERROR)
+ return "<invalid event>";
+
+ event -= CCAE_REMOTE_SHUTDOWN;
+ return event_str[event];
+}
+
+static const char *to_qp_state_str(int state)
+{
+ switch (state) {
+ case C2_QP_STATE_IDLE:
+ return "C2_QP_STATE_IDLE";
+ case C2_QP_STATE_CONNECTING:
+ return "C2_QP_STATE_CONNECTING";
+ case C2_QP_STATE_RTS:
+ return "C2_QP_STATE_RTS";
+ case C2_QP_STATE_CLOSING:
+ return "C2_QP_STATE_CLOSING";
+ case C2_QP_STATE_TERMINATE:
+ return "C2_QP_STATE_TERMINATE";
+ case C2_QP_STATE_ERROR:
+ return "C2_QP_STATE_ERROR";
+ default:
+ return "<invalid QP state>";
+ }
+}
+#endif
+
+void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
+{
+ struct c2_mq *mq = c2dev->qptr_array[mq_index];
+ union c2wr *wr;
+ void *resource_user_context;
+ struct iw_cm_event cm_event;
+ struct ib_event ib_event;
+ enum c2_resource_indicator resource_indicator;
+ enum c2_event_id event_id;
+ unsigned long flags;
+ int status;
+
+ /*
+ * Retrieve the message.
+ */
+ wr = c2_mq_consume(mq);
+ if (!wr)
+ return;
+
+ memset(&ib_event, 0, sizeof(ib_event));
+ memset(&cm_event, 0, sizeof(cm_event));
+
+ event_id = c2_wr_get_id(wr);
+ resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
+ resource_user_context =
+ (void *) (unsigned long) wr->ae.ae_generic.user_context;
+
+ status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
+
+ pr_debug("event received c2_dev=%p, event_id=%d, "
+ "resource_indicator=%d, user_context=%p, status = %d\n",
+ c2dev, event_id, resource_indicator, resource_user_context,
+ status);
+
+ switch (resource_indicator) {
+ case C2_RES_IND_QP:{
+
+ struct c2_qp *qp = (struct c2_qp *)resource_user_context;
+ struct iw_cm_id *cm_id = qp->cm_id;
+ struct c2wr_ae_active_connect_results *res;
+
+ if (!cm_id) {
+ pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
+ qp);
+ goto ignore_it;
+ }
+ pr_debug("%s: event = %s, user_context=%llx, "
+ "resource_type=%x, "
+ "resource=%x, qp_state=%s\n",
+ __FUNCTION__,
+ to_event_str(event_id),
+ be64_to_cpu(wr->ae.ae_generic.user_context),
+ be32_to_cpu(wr->ae.ae_generic.resource_type),
+ be32_to_cpu(wr->ae.ae_generic.resource),
+ to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
+
+ c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
+
+ switch (event_id) {
+ case CCAE_ACTIVE_CONNECT_RESULTS:
+ res = &wr->ae.ae_active_connect_results;
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.local_addr.sin_addr.s_addr = res->laddr;
+ cm_event.remote_addr.sin_addr.s_addr = res->raddr;
+ cm_event.local_addr.sin_port = res->lport;
+ cm_event.remote_addr.sin_port = res->rport;
+ if (status == 0) {
+ cm_event.private_data_len =
+ be32_to_cpu(res->private_data_length);
+ cm_event.private_data = res->private_data;
+ } else {
+ spin_lock_irqsave(&qp->lock, flags);
+ if (qp->cm_id) {
+ qp->cm_id->rem_ref(qp->cm_id);
+ qp->cm_id = NULL;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+ cm_event.private_data_len = 0;
+ cm_event.private_data = NULL;
+ }
+ if (cm_id->event_handler)
+ cm_id->event_handler(cm_id, &cm_event);
+ break;
+ case CCAE_TERMINATE_MESSAGE_RECEIVED:
+ case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
+ ib_event.device = &c2dev->ibdev;
+ ib_event.element.qp = &qp->ibqp;
+ ib_event.event = IB_EVENT_QP_REQ_ERR;
+
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&ib_event,
+ qp->ibqp.
+ qp_context);
+ break;
+ case CCAE_BAD_CLOSE:
+ case CCAE_LLP_CLOSE_COMPLETE:
+ case CCAE_LLP_CONNECTION_RESET:
+ case CCAE_LLP_CONNECTION_LOST:
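+ /* 0x6b6b6b6b is the slab use-after-free poison pattern
+ * (POISON_FREE); trip early if the cm_id was freed. */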
+ BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
+
+ spin_lock_irqsave(&qp->lock, flags);
+ if (qp->cm_id) {
+ qp->cm_id->rem_ref(qp->cm_id);
+ qp->cm_id = NULL;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+ cm_event.event = IW_CM_EVENT_CLOSE;
+ cm_event.status = 0;
+ if (cm_id->event_handler)
+ cm_id->event_handler(cm_id, &cm_event);
+ break;
+ default:
+ pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
+ "CM_ID=%p\n",
+ __FUNCTION__, __LINE__,
+ event_id, qp, cm_id);
+ BUG();
+ break;
+ }
+ break;
+ }
+
+ case C2_RES_IND_EP:{
+
+ struct c2wr_ae_connection_request *req =
+ &wr->ae.ae_connection_request;
+ struct iw_cm_id *cm_id =
+ (struct iw_cm_id *)resource_user_context;
+
+ pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
+ if (event_id != CCAE_CONNECTION_REQUEST) {
+ pr_debug("%s: Invalid event_id: %d\n",
+ __FUNCTION__, event_id);
+ break;
+ }
+ cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
+ cm_event.local_addr.sin_addr.s_addr = req->laddr;
+ cm_event.remote_addr.sin_addr.s_addr = req->raddr;
+ cm_event.local_addr.sin_port = req->lport;
+ cm_event.remote_addr.sin_port = req->rport;
+ cm_event.private_data_len =
+ be32_to_cpu(req->private_data_length);
+ cm_event.private_data = req->private_data;
+
+ if (cm_id->event_handler)
+ cm_id->event_handler(cm_id, &cm_event);
+ break;
+ }
+
+ case C2_RES_IND_CQ:{
+ struct c2_cq *cq =
+ (struct c2_cq *) resource_user_context;
+
+ pr_debug("IB_EVENT_CQ_ERR\n");
+ ib_event.device = &c2dev->ibdev;
+ ib_event.element.cq = &cq->ibcq;
+ ib_event.event = IB_EVENT_CQ_ERR;
+
+ if (cq->ibcq.event_handler)
+ cq->ibcq.event_handler(&ib_event,
+ cq->ibcq.cq_context);
+ break;
+ }
+
+ default:
+ printk("Bad resource indicator = %d\n",
+ resource_indicator);
+ break;
+ }
+
+ ignore_it:
+ c2_mq_free(mq);
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.h b/drivers/infiniband/hw/amso1100/c2_ae.h
new file mode 100644
index 00000000000..3a065c33b83
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_ae.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _C2_AE_H_
+#define _C2_AE_H_
+
+/*
+ * WARNING: If you change this file, also bump C2_IVN_BASE
+ * in common/include/clustercore/c2_ivn.h.
+ */
+
+/*
+ * Asynchronous Event Identifiers
+ *
+ * These start at 0x80 only so it's obvious from inspection that
+ * they are not work-request statuses. This isn't critical.
+ *
+ * NOTE: these event id's must fit in eight bits.
+ */
+enum c2_event_id {
+ CCAE_REMOTE_SHUTDOWN = 0x80,
+ CCAE_ACTIVE_CONNECT_RESULTS,
+ CCAE_CONNECTION_REQUEST,
+ CCAE_LLP_CLOSE_COMPLETE,
+ CCAE_TERMINATE_MESSAGE_RECEIVED,
+ CCAE_LLP_CONNECTION_RESET,
+ CCAE_LLP_CONNECTION_LOST,
+ CCAE_LLP_SEGMENT_SIZE_INVALID,
+ CCAE_LLP_INVALID_CRC,
+ CCAE_LLP_BAD_FPDU,
+ CCAE_INVALID_DDP_VERSION,
+ CCAE_INVALID_RDMA_VERSION,
+ CCAE_UNEXPECTED_OPCODE,
+ CCAE_INVALID_DDP_QUEUE_NUMBER,
+ CCAE_RDMA_READ_NOT_ENABLED,
+ CCAE_RDMA_WRITE_NOT_ENABLED,
+ CCAE_RDMA_READ_TOO_SMALL,
+ CCAE_NO_L_BIT,
+ CCAE_TAGGED_INVALID_STAG,
+ CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
+ CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
+ CCAE_TAGGED_INVALID_PD,
+ CCAE_WRAP_ERROR,
+ CCAE_BAD_CLOSE,
+ CCAE_BAD_LLP_CLOSE,
+ CCAE_INVALID_MSN_RANGE,
+ CCAE_INVALID_MSN_GAP,
+ CCAE_IRRQ_OVERFLOW,
+ CCAE_IRRQ_MSN_GAP,
+ CCAE_IRRQ_MSN_RANGE,
+ CCAE_IRRQ_INVALID_STAG,
+ CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
+ CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
+ CCAE_IRRQ_INVALID_PD,
+ CCAE_IRRQ_WRAP_ERROR,
+ CCAE_CQ_SQ_COMPLETION_OVERFLOW,
+ CCAE_CQ_RQ_COMPLETION_ERROR,
+ CCAE_QP_SRQ_WQE_ERROR,
+ CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
+ CCAE_CQ_OVERFLOW,
+ CCAE_CQ_OPERATION_ERROR,
+ CCAE_SRQ_LIMIT_REACHED,
+ CCAE_QP_RQ_LIMIT_REACHED,
+ CCAE_SRQ_CATASTROPHIC_ERROR,
+ CCAE_RNIC_CATASTROPHIC_ERROR
+/* WARNING If you add more id's, make sure their values fit in eight bits. */
+};
+
+/*
+ * Resource Indicators and Identifiers
+ */
+enum c2_resource_indicator {
+ C2_RES_IND_QP = 1,
+ C2_RES_IND_EP,
+ C2_RES_IND_CQ,
+ C2_RES_IND_SRQ,
+};
+
+#endif /* _C2_AE_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
new file mode 100644
index 00000000000..1d2529992c0
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+
+#include "c2.h"
+
+static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
+ struct sp_chunk **head)
+{
+ int i;
+ struct sp_chunk *new_head;
+
+ new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+ if (new_head == NULL)
+ return -ENOMEM;
+
+ new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+
+ new_head->next = NULL;
+ new_head->head = 0;
+
+ /* build list where each index is the next free slot */
+ for (i = 0;
+ i < (PAGE_SIZE - sizeof(struct sp_chunk) -
+ sizeof(u16)) / sizeof(u16) - 1;
+ i++) {
+ new_head->shared_ptr[i] = i + 1;
+ }
+ /* terminate list */
+ new_head->shared_ptr[i] = 0xFFFF;
+
+ *head = new_head;
+ return 0;
+}
+
+int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
+ struct sp_chunk **root)
+{
+ return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
+}
+
+void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
+{
+ struct sp_chunk *next;
+
+ while (root) {
+ next = root->next;
+ dma_unmap_single(c2dev->ibdev.dma_device,
+ pci_unmap_addr(root, mapping), PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page((struct page *) root);
+ root = next;
+ }
+}
+
+u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+ dma_addr_t *dma_addr, gfp_t gfp_mask)
+{
+ u16 mqsp;
+
+ while (head) {
+ mqsp = head->head;
+ if (mqsp != 0xFFFF) {
+ head->head = head->shared_ptr[mqsp];
+ break;
+ } else if (head->next == NULL) {
+ if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
+ 0) {
+ head = head->next;
+ mqsp = head->head;
+ head->head = head->shared_ptr[mqsp];
+ break;
+ } else
+ return NULL;
+ } else
+ head = head->next;
+ }
+ if (head) {
+ *dma_addr = head->dma_addr +
+ ((unsigned long) &(head->shared_ptr[mqsp]) -
+ (unsigned long) head);
+ pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
+ &(head->shared_ptr[mqsp]), (u64)*dma_addr);
+ return &(head->shared_ptr[mqsp]);
+ }
+ return NULL;
+}
+
+void c2_free_mqsp(u16 * mqsp)
+{
+ struct sp_chunk *head;
+ u16 idx;
+
+ /* The chunk containing this ptr begins at the page boundary */
+ head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
+
+ /* Compute the shared_ptr index of the slot being freed */
+ idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
+ idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
+
+ /* Link the freed slot to the old head of the free list */
+ head->shared_ptr[idx] = head->head;
+
+ /* Make the freed slot the new head of the list */
+ head->head = idx;
+}
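+
+/*
+ * Typical pairing (illustrative sketch, not part of the driver):
+ *
+ *	dma_addr_t dma;
+ *	u16 *sp = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, &dma,
+ *				GFP_KERNEL);
+ *	if (sp) {
+ *		... hand "dma" to the adapter, poll *sp on the host ...
+ *		c2_free_mqsp(sp);
+ *	}
+ */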
diff --git a/drivers/infiniband/hw/amso1100/c2_cm.c b/drivers/infiniband/hw/amso1100/c2_cm.c
new file mode 100644
index 00000000000..485254efdd1
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_cm.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include "c2.h"
+#include "c2_wr.h"
+#include "c2_vq.h"
+#include <rdma/iw_cm.h>
+
+int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
+{
+ struct c2_dev *c2dev = to_c2dev(cm_id->device);
+ struct ib_qp *ibqp;
+ struct c2_qp *qp;
+ struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
+ struct c2_vq_req *vq_req;
+ int err;
+
+ ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ qp = to_c2qp(ibqp);
+
+ /* Associate QP <--> CM_ID */
+ cm_id->provider_data = qp;
+ cm_id->add_ref(cm_id);
+ qp->cm_id = cm_id;
+
+ /*
+ * Reject private data that exceeds the supported maximum length.
+ */
+ if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
+ err = -EINVAL;
+ goto bail0;
+ }
+ /*
+ * Set the rdma read limits
+ */
+ err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
+ if (err)
+ goto bail0;
+
+ /*
+ * Create and send a WR_QP_CONNECT...
+ */
+ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
+ if (!wr) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ c2_wr_set_id(wr, CCWR_QP_CONNECT);
+ wr->hdr.context = 0;
+ wr->rnic_handle = c2dev->adapter_handle;
+ wr->qp_handle = qp->adapter_handle;
+
+ wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
+ wr->remote_port = cm_id->remote_addr.sin_port;
+
+ /*
+ * Move any private data from the caller's buf into
+ * the WR.
+ */
+ if (iw_param->private_data) {
+ wr->private_data_length =
+ cpu_to_be32(iw_param->private_data_len);
+ memcpy(&wr->private_data[0], iw_param->private_data,
+ iw_param->private_data_len);
+ } else
+ wr->private_data_length = 0;
+
+ /*
+ * Send WR to adapter.  NOTE: There is no synchronous reply from
+ * the adapter.
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) wr);
+ vq_req_free(c2dev, vq_req);
+
+ bail1:
+ kfree(wr);
+ bail0:
+ if (err) {
+ /*
+ * If we fail, release reference on QP and
+ * disassociate QP from CM_ID
+ */
+ cm_id->provider_data = NULL;
+ qp->cm_id = NULL;
+ cm_id->rem_ref(cm_id);
+ }
+ return err;
+}
+
+int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
+{
+ struct c2_dev *c2dev;
+ struct c2wr_ep_listen_create_req wr;
+ struct c2wr_ep_listen_create_rep *reply;
+ struct c2_vq_req *vq_req;
+ int err;
+
+ c2dev = to_c2dev(cm_id->device);
+ if (c2dev == NULL)
+ return -EINVAL;
+
+ /*
+ * Allocate verbs request.
+ */
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ /*
+ * Build the WR
+ */
+ c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
+ wr.hdr.context = (u64) (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
+ wr.local_port = cm_id->local_addr.sin_port;
+ wr.backlog = cpu_to_be32(backlog);
+ wr.user_context = (u64) (unsigned long) cm_id;
+
+ /*
+ * Reference the request struct. Dereferenced in the int handler.
+ */
+ vq_req_get(c2dev, vq_req);
+
+ /*
+ * Send WR to adapter
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ /*
+ * Wait for reply from adapter
+ */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail0;
+
+ /*
+ * Process reply
+ */
+ reply =
+ (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ if ((err = c2_errno(reply)) != 0)
+ goto bail1;
+
+ /*
+ * Keep the adapter handle. Used in subsequent destroy
+ */
+ cm_id->provider_data = (void *) (unsigned long) reply->ep_handle;
+
+ /*
+ * free vq stuff
+ */
+ vq_repbuf_free(c2dev, reply);
+ vq_req_free(c2dev, vq_req);
+
+ return 0;
+
+ bail1:
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+
+int c2_llp_service_destroy(struct iw_cm_id *cm_id)
+{
+ struct c2_dev *c2dev;
+ struct c2wr_ep_listen_destroy_req wr;
+ struct c2wr_ep_listen_destroy_rep *reply;
+ struct c2_vq_req *vq_req;
+ int err;
+
+ c2dev = to_c2dev(cm_id->device);
+ if (c2dev == NULL)
+ return -EINVAL;
+
+ /*
+ * Allocate verbs request.
+ */
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ /*
+ * Build the WR
+ */
+ c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
+
+ /*
+ * reference the request struct. dereferenced in the int handler.
+ */
+ vq_req_get(c2dev, vq_req);
+
+ /*
+ * Send WR to adapter
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ /*
+ * Wait for reply from adapter
+ */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail0;
+
+ /*
+ * Process reply
+ */
+ reply = (struct c2wr_ep_listen_destroy_rep *) (unsigned long) vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+ if ((err = c2_errno(reply)) != 0)
+ goto bail1;
+
+ bail1:
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
+{
+ struct c2_dev *c2dev = to_c2dev(cm_id->device);
+ struct c2_qp *qp;
+ struct ib_qp *ibqp;
+ struct c2wr_cr_accept_req *wr; /* variable length WR */
+ struct c2_vq_req *vq_req;
+ struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
+ int err;
+
+ ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ qp = to_c2qp(ibqp);
+
+ /* Set the RDMA read limits */
+ err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
+ if (err)
+ goto bail0;
+
+ /* Allocate verbs request. */
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+ vq_req->qp = qp;
+ vq_req->cm_id = cm_id;
+ vq_req->event = IW_CM_EVENT_ESTABLISHED;
+
+ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
+ if (!wr) {
+ err = -ENOMEM;
+ goto bail2;
+ }
+
+ /* Build the WR */
+ c2_wr_set_id(wr, CCWR_CR_ACCEPT);
+ wr->hdr.context = (unsigned long) vq_req;
+ wr->rnic_handle = c2dev->adapter_handle;
+ wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
+ wr->qp_handle = qp->adapter_handle;
+
+ /* Replace the cr_handle with the QP after accept */
+ cm_id->provider_data = qp;
+ cm_id->add_ref(cm_id);
+ qp->cm_id = cm_id;
+
+ /* Validate private_data length */
+ if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
+ err = -EINVAL;
+ goto bail2;
+ }
+
+ if (iw_param->private_data) {
+ wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
+ memcpy(&wr->private_data[0],
+ iw_param->private_data, iw_param->private_data_len);
+ } else
+ wr->private_data_length = 0;
+
+ /* Reference the request struct. Dereferenced in the int handler. */
+ vq_req_get(c2dev, vq_req);
+
+ /* Send WR to adapter */
+ err = vq_send_wr(c2dev, (union c2wr *) wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail2;
+ }
+
+ /* Wait for reply from adapter */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail2;
+
+ /* Check that reply is present */
+ reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail2;
+ }
+
+ err = c2_errno(reply);
+ vq_repbuf_free(c2dev, reply);
+
+ if (!err)
+ c2_set_qp_state(qp, C2_QP_STATE_RTS);
+ bail2:
+ kfree(wr);
+ bail1:
+ vq_req_free(c2dev, vq_req);
+ bail0:
+ if (err) {
+ /*
+ * If we fail, release reference on QP and
+ * disassociate QP from CM_ID
+ */
+ cm_id->provider_data = NULL;
+ qp->cm_id = NULL;
+ cm_id->rem_ref(cm_id);
+ }
+ return err;
+}
+
+int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct c2_dev *c2dev;
+ struct c2wr_cr_reject_req wr;
+ struct c2_vq_req *vq_req;
+ struct c2wr_cr_reject_rep *reply;
+ int err;
+
+ c2dev = to_c2dev(cm_id->device);
+
+ /*
+ * Allocate verbs request.
+ */
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ /*
+ * Build the WR
+ */
+ c2_wr_set_id(&wr, CCWR_CR_REJECT);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
+
+ /*
+ * reference the request struct. dereferenced in the int handler.
+ */
+ vq_req_get(c2dev, vq_req);
+
+ /*
+ * Send WR to adapter
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ /*
+ * Wait for reply from adapter
+ */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail0;
+
+ /*
+ * Process reply
+ */
+ reply = (struct c2wr_cr_reject_rep *) (unsigned long)
+ vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+ err = c2_errno(reply);
+ /*
+ * free vq stuff
+ */
+ vq_repbuf_free(c2dev, reply);
+
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
new file mode 100644
index 00000000000..9d7bcc5ade9
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include "c2.h"
+#include "c2_vq.h"
+#include "c2_status.h"
+
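+/*
+ * Per-slot CQ message size: sizeof(struct c2wr_ce) rounded up to the
+ * next multiple of 32 bytes so the MQ slots stay nicely aligned.
+ */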
+#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
+
+static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
+{
+ struct c2_cq *cq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c2dev->lock, flags);
+ cq = c2dev->qptr_array[cqn];
+ if (!cq) {
+ spin_unlock_irqrestore(&c2dev->lock, flags);
+ return NULL;
+ }
+ atomic_inc(&cq->refcount);
+ spin_unlock_irqrestore(&c2dev->lock, flags);
+ return cq;
+}
+
+static void c2_cq_put(struct c2_cq *cq)
+{
+ if (atomic_dec_and_test(&cq->refcount))
+ wake_up(&cq->wait);
+}
+
+void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
+{
+ struct c2_cq *cq;
+
+ cq = c2_cq_get(c2dev, mq_index);
+ if (!cq) {
+ printk(KERN_WARNING "discarding events on destroyed CQN=%d\n", mq_index);
+ return;
+ }
+
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ c2_cq_put(cq);
+}
+
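+/*
+ * Scrub the CQ of completions that belong to a dying QP: walk the
+ * unconsumed slots and clear their qp_user_context so that
+ * c2_poll_one() will skip them later.
+ */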
+void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
+{
+ struct c2_cq *cq;
+ struct c2_mq *q;
+
+ cq = c2_cq_get(c2dev, mq_index);
+ if (!cq)
+ return;
+
+ spin_lock_irq(&cq->lock);
+ q = &cq->mq;
+ if (q && !c2_mq_empty(q)) {
+ u16 priv = q->priv;
+ struct c2wr_ce *msg;
+
+ while (priv != be16_to_cpu(*q->shared)) {
+ msg = (struct c2wr_ce *)
+ (q->msg_pool.host + priv * q->msg_size);
+ if (msg->qp_user_context == (u64) (unsigned long) qp) {
+ msg->qp_user_context = (u64) 0;
+ }
+ priv = (priv + 1) % q->q_size;
+ }
+ }
+ spin_unlock_irq(&cq->lock);
+ c2_cq_put(cq);
+}
+
+static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
+{
+ switch (status) {
+ case C2_OK:
+ return IB_WC_SUCCESS;
+ case CCERR_FLUSHED:
+ return IB_WC_WR_FLUSH_ERR;
+ case CCERR_BASE_AND_BOUNDS_VIOLATION:
+ return IB_WC_LOC_PROT_ERR;
+ case CCERR_ACCESS_VIOLATION:
+ return IB_WC_LOC_ACCESS_ERR;
+ case CCERR_TOTAL_LENGTH_TOO_BIG:
+ return IB_WC_LOC_LEN_ERR;
+ case CCERR_INVALID_WINDOW:
+ return IB_WC_MW_BIND_ERR;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+
+static inline int c2_poll_one(struct c2_dev *c2dev,
+ struct c2_cq *cq, struct ib_wc *entry)
+{
+ struct c2wr_ce *ce;
+ struct c2_qp *qp;
+ int is_recv = 0;
+
+ ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+ if (!ce) {
+ return -EAGAIN;
+ }
+
+ /*
+ * If the QP returned is NULL then this QP has already
+ * been freed and we are unable to process the completion.
+ * Try pulling the next message.
+ */
+ while ((qp =
+ (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
+ c2_mq_free(&cq->mq);
+ ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+ if (!ce)
+ return -EAGAIN;
+ }
+
+ entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
+ entry->wr_id = ce->hdr.context;
+ entry->qp_num = ce->handle;
+ entry->wc_flags = 0;
+ entry->slid = 0;
+ entry->sl = 0;
+ entry->src_qp = 0;
+ entry->dlid_path_bits = 0;
+ entry->pkey_index = 0;
+
+ switch (c2_wr_get_id(ce)) {
+ case C2_WR_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case C2_WR_TYPE_RDMA_WRITE:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case C2_WR_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case C2_WR_TYPE_BIND_MW:
+ entry->opcode = IB_WC_BIND_MW;
+ break;
+ case C2_WR_TYPE_RECV:
+ entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
+ entry->opcode = IB_WC_RECV;
+ is_recv = 1;
+ break;
+ default:
+ break;
+ }
+
+ /* consume the WQEs */
+ if (is_recv)
+ c2_mq_lconsume(&qp->rq_mq, 1);
+ else
+ c2_mq_lconsume(&qp->sq_mq,
+ be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
+
+ /* free the message */
+ c2_mq_free(&cq->mq);
+
+ return 0;
+}
+
+int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct c2_dev *c2dev = to_c2dev(ibcq->device);
+ struct c2_cq *cq = to_c2cq(ibcq);
+ unsigned long flags;
+ int npolled, err;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+
+ err = c2_poll_one(c2dev, cq, entry + npolled);
+ if (err)
+ break;
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return npolled;
+}
+
+int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+{
+ struct c2_mq_shared __iomem *shared;
+ struct c2_cq *cq;
+
+ cq = to_c2cq(ibcq);
+ shared = cq->mq.peer;
+
+ if (notify == IB_CQ_NEXT_COMP)
+ writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
+ else if (notify == IB_CQ_SOLICITED)
+ writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
+ else
+ return -EINVAL;
+
+ writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
+
+ /*
+ * Now read back shared->armed to make the PCI
+ * write synchronous. This is necessary for
+ * correct cq notification semantics.
+ */
+ readb(&shared->armed);
+
+ return 0;
+}
+
+static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
+{
+ dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
+ mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
+ free_pages((unsigned long) mq->msg_pool.host,
+ get_order(mq->q_size * mq->msg_size));
+}
+
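+/*
+ * Allocate the host-side CQ message pool and map it for DMA from the
+ * adapter.  The MQ index and the peer pointer are filled in later,
+ * once the adapter's CCWR_CQ_CREATE reply provides them.
+ */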
+static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
+ int msg_size)
+{
+ unsigned long pool_start;
+
+ pool_start = __get_free_pages(GFP_KERNEL,
+ get_order(q_size * msg_size));
+ if (!pool_start)
+ return -ENOMEM;
+
+ c2_mq_rep_init(mq,
+ 0, /* index (currently unknown) */
+ q_size,
+ msg_size,
+ (u8 *) pool_start,
+ NULL, /* peer (currently unknown) */
+ C2_MQ_HOST_TARGET);
+
+ mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
+ (void *)pool_start,
+ q_size * msg_size, DMA_FROM_DEVICE);
+ pci_unmap_addr_set(mq, mapping, mq->host_dma);
+
+ return 0;
+}
+
+int c2_init_cq(struct c2_dev *c2dev, int entries,
+ struct c2_ucontext *ctx, struct c2_cq *cq)
+{
+ struct c2wr_cq_create_req wr;
+ struct c2wr_cq_create_rep *reply;
+ unsigned long peer_pa;
+ struct c2_vq_req *vq_req;
+ int err;
+
+ might_sleep();
+
+ cq->ibcq.cqe = entries - 1;
+ cq->is_kernel = !ctx;
+
+ /* Allocate a shared pointer */
+ cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+ &cq->mq.shared_dma, GFP_KERNEL);
+ if (!cq->mq.shared)
+ return -ENOMEM;
+
+ /* Allocate pages for the message pool */
+ err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
+ if (err)
+ goto bail0;
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ memset(&wr, 0, sizeof(wr));
+ c2_wr_set_id(&wr, CCWR_CQ_CREATE);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.msg_size = cpu_to_be32(cq->mq.msg_size);
+ wr.depth = cpu_to_be32(cq->mq.q_size);
+ wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
+ wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
+ wr.user_context = (u64) (unsigned long) (cq);
+
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, (union c2wr *) &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail2;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail2;
+
+ reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail2;
+ }
+
+ if ((err = c2_errno(reply)) != 0)
+ goto bail3;
+
+ cq->adapter_handle = reply->cq_handle;
+ cq->mq.index = be32_to_cpu(reply->mq_index);
+
+ peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
+ cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
+ if (!cq->mq.peer) {
+ err = -ENOMEM;
+ goto bail3;
+ }
+
+ vq_repbuf_free(c2dev, reply);
+ vq_req_free(c2dev, vq_req);
+
+ spin_lock_init(&cq->lock);
+ atomic_set(&cq->refcount, 1);
+ init_waitqueue_head(&cq->wait);
+
+ /*
+ * Use the MQ index allocated by the adapter to
+ * store the CQ in the qptr_array
+ */
+ cq->cqn = cq->mq.index;
+ c2dev->qptr_array[cq->cqn] = cq;
+
+ return 0;
+
+ bail3:
+ vq_repbuf_free(c2dev, reply);
+ bail2:
+ vq_req_free(c2dev, vq_req);
+ bail1:
+ c2_free_cq_buf(c2dev, &cq->mq);
+ bail0:
+ c2_free_mqsp(cq->mq.shared);
+
+ return err;
+}
+
+void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
+{
+ int err;
+ struct c2_vq_req *vq_req;
+ struct c2wr_cq_destroy_req wr;
+ struct c2wr_cq_destroy_rep *reply;
+
+ might_sleep();
+
+ /* Clear CQ from the qptr array */
+ spin_lock_irq(&c2dev->lock);
+ c2dev->qptr_array[cq->mq.index] = NULL;
+ atomic_dec(&cq->refcount);
+ spin_unlock_irq(&c2dev->lock);
+
+ wait_event(cq->wait, !atomic_read(&cq->refcount));
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req) {
+ goto bail0;
+ }
+
+ memset(&wr, 0, sizeof(wr));
+ c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.cq_handle = cq->adapter_handle;
+
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, (union c2wr *) &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail1;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail1;
+
+ reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
+
+ vq_repbuf_free(c2dev, reply);
+ bail1:
+ vq_req_free(c2dev, vq_req);
+ bail0:
+ if (cq->is_kernel) {
+ c2_free_cq_buf(c2dev, &cq->mq);
+ }
+
+ return;
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
new file mode 100644
index 00000000000..0d0bc33ca30
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "c2.h"
+#include <rdma/iw_cm.h>
+#include "c2_vq.h"
+
+static void handle_mq(struct c2_dev *c2dev, u32 mq_index);
+static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
+
+/*
+ * Handle RNIC interrupts
+ */
+void c2_rnic_interrupt(struct c2_dev *c2dev)
+{
+ unsigned int mq_index;
+
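+ /* Drain hints until our local count catches up with the
+ * adapter's; each hint names an MQ that has new messages.  A
+ * value with the high bit set appears to mean no valid hint was
+ * available, so stop and retry on the next interrupt. */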
+ while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
+ mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
+ if (mq_index & 0x80000000) {
+ break;
+ }
+
+ c2dev->hints_read++;
+ handle_mq(c2dev, mq_index);
+ }
+}
+
+/*
+ * Top level MQ handler
+ */
+static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
+{
+ if (c2dev->qptr_array[mq_index] == NULL) {
+ pr_debug("handle_mq: stray activity for mq_index=%d\n",
+ mq_index);
+ return;
+ }
+
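+ /* The low MQ indices are special: 0 is the request VQ, 1 the
+ * reply VQ and 2 the asynchronous event queue; any higher index
+ * refers to a completion queue. */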
+ switch (mq_index) {
+ case (0):
+ /*
+ * An index of 0 in the activity queue
+ * indicates the req vq now has messages
+ * available...
+ *
+ * Wake up any waiters waiting on req VQ
+ * message availability.
+ */
+ wake_up(&c2dev->req_vq_wo);
+ break;
+ case (1):
+ handle_vq(c2dev, mq_index);
+ break;
+ case (2):
+ /* We have to purge the VQ in case there are pending
+ * accept reply requests that would result in the
+ * generation of an ESTABLISHED event. If we don't
+ * generate these first, a CLOSE event could end up
+ * being delivered before the ESTABLISHED event.
+ */
+ handle_vq(c2dev, 1);
+
+ c2_ae_event(c2dev, mq_index);
+ break;
+ default:
+ /* There is no event synchronization between CQ events
+ * and AE or CM events.  In fact, a CQE could be
+ * delivered for all of the I/O up to and including the
+ * FLUSH for a peer disconnect prior to the ESTABLISHED
+ * event being delivered to the app.  The reason for this
+ * is that CM events are delivered on a thread, while AE
+ * and CQ events are delivered in interrupt context.
+ */
+ c2_cq_event(c2dev, mq_index);
+ break;
+ }
+
+ return;
+}
+
+/*
+ * Handles verbs WR replies.
+ */
+static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
+{
+ void *adapter_msg, *reply_msg;
+ struct c2wr_hdr *host_msg;
+ struct c2wr_hdr tmp;
+ struct c2_mq *reply_vq;
+ struct c2_vq_req *req;
+ struct iw_cm_event cm_event;
+ int err;
+
+ reply_vq = (struct c2_mq *) c2dev->qptr_array[mq_index];
+
+ /*
+ * get next msg from mq_index into adapter_msg.
+ * don't free it yet.
+ */
+ adapter_msg = c2_mq_consume(reply_vq);
+ if (adapter_msg == NULL) {
+ return;
+ }
+
+ host_msg = vq_repbuf_alloc(c2dev);
+
+ /*
+ * If we can't get a host buffer, then we'll still
+ * wake up the waiter; we just won't give it the msg.
+ * It is assumed the waiter will deal with this...
+ */
+ if (!host_msg) {
+ pr_debug("handle_vq: no repbufs!\n");
+
+ /*
+ * Just copy the WR header into a local variable;
+ * this allows us to still demux on the context.
+ */
+ host_msg = &tmp;
+ memcpy(host_msg, adapter_msg, sizeof(tmp));
+ reply_msg = NULL;
+ } else {
+ memcpy(host_msg, adapter_msg, reply_vq->msg_size);
+ reply_msg = host_msg;
+ }
+
+ /*
+ * consume the msg from the MQ
+ */
+ c2_mq_free(reply_vq);
+
+ /*
+ * wakeup the waiter.
+ */
+ req = (struct c2_vq_req *) (unsigned long) host_msg->context;
+ if (req == NULL) {
+ /*
+ * We should never get here, as the adapter should
+ * never send us a reply that we're not expecting.
+ */
+ vq_repbuf_free(c2dev, host_msg);
+ pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
+ return;
+ }
+
+ err = c2_errno(reply_msg);
+ if (!err) switch (req->event) {
+ case IW_CM_EVENT_ESTABLISHED:
+ /* Move the QP to RTS before delivering the event */
+ c2_set_qp_state(req->qp,
+ C2_QP_STATE_RTS);
+ /* fall through */
+ case IW_CM_EVENT_CLOSE:
+
+ cm_event.event = req->event;
+ cm_event.status = 0;
+ cm_event.local_addr = req->cm_id->local_addr;
+ cm_event.remote_addr = req->cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+ req->cm_id->event_handler(req->cm_id, &cm_event);
+ break;
+ default:
+ break;
+ }
+
+ req->reply_msg = (u64) (unsigned long) (reply_msg);
+ atomic_set(&req->reply_ready, 1);
+ wake_up(&req->wait_object);
+
+ /*
+ * If the request was cancelled, then this put will
+ * free the vq_req memory...and reply_msg!!!
+ */
+ vq_req_put(c2dev, req);
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
new file mode 100644
index 00000000000..1e4f46493fc
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "c2.h"
+#include "c2_vq.h"
+
+#define PBL_VIRT 1
+#define PBL_PHYS 2
+
+/*
+ * Send all the PBL messages to convey the remainder of the PBL
+ * Wait for the adapter's reply on the last one.
+ * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
+ *
+ * NOTE: vq_req is _not_ freed by this function. The VQ Host
+ * Reply buffer _is_ freed by this function.
+ */
+static int
+send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
+ unsigned long va, u32 pbl_depth,
+ struct c2_vq_req *vq_req, int pbl_type)
+{
+ u32 pbe_count; /* amt that fits in a PBL msg */
+ u32 count; /* amt in this PBL MSG. */
+ struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */
+ struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */
+ int err, pbl_virt, pbl_index, i;
+
+ switch (pbl_type) {
+ case PBL_VIRT:
+ pbl_virt = 1;
+ break;
+ case PBL_PHYS:
+ pbl_virt = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
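+ /* How many 64-bit page addresses fit in one PBL message: the
+ * VQ message size less the fixed part of the WR. */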
+ pbe_count = (c2dev->req_vq.msg_size -
+ sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
+ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
+ if (!wr) {
+ return -ENOMEM;
+ }
+ c2_wr_set_id(wr, CCWR_NSMR_PBL);
+
+ /*
+ * Only the last PBL message will generate a reply from the verbs,
+ * so we set the context to 0 indicating there is no kernel verbs
+ * handler blocked awaiting this reply.
+ */
+ wr->hdr.context = 0;
+ wr->rnic_handle = c2dev->adapter_handle;
+ wr->stag_index = stag_index; /* already swapped */
+ wr->flags = 0;
+ pbl_index = 0;
+ while (pbl_depth) {
+ count = min(pbe_count, pbl_depth);
+ wr->addrs_length = cpu_to_be32(count);
+
+ /*
+ * If this is the last message, then reference the
+ * vq request struct because we're going to wait for a
+ * reply.  Also mark this PBL msg as the last one.
+ */
+ if (count == pbl_depth) {
+ /*
+ * reference the request struct. dereferenced in the
+ * int handler.
+ */
+ vq_req_get(c2dev, vq_req);
+ wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
+
+ /*
+ * This is the last PBL message.
+ * Set the context to our VQ Request Object so we can
+ * wait for the reply.
+ */
+ wr->hdr.context = (unsigned long) vq_req;
+ }
+
+ /*
+ * If pbl_virt is set then va is a virtual address
+ * that describes a virtually contiguous memory
+ * allocation. The wr needs the start of each virtual page
+ * to be converted to the corresponding physical address
+ * of the page. If pbl_virt is not set then va is an array
+ * of physical addresses and there is no conversion to do.
+ * Just fill in the wr with what is in the array.
+ */
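+ /* Note: only PBL_PHYS is ever used by this driver (see
+ * c2_nsmr_register_phys_kern), so the pbl_virt branch below
+ * just advances va without filling in paddrs. */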
+ for (i = 0; i < count; i++) {
+ if (pbl_virt) {
+ va += PAGE_SIZE;
+ } else {
+ wr->paddrs[i] =
+ cpu_to_be64(((u64 *)va)[pbl_index + i]);
+ }
+ }
+
+ /*
+ * Send WR to adapter
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) wr);
+ if (err) {
+ if (count <= pbe_count) {
+ vq_req_put(c2dev, vq_req);
+ }
+ goto bail0;
+ }
+ pbl_depth -= count;
+ pbl_index += count;
+ }
+
+ /*
+ * Now wait for the reply...
+ */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err) {
+ goto bail0;
+ }
+
+ /*
+ * Process reply
+ */
+ reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ err = c2_errno(reply);
+
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ kfree(wr);
+ return err;
+}
+
+#define C2_PBL_MAX_DEPTH 131072
+int
+c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
+ int page_size, int pbl_depth, u32 length,
+ u32 offset, u64 *va, enum c2_acf acf,
+ struct c2_mr *mr)
+{
+ struct c2_vq_req *vq_req;
+ struct c2wr_nsmr_register_req *wr;
+ struct c2wr_nsmr_register_rep *reply;
+ u16 flags;
+ int i, pbe_count, count;
+ int err;
+
+ if (!va || !length || !addr_list || !pbl_depth)
+ return -EINVAL;
+
+ /*
+ * Verify PBL depth is within rnic max
+ */
+ if (pbl_depth > C2_PBL_MAX_DEPTH) {
+ return -EINVAL;
+ }
+
+ /*
+ * allocate verbs request object
+ */
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
+ if (!wr) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ /*
+ * build the WR
+ */
+ c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
+ wr->hdr.context = (unsigned long) vq_req;
+ wr->rnic_handle = c2dev->adapter_handle;
+
+ flags = (acf | MEM_VA_BASED | MEM_REMOTE);
+
+ /*
+ * compute how many pbes can fit in the message
+ */
+ pbe_count = (c2dev->req_vq.msg_size -
+ sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
+
+ if (pbl_depth <= pbe_count) {
+ flags |= MEM_PBL_COMPLETE;
+ }
+ wr->flags = cpu_to_be16(flags);
+ wr->stag_key = 0;
+ wr->va = cpu_to_be64(*va);
+ wr->pd_id = mr->pd->pd_id;
+ wr->pbe_size = cpu_to_be32(page_size);
+ wr->length = cpu_to_be32(length);
+ wr->pbl_depth = cpu_to_be32(pbl_depth);
+ wr->fbo = cpu_to_be32(offset);
+ count = min(pbl_depth, pbe_count);
+ wr->addrs_length = cpu_to_be32(count);
+
+ /*
+ * fill out the PBL for this message
+ */
+ for (i = 0; i < count; i++) {
+ wr->paddrs[i] = cpu_to_be64(addr_list[i]);
+ }
+
+ /*
+ * reference the request struct
+ */
+ vq_req_get(c2dev, vq_req);
+
+ /*
+ * send the WR to the adapter
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail1;
+ }
+
+ /*
+ * wait for reply from adapter
+ */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err) {
+ goto bail1;
+ }
+
+ /*
+ * process reply
+ */
+ reply =
+ (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+ if ((err = c2_errno(reply))) {
+ goto bail2;
+ }
+ mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
+ vq_repbuf_free(c2dev, reply);
+
+ /*
+ * if there are still more PBEs we need to send them to
+ * the adapter and wait for a reply on the final one.
+ * reuse vq_req for this purpose.
+ */
+ pbl_depth -= count;
+ if (pbl_depth) {
+
+ vq_req->reply_msg = (unsigned long) NULL;
+ atomic_set(&vq_req->reply_ready, 0);
+ err = send_pbl_messages(c2dev,
+ cpu_to_be32(mr->ibmr.lkey),
+ (unsigned long) &addr_list[i],
+ pbl_depth, vq_req, PBL_PHYS);
+ if (err) {
+ goto bail1;
+ }
+ }
+
+ vq_req_free(c2dev, vq_req);
+ kfree(wr);
+
+ return err;
+
+ bail2:
+ vq_repbuf_free(c2dev, reply);
+ bail1:
+ kfree(wr);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
+{
+ struct c2_vq_req *vq_req; /* verbs request object */
+ struct c2wr_stag_dealloc_req wr; /* work request */
+ struct c2wr_stag_dealloc_rep *reply; /* WR reply */
+ int err;
+
+ /*
+ * allocate verbs request object
+ */
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req) {
+ return -ENOMEM;
+ }
+
+ /*
+ * Build the WR
+ */
+ c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
+ wr.hdr.context = (u64) (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.stag_index = cpu_to_be32(stag_index);
+
+ /*
+ * reference the request struct. dereferenced in the int handler.
+ */
+ vq_req_get(c2dev, vq_req);
+
+ /*
+ * Send WR to adapter
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ /*
+ * Wait for reply from adapter
+ */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err) {
+ goto bail0;
+ }
+
+ /*
+ * Process reply
+ */
+ reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ err = c2_errno(reply);
+
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c
new file mode 100644
index 00000000000..b88a7559210
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mq.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "c2.h"
+#include "c2_mq.h"
+
+void *c2_mq_alloc(struct c2_mq *q)
+{
+ BUG_ON(q->magic != C2_MQ_MAGIC);
+ BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
+
+ if (c2_mq_full(q)) {
+ return NULL;
+ } else {
+#ifdef DEBUG
+ struct c2wr_hdr *m =
+ (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
+#ifdef CCMSGMAGIC
+ BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
+ m->magic = cpu_to_be32(CCWR_MAGIC);
+#endif
+ return m;
+#else
+ return q->msg_pool.host + q->priv * q->msg_size;
+#endif
+ }
+}
+
+void c2_mq_produce(struct c2_mq *q)
+{
+ BUG_ON(q->magic != C2_MQ_MAGIC);
+ BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
+
+ if (!c2_mq_full(q)) {
+ q->priv = (q->priv + 1) % q->q_size;
+ q->hint_count++;
+ /* Update peer's offset. */
+ __raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+ }
+}
+
+void *c2_mq_consume(struct c2_mq *q)
+{
+ BUG_ON(q->magic != C2_MQ_MAGIC);
+ BUG_ON(q->type != C2_MQ_HOST_TARGET);
+
+ if (c2_mq_empty(q)) {
+ return NULL;
+ } else {
+#ifdef DEBUG
+ struct c2wr_hdr *m = (struct c2wr_hdr *)
+ (q->msg_pool.host + q->priv * q->msg_size);
+#ifdef CCMSGMAGIC
+ BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
+#endif
+ return m;
+#else
+ return q->msg_pool.host + q->priv * q->msg_size;
+#endif
+ }
+}
+
+void c2_mq_free(struct c2_mq *q)
+{
+ BUG_ON(q->magic != C2_MQ_MAGIC);
+ BUG_ON(q->type != C2_MQ_HOST_TARGET);
+
+ if (!c2_mq_empty(q)) {
+
+#ifdef CCMSGMAGIC
+ {
+ struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
+ (q->msg_pool.adapter + q->priv * q->msg_size);
+ __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
+ }
+#endif
+ q->priv = (q->priv + 1) % q->q_size;
+ /* Update peer's offset. */
+ __raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
+ }
+}
+
+
+void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
+{
+ BUG_ON(q->magic != C2_MQ_MAGIC);
+ BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
+
+ while (wqe_count--) {
+ BUG_ON(c2_mq_empty(q));
+ *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
+ }
+}
+
+#if 0
+u32 c2_mq_count(struct c2_mq *q)
+{
+ s32 count;
+
+ if (q->type == C2_MQ_HOST_TARGET)
+ count = be16_to_cpu(*q->shared) - q->priv;
+ else
+ count = q->priv - be16_to_cpu(*q->shared);
+
+ if (count < 0)
+ count += q->q_size;
+
+ return (u32) count;
+}
+#endif /* 0 */
+
+void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+ u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
+{
+ BUG_ON(!q->shared);
+
+ /* This code assumes the byte swapping has already been done! */
+ q->index = index;
+ q->q_size = q_size;
+ q->msg_size = msg_size;
+ q->msg_pool.adapter = pool_start;
+ q->peer = (struct c2_mq_shared __iomem *) peer;
+ q->magic = C2_MQ_MAGIC;
+ q->type = type;
+ q->priv = 0;
+ q->hint_count = 0;
+ return;
+}
+void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+ u8 *pool_start, u16 __iomem *peer, u32 type)
+{
+ BUG_ON(!q->shared);
+
+ /* This code assumes the byte swapping has already been done! */
+ q->index = index;
+ q->q_size = q_size;
+ q->msg_size = msg_size;
+ q->msg_pool.host = pool_start;
+ q->peer = (struct c2_mq_shared __iomem *) peer;
+ q->magic = C2_MQ_MAGIC;
+ q->type = type;
+ q->priv = 0;
+ q->hint_count = 0;
+ return;
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
new file mode 100644
index 00000000000..9185bbb2165
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _C2_MQ_H_
+#define _C2_MQ_H_
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include "c2_wr.h"
+
+enum c2_shared_regs {
+
+ C2_SHARED_ARMED = 0x10,
+ C2_SHARED_NOTIFY = 0x18,
+ C2_SHARED_SHARED = 0x40,
+};
+
+struct c2_mq_shared {
+ u16 unused1;
+ u8 armed;
+ u8 notification_type;
+ u32 unused2;
+ u16 shared;
+ /* Pad to 64 bytes. */
+ u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
+};
+
+enum c2_mq_type {
+ C2_MQ_HOST_TARGET = 1,
+ C2_MQ_ADAPTER_TARGET = 2,
+};
+
+/*
+ * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
+ * c2_user_mq_t (which is the same format) is for user-mode MQs...
+ */
+#define C2_MQ_MAGIC 0x4d512020 /* 'MQ ' */
+struct c2_mq {
+ u32 magic;
+ union {
+ u8 *host;
+ u8 __iomem *adapter;
+ } msg_pool;
+ dma_addr_t host_dma;
+ DECLARE_PCI_UNMAP_ADDR(mapping);
+ u16 hint_count;
+ u16 priv;
+ struct c2_mq_shared __iomem *peer;
+ u16 *shared;
+ dma_addr_t shared_dma;
+ u32 q_size;
+ u32 msg_size;
+ u32 index;
+ enum c2_mq_type type;
+};
+
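+/*
+ * Ring occupancy tests: 'priv' is the local private index and
+ * '*shared' is the peer-updated index, kept big-endian.  One slot is
+ * always left unused so full and empty stay distinguishable, e.g.
+ * with q_size = 4 and *shared = 1 the queue is full once
+ * priv == (1 + 4 - 1) % 4 == 0.
+ */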
+static inline int c2_mq_empty(struct c2_mq *q)
+{
+ return q->priv == be16_to_cpu(*q->shared);
+}
+
+static inline int c2_mq_full(struct c2_mq *q)
+{
+ return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
+}
+
+extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
+extern void *c2_mq_alloc(struct c2_mq *q);
+extern void c2_mq_produce(struct c2_mq *q);
+extern void *c2_mq_consume(struct c2_mq *q);
+extern void c2_mq_free(struct c2_mq *q);
+extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+ u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
+extern void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+ u8 *pool_start, u16 __iomem *peer, u32 type);
+
+#endif /* _C2_MQ_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/infiniband/hw/amso1100/c2_pd.c
new file mode 100644
index 00000000000..00c709926c8
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_pd.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+
+#include "c2.h"
+#include "c2_provider.h"
+
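+/*
+ * PD ids come from a bitmap searched next-fit style: start at 'last',
+ * wrap to the beginning if the tail is full, and remember the slot
+ * after the winner so the next allocation starts there.
+ */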
+int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
+{
+ u32 obj;
+ int ret = 0;
+
+ spin_lock(&c2dev->pd_table.lock);
+ obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
+ c2dev->pd_table.last);
+ if (obj >= c2dev->pd_table.max)
+ obj = find_first_zero_bit(c2dev->pd_table.table,
+ c2dev->pd_table.max);
+ if (obj < c2dev->pd_table.max) {
+ pd->pd_id = obj;
+ __set_bit(obj, c2dev->pd_table.table);
+ c2dev->pd_table.last = obj+1;
+ if (c2dev->pd_table.last >= c2dev->pd_table.max)
+ c2dev->pd_table.last = 0;
+ } else
+ ret = -ENOMEM;
+ spin_unlock(&c2dev->pd_table.lock);
+ return ret;
+}
+
+void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
+{
+ spin_lock(&c2dev->pd_table.lock);
+ __clear_bit(pd->pd_id, c2dev->pd_table.table);
+ spin_unlock(&c2dev->pd_table.lock);
+}
+
+int __devinit c2_init_pd_table(struct c2_dev *c2dev)
+{
+ c2dev->pd_table.last = 0;
+ c2dev->pd_table.max = c2dev->props.max_pd;
+ spin_lock_init(&c2dev->pd_table.lock);
+ c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
+ sizeof(long), GFP_KERNEL);
+ if (!c2dev->pd_table.table)
+ return -ENOMEM;
+ bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
+ return 0;
+}
+
+void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev)
+{
+ kfree(c2dev->pd_table.table);
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
new file mode 100644
index 00000000000..8fddc8cccdf
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -0,0 +1,869 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_arp.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include "c2.h"
+#include "c2_provider.h"
+#include "c2_user.h"
+
+static int c2_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct c2_dev *c2dev = to_c2dev(ibdev);
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ *props = c2dev->props;
+ return 0;
+}
+
+static int c2_query_port(struct ib_device *ibdev,
+ u8 port, struct ib_port_attr *props)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ props->max_mtu = IB_MTU_4096;
+ props->lid = 0;
+ props->lmc = 0;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = 0;
+ props->port_cap_flags =
+ IB_PORT_CM_SUP |
+ IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->qkey_viol_cntr = 0;
+ props->active_width = 1;
+ props->active_speed = 1;
+
+ return 0;
+}
+
+static int c2_modify_port(struct ib_device *ibdev,
+ u8 port, int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return 0;
+}
+
+static int c2_query_pkey(struct ib_device *ibdev,
+ u8 port, u16 index, u16 * pkey)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ *pkey = 0;
+ return 0;
+}
+
+static int c2_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct c2_dev *c2dev = to_c2dev(ibdev);
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
+
+ return 0;
+}
+
+/* Allocate the user context data structure. This keeps track
+ * of all objects associated with a particular user-mode client.
+ */
+static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct c2_ucontext *context;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ return &context->ibucontext;
+}
+
+static int c2_dealloc_ucontext(struct ib_ucontext *context)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ kfree(context);
+ return 0;
+}
+
+static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return -ENOSYS;
+}
+
+static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct c2_pd *pd;
+ int err;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
+ if (err) {
+ kfree(pd);
+ return ERR_PTR(err);
+ }
+
+ if (context) {
+ if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
+ c2_pd_free(to_c2dev(ibdev), pd);
+ kfree(pd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ return &pd->ibpd;
+}
+
+static int c2_dealloc_pd(struct ib_pd *pd)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
+ kfree(pd);
+
+ return 0;
+}
+
+static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return ERR_PTR(-ENOSYS);
+}
+
+static int c2_ah_destroy(struct ib_ah *ah)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return -ENOSYS;
+}
+
+static void c2_add_ref(struct ib_qp *ibqp)
+{
+ struct c2_qp *qp;
+ BUG_ON(!ibqp);
+ qp = to_c2qp(ibqp);
+ atomic_inc(&qp->refcount);
+}
+
+static void c2_rem_ref(struct ib_qp *ibqp)
+{
+ struct c2_qp *qp;
+ BUG_ON(!ibqp);
+ qp = to_c2qp(ibqp);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
+struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
+{
+ struct c2_dev *c2dev = to_c2dev(device);
+ struct c2_qp *qp;
+
+ qp = c2_find_qpn(c2dev, qpn);
+ pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
+ __FUNCTION__, qp, qpn, device,
+ (qp?atomic_read(&qp->refcount):0));
+
+ return (qp?&qp->ibqp:NULL);
+}
+
+static struct ib_qp *c2_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct c2_qp *qp;
+ int err;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ pr_debug("%s: Unable to allocate QP\n", __FUNCTION__);
+ return ERR_PTR(-ENOMEM);
+ }
+ spin_lock_init(&qp->lock);
+ if (pd->uobject) {
+ /* userspace specific */
+ }
+
+ err = c2_alloc_qp(to_c2dev(pd->device),
+ to_c2pd(pd), init_attr, qp);
+
+ if (err && pd->uobject) {
+ /* userspace specific */
+ }
+
+ break;
+ default:
+ pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__,
+ init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (err) {
+ kfree(qp);
+ return ERR_PTR(err);
+ }
+
+ return &qp->ibqp;
+}
+
+static int c2_destroy_qp(struct ib_qp *ib_qp)
+{
+ struct c2_qp *qp = to_c2qp(ib_qp);
+
+ pr_debug("%s:%u qp=%p,qp->state=%d\n",
+ __FUNCTION__, __LINE__,ib_qp,qp->state);
+ c2_free_qp(to_c2dev(ib_qp->device), qp);
+ kfree(qp);
+ return 0;
+}
+
+static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct c2_cq *cq;
+ int err;
+
+ cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq) {
+ pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
+ if (err) {
+ pr_debug("%s: error initializing CQ\n", __FUNCTION__);
+ kfree(cq);
+ return ERR_PTR(err);
+ }
+
+ return &cq->ibcq;
+}
+
+static int c2_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct c2_cq *cq = to_c2cq(ib_cq);
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ c2_free_cq(to_c2dev(ib_cq->device), cq);
+ kfree(cq);
+
+ return 0;
+}
+
+static inline u32 c2_convert_access(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
+ C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
+}
+
+static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf, int acc, u64 * iova_start)
+{
+ struct c2_mr *mr;
+ u64 *page_list;
+ u32 total_len;
+ int err, i, j, k, page_shift, pbl_depth;
+
+ pbl_depth = 0;
+ total_len = 0;
+
+ page_shift = PAGE_SHIFT;
+ /*
+ * If there is only one buffer we assume this could
+ * be a map of all physical memory, so use a 32k page_shift.
+ */
+ if (num_phys_buf == 1)
+ page_shift += 3;
+
+ for (i = 0; i < num_phys_buf; i++) {
+
+ if (buffer_list[i].addr & ~PAGE_MASK) {
+ pr_debug("Unaligned Memory Buffer: 0x%x\n",
+ (unsigned int) buffer_list[i].addr);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!buffer_list[i].size) {
+ pr_debug("Invalid Buffer Size\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ total_len += buffer_list[i].size;
+ pbl_depth += ALIGN(buffer_list[i].size,
+ (1 << page_shift)) >> page_shift;
+ }
+
+ page_list = vmalloc(sizeof(u64) * pbl_depth);
+ if (!page_list) {
+ pr_debug("couldn't vmalloc page_list of size %zd\n",
+ (sizeof(u64) * pbl_depth));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0, j = 0; i < num_phys_buf; i++) {
+
+ int naddrs;
+
+ naddrs = ALIGN(buffer_list[i].size,
+ (1 << page_shift)) >> page_shift;
+ for (k = 0; k < naddrs; k++)
+ page_list[j++] = (buffer_list[i].addr +
+ (k << page_shift));
+ }
+
+ mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ vfree(page_list);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mr->pd = to_c2pd(ib_pd);
+ pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
+ "*iova_start %llx, first pa %llx, last pa %llx\n",
+ __FUNCTION__, page_shift, pbl_depth, total_len,
+ *iova_start, page_list[0], page_list[pbl_depth-1]);
+ err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
+ (1 << page_shift), pbl_depth,
+ total_len, 0, iova_start,
+ c2_convert_access(acc), mr);
+ vfree(page_list);
+ if (err) {
+ kfree(mr);
+ return ERR_PTR(err);
+ }
+
+ return &mr->ibmr;
+}
+
+static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct ib_phys_buf bl;
+ u64 kva = 0;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ /* AMSO1100 limit */
+ bl.size = 0xffffffff;
+ bl.addr = 0;
+ return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
+}
+
+static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
+ int acc, struct ib_udata *udata)
+{
+ u64 *pages;
+ u64 kva = 0;
+ int shift, n, len;
+ int i, j, k;
+ int err = 0;
+ struct ib_umem_chunk *chunk;
+ struct c2_pd *c2pd = to_c2pd(pd);
+ struct c2_mr *c2mr;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ shift = ffs(region->page_size) - 1;
+
+ c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
+ if (!c2mr)
+ return ERR_PTR(-ENOMEM);
+ c2mr->pd = c2pd;
+
+ n = 0;
+ list_for_each_entry(chunk, &region->chunk_list, list)
+ n += chunk->nents;
+
+ pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
+ if (!pages) {
+ err = -ENOMEM;
+ goto err;
+ }
+
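+ /* Flatten the umem chunk list into one array of page-sized DMA
+ * addresses for the adapter's page buffer list. */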
+ i = 0;
+ list_for_each_entry(chunk, &region->chunk_list, list) {
+ for (j = 0; j < chunk->nmap; ++j) {
+ len = sg_dma_len(&chunk->page_list[j]) >> shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] =
+ sg_dma_address(&chunk->page_list[j]) +
+ (region->page_size * k);
+ }
+ }
+ }
+
+ kva = (u64)region->virt_base;
+ err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
+ pages,
+ region->page_size,
+ i,
+ region->length,
+ region->offset,
+ &kva,
+ c2_convert_access(acc),
+ c2mr);
+ kfree(pages);
+ if (err) {
+ kfree(c2mr);
+ return ERR_PTR(err);
+ }
+ return &c2mr->ibmr;
+
+err:
+ kfree(c2mr);
+ return ERR_PTR(err);
+}
+
+static int c2_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct c2_mr *mr = to_c2mr(ib_mr);
+ int err;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
+ if (err)
+ pr_debug("c2_stag_dealloc failed: %d\n", err);
+ else
+ kfree(mr);
+
+ return err;
+}
+
+static ssize_t show_rev(struct class_device *cdev, char *buf)
+{
+ struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return sprintf(buf, "%x\n", dev->props.hw_ver);
+}
+
+static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+{
+ struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return sprintf(buf, "%x.%x.%x\n",
+ (int) (dev->props.fw_ver >> 32),
+ (int) (dev->props.fw_ver >> 16) & 0xffff,
+ (int) (dev->props.fw_ver & 0xffff));
+}
+
+static ssize_t show_hca(struct class_device *cdev, char *buf)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return sprintf(buf, "AMSO1100\n");
+}
+
+static ssize_t show_board(struct class_device *cdev, char *buf)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
+}
+
+static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct class_device_attribute *c2_class_attributes[] = {
+ &class_device_attr_hw_rev,
+ &class_device_attr_fw_ver,
+ &class_device_attr_hca_type,
+ &class_device_attr_board_id
+};
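+/*
+ * Once ib_register_device() succeeds these appear in sysfs, e.g.
+ * (illustrative): "cat /sys/class/infiniband/amso0/hca_type" prints
+ * "AMSO1100".
+ */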
+
+static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ int err;
+
+ err =
+ c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
+ attr_mask);
+
+ return err;
+}
+
+static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return -ENOSYS;
+}
+
+static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return -ENOSYS;
+}
+
+static int c2_process_mad(struct ib_device *ibdev,
+ int mad_flags,
+ u8 port_num,
+ struct ib_wc *in_wc,
+ struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return -ENOSYS;
+}
+
+static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ /* Request a connection */
+ return c2_llp_connect(cm_id, iw_param);
+}
+
+static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ /* Accept the new connection */
+ return c2_llp_accept(cm_id, iw_param);
+}
+
+static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ int err;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ err = c2_llp_reject(cm_id, pdata, pdata_len);
+ return err;
+}
+
+static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
+{
+ int err;
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ err = c2_llp_service_create(cm_id, backlog);
+ pr_debug("%s:%u err=%d\n",
+ __FUNCTION__, __LINE__,
+ err);
+ return err;
+}
+
+static int c2_service_destroy(struct iw_cm_id *cm_id)
+{
+ int err;
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+
+ err = c2_llp_service_destroy(cm_id);
+
+ return err;
+}
+
+static int c2_pseudo_up(struct net_device *netdev)
+{
+ struct in_device *ind;
+ struct c2_dev *c2dev = netdev->priv;
+
+ ind = in_dev_get(netdev);
+ if (!ind)
+ return 0;
+
+ pr_debug("adding...\n");
+ for_ifa(ind) {
+#ifdef DEBUG
+ u8 *ip = (u8 *) & ifa->ifa_address;
+
+ pr_debug("%s: %d.%d.%d.%d\n",
+ ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
+#endif
+ c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
+ }
+ endfor_ifa(ind);
+ in_dev_put(ind);
+
+ return 0;
+}
+
+static int c2_pseudo_down(struct net_device *netdev)
+{
+ struct in_device *ind;
+ struct c2_dev *c2dev = netdev->priv;
+
+ ind = in_dev_get(netdev);
+ if (!ind)
+ return 0;
+
+ pr_debug("deleting...\n");
+ for_ifa(ind) {
+#ifdef DEBUG
+ u8 *ip = (u8 *) & ifa->ifa_address;
+
+ pr_debug("%s: %d.%d.%d.%d\n",
+ ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
+#endif
+ c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
+ }
+ endfor_ifa(ind);
+ in_dev_put(ind);
+
+ return 0;
+}
+
+static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ int ret = 0;
+
+ if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+ return -EINVAL;
+
+ netdev->mtu = new_mtu;
+
+ /* TODO: Tell rnic about new rdma interface mtu */
+ return ret;
+}
+
+static void setup(struct net_device *netdev)
+{
+ SET_MODULE_OWNER(netdev);
+ netdev->open = c2_pseudo_up;
+ netdev->stop = c2_pseudo_down;
+ netdev->hard_start_xmit = c2_pseudo_xmit_frame;
+ netdev->get_stats = NULL;
+ netdev->tx_timeout = NULL;
+ netdev->set_mac_address = NULL;
+ netdev->change_mtu = c2_pseudo_change_mtu;
+ netdev->watchdog_timeo = 0;
+ netdev->type = ARPHRD_ETHER;
+ netdev->mtu = 1500;
+ netdev->hard_header_len = ETH_HLEN;
+ netdev->addr_len = ETH_ALEN;
+ netdev->tx_queue_len = 0;
+ netdev->flags |= IFF_NOARP;
+ return;
+}
+
+static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
+{
+ char name[IFNAMSIZ];
+ struct net_device *netdev;
+
+ /* change ethxxx to iwxxx */
+ strcpy(name, "iw");
+ strcat(name, &c2dev->netdev->name[3]);
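+ /* e.g. a host interface named "eth0" yields pseudo device "iw0" */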
+ netdev = alloc_netdev(sizeof(*netdev), name, setup);
+ if (!netdev) {
+ printk(KERN_ERR PFX "%s - etherdev alloc failed",
+ __FUNCTION__);
+ return NULL;
+ }
+
+ netdev->priv = c2dev;
+
+ SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
+
+ memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
+
+ /* Print out the MAC address */
+ pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n",
+ netdev->name,
+ netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+ netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+
+#if 0
+ /* Disable network packets */
+ netif_stop_queue(netdev);
+#endif
+ return netdev;
+}
+
+int c2_register_device(struct c2_dev *dev)
+{
+ int ret;
+ int i;
+
+ /* Register pseudo network device */
+ dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
+ if (!dev->pseudo_netdev)
+ return -ENOMEM;
+
+ ret = register_netdev(dev->pseudo_netdev);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "Unable to register netdev, ret = %d\n", ret);
+ free_netdev(dev->pseudo_netdev);
+ return ret;
+ }
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
+ dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV);
+
+ dev->ibdev.node_type = RDMA_NODE_RNIC;
+ memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
+ memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.dma_device = &dev->pcidev->dev;
+ dev->ibdev.class_dev.dev = &dev->pcidev->dev;
+ dev->ibdev.query_device = c2_query_device;
+ dev->ibdev.query_port = c2_query_port;
+ dev->ibdev.modify_port = c2_modify_port;
+ dev->ibdev.query_pkey = c2_query_pkey;
+ dev->ibdev.query_gid = c2_query_gid;
+ dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
+ dev->ibdev.mmap = c2_mmap_uar;
+ dev->ibdev.alloc_pd = c2_alloc_pd;
+ dev->ibdev.dealloc_pd = c2_dealloc_pd;
+ dev->ibdev.create_ah = c2_ah_create;
+ dev->ibdev.destroy_ah = c2_ah_destroy;
+ dev->ibdev.create_qp = c2_create_qp;
+ dev->ibdev.modify_qp = c2_modify_qp;
+ dev->ibdev.destroy_qp = c2_destroy_qp;
+ dev->ibdev.create_cq = c2_create_cq;
+ dev->ibdev.destroy_cq = c2_destroy_cq;
+ dev->ibdev.poll_cq = c2_poll_cq;
+ dev->ibdev.get_dma_mr = c2_get_dma_mr;
+ dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
+ dev->ibdev.reg_user_mr = c2_reg_user_mr;
+ dev->ibdev.dereg_mr = c2_dereg_mr;
+
+ dev->ibdev.alloc_fmr = NULL;
+ dev->ibdev.unmap_fmr = NULL;
+ dev->ibdev.dealloc_fmr = NULL;
+ dev->ibdev.map_phys_fmr = NULL;
+
+ dev->ibdev.attach_mcast = c2_multicast_attach;
+ dev->ibdev.detach_mcast = c2_multicast_detach;
+ dev->ibdev.process_mad = c2_process_mad;
+
+ dev->ibdev.req_notify_cq = c2_arm_cq;
+ dev->ibdev.post_send = c2_post_send;
+ dev->ibdev.post_recv = c2_post_receive;
+
+ dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+ if (dev->ibdev.iwcm == NULL) {
+ unregister_netdev(dev->pseudo_netdev);
+ free_netdev(dev->pseudo_netdev);
+ return -ENOMEM;
+ }
+ dev->ibdev.iwcm->add_ref = c2_add_ref;
+ dev->ibdev.iwcm->rem_ref = c2_rem_ref;
+ dev->ibdev.iwcm->get_qp = c2_get_qp;
+ dev->ibdev.iwcm->connect = c2_connect;
+ dev->ibdev.iwcm->accept = c2_accept;
+ dev->ibdev.iwcm->reject = c2_reject;
+ dev->ibdev.iwcm->create_listen = c2_service_create;
+ dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
+
+ ret = ib_register_device(&dev->ibdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) {
+ ret = class_device_create_file(&dev->ibdev.class_dev,
+ c2_class_attributes[i]);
+ if (ret) {
+ unregister_netdev(dev->pseudo_netdev);
+ free_netdev(dev->pseudo_netdev);
+ ib_unregister_device(&dev->ibdev);
+ return ret;
+ }
+ }
+
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ return 0;
+}
+
+void c2_unregister_device(struct c2_dev *dev)
+{
+ pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
+ unregister_netdev(dev->pseudo_netdev);
+ free_netdev(dev->pseudo_netdev);
+ ib_unregister_device(&dev->ibdev);
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
new file mode 100644
index 00000000000..fc906223220
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef C2_PROVIDER_H
+#define C2_PROVIDER_H
+#include <linux/inetdevice.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+
+#include "c2_mq.h"
+#include <rdma/iw_cm.h>
+
+#define C2_MPT_FLAG_ATOMIC (1 << 14)
+#define C2_MPT_FLAG_REMOTE_WRITE (1 << 13)
+#define C2_MPT_FLAG_REMOTE_READ (1 << 12)
+#define C2_MPT_FLAG_LOCAL_WRITE (1 << 11)
+#define C2_MPT_FLAG_LOCAL_READ (1 << 10)
+
+struct c2_buf_list {
+ void *buf;
+ DECLARE_PCI_UNMAP_ADDR(mapping)
+};
+
+
+/* The user context keeps track of objects allocated for a
+ * particular user-mode client. */
+struct c2_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+struct c2_mtt;
+
+/* All objects associated with a PD are kept in the
+ * associated user context if present.
+ */
+struct c2_pd {
+ struct ib_pd ibpd;
+ u32 pd_id;
+};
+
+struct c2_mr {
+ struct ib_mr ibmr;
+ struct c2_pd *pd;
+};
+
+struct c2_av;
+
+enum c2_ah_type {
+ C2_AH_ON_HCA,
+ C2_AH_PCI_POOL,
+ C2_AH_KMALLOC
+};
+
+struct c2_ah {
+ struct ib_ah ibah;
+};
+
+struct c2_cq {
+ struct ib_cq ibcq;
+ spinlock_t lock;
+ atomic_t refcount;
+ int cqn;
+ int is_kernel;
+ wait_queue_head_t wait;
+
+ u32 adapter_handle;
+ struct c2_mq mq;
+};
+
+struct c2_wq {
+ spinlock_t lock;
+};
+struct iw_cm_id;
+struct c2_qp {
+ struct ib_qp ibqp;
+ struct iw_cm_id *cm_id;
+ spinlock_t lock;
+ atomic_t refcount;
+ wait_queue_head_t wait;
+ int qpn;
+
+ u32 adapter_handle;
+ u32 send_sgl_depth;
+ u32 recv_sgl_depth;
+ u32 rdma_write_sgl_depth;
+ u8 state;
+
+ struct c2_mq sq_mq;
+ struct c2_mq rq_mq;
+};
+
+struct c2_cr_query_attrs {
+ u32 local_addr;
+ u32 remote_addr;
+ u16 local_port;
+ u16 remote_port;
+};
+
+static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct c2_pd, ibpd);
+}
+
+static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct c2_ucontext, ibucontext);
+}
+
+static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct c2_mr, ibmr);
+}
+
+
+static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct c2_ah, ibah);
+}
+
+static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct c2_cq, ibcq);
+}
+
+static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct c2_qp, ibqp);
+}
+
+static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
+{
+ struct in_device *ind;
+ int ret = 0;
+
+ ind = in_dev_get(netdev);
+ if (!ind)
+ return 0;
+
+ for_ifa(ind) {
+ if (ifa->ifa_address == addr) {
+ ret = 1;
+ break;
+ }
+ }
+ endfor_ifa(ind);
+ in_dev_put(ind);
+ return ret;
+}
+#endif /* C2_PROVIDER_H */
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
new file mode 100644
index 00000000000..12261132b07
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -0,0 +1,975 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "c2.h"
+#include "c2_vq.h"
+#include "c2_status.h"
+
+#define C2_MAX_ORD_PER_QP 128
+#define C2_MAX_IRD_PER_QP 128
+
+#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
+#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
+#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
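+/*
+ * Worked example: C2_HINT_MAKE(3, 7) == 0x00030007. C2_HINT_GET_INDEX
+ * masks with 0x7FFF0000 (bit 31 is the FIFO-full flag tested in
+ * c2_activity()) and recovers 3; C2_HINT_GET_COUNT recovers 7.
+ */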
+
+#define NO_SUPPORT -1
+static const u8 c2_opcode[] = {
+ [IB_WR_SEND] = C2_WR_TYPE_SEND,
+ [IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
+ [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
+ [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
+};
+
+static int to_c2_state(enum ib_qp_state ib_state)
+{
+ switch (ib_state) {
+ case IB_QPS_RESET:
+ return C2_QP_STATE_IDLE;
+ case IB_QPS_RTS:
+ return C2_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return C2_QP_STATE_CLOSING;
+ case IB_QPS_SQE:
+ return C2_QP_STATE_CLOSING;
+ case IB_QPS_ERR:
+ return C2_QP_STATE_ERROR;
+ default:
+ return -1;
+ }
+}
+
+static int to_ib_state(enum c2_qp_state c2_state)
+{
+ switch (c2_state) {
+ case C2_QP_STATE_IDLE:
+ return IB_QPS_RESET;
+ case C2_QP_STATE_CONNECTING:
+ return IB_QPS_RTR;
+ case C2_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case C2_QP_STATE_CLOSING:
+ return IB_QPS_SQD;
+ case C2_QP_STATE_ERROR:
+ return IB_QPS_ERR;
+ case C2_QP_STATE_TERMINATE:
+ return IB_QPS_SQE;
+ default:
+ return -1;
+ }
+}
+
+static const char *to_ib_state_str(int ib_state)
+{
+ static const char *state_str[] = {
+ "IB_QPS_RESET",
+ "IB_QPS_INIT",
+ "IB_QPS_RTR",
+ "IB_QPS_RTS",
+ "IB_QPS_SQD",
+ "IB_QPS_SQE",
+ "IB_QPS_ERR"
+ };
+ if (ib_state < IB_QPS_RESET ||
+ ib_state > IB_QPS_ERR)
+ return "<invalid IB QP state>";
+
+ ib_state -= IB_QPS_RESET;
+ return state_str[ib_state];
+}
+
+void c2_set_qp_state(struct c2_qp *qp, int c2_state)
+{
+ int new_state = to_ib_state(c2_state);
+
+ pr_debug("%s: qp[%p] state modify %s --> %s\n",
+ __FUNCTION__,
+ qp,
+ to_ib_state_str(qp->state),
+ to_ib_state_str(new_state));
+ qp->state = new_state;
+}
+
+#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
+
+int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
+ struct ib_qp_attr *attr, int attr_mask)
+{
+ struct c2wr_qp_modify_req wr;
+ struct c2wr_qp_modify_rep *reply;
+ struct c2_vq_req *vq_req;
+ unsigned long flags;
+ u8 next_state;
+ int err;
+
+ pr_debug("%s:%d qp=%p, %s --> %s\n",
+ __FUNCTION__, __LINE__,
+ qp,
+ to_ib_state_str(qp->state),
+ to_ib_state_str(attr->qp_state));
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ c2_wr_set_id(&wr, CCWR_QP_MODIFY);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.qp_handle = qp->adapter_handle;
+ wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
+ wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
+ wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
+ wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
+
+ if (attr_mask & IB_QP_STATE) {
+ /* Ensure the state is valid */
+ if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
+ err = -EINVAL;
+ goto bail0;
+ }
+
+ wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
+
+ if (attr->qp_state == IB_QPS_ERR) {
+ spin_lock_irqsave(&qp->lock, flags);
+ if (qp->cm_id && qp->state == IB_QPS_RTS) {
+ pr_debug("Generating CLOSE event for QP-->ERR, "
+ "qp=%p, cm_id=%p\n",qp,qp->cm_id);
+ /* Generate a CLOSE event */
+ vq_req->cm_id = qp->cm_id;
+ vq_req->event = IW_CM_EVENT_CLOSE;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+ }
+ next_state = attr->qp_state;
+
+ } else if (attr_mask & IB_QP_CUR_STATE) {
+
+ if (attr->cur_qp_state != IB_QPS_RTR &&
+ attr->cur_qp_state != IB_QPS_RTS &&
+ attr->cur_qp_state != IB_QPS_SQD &&
+ attr->cur_qp_state != IB_QPS_SQE) {
+ err = -EINVAL;
+ goto bail0;
+ }
+
+ wr.next_qp_state =
+ cpu_to_be32(to_c2_state(attr->cur_qp_state));
+
+ next_state = attr->cur_qp_state;
+
+ } else {
+ err = 0;
+ goto bail0;
+ }
+
+ /* reference the request struct */
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, (union c2wr *) & wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail0;
+
+ reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ err = c2_errno(reply);
+ if (!err)
+ qp->state = next_state;
+#ifdef DEBUG
+ else
+ pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err);
+#endif
+ /*
+ * If we're going to error and generating the event here, then
+ * we need to remove the reference because there will be no
+ * close event generated by the adapter
+ */
+ spin_lock_irqsave(&qp->lock, flags);
+ if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
+ qp->cm_id->rem_ref(qp->cm_id);
+ qp->cm_id = NULL;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+
+ pr_debug("%s:%d qp=%p, cur_state=%s\n",
+ __FUNCTION__, __LINE__,
+ qp,
+ to_ib_state_str(qp->state));
+ return err;
+}
+
+int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
+ int ord, int ird)
+{
+ struct c2wr_qp_modify_req wr;
+ struct c2wr_qp_modify_rep *reply;
+ struct c2_vq_req *vq_req;
+ int err;
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ c2_wr_set_id(&wr, CCWR_QP_MODIFY);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.qp_handle = qp->adapter_handle;
+ wr.ord = cpu_to_be32(ord);
+ wr.ird = cpu_to_be32(ird);
+ wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
+ wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
+ wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
+
+ /* reference the request struct */
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, (union c2wr *) & wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail0;
+
+ reply = (struct c2wr_qp_modify_rep *) (unsigned long)
+ vq_req->reply_msg;
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ err = c2_errno(reply);
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
+{
+ struct c2_vq_req *vq_req;
+ struct c2wr_qp_destroy_req wr;
+ struct c2wr_qp_destroy_rep *reply;
+ unsigned long flags;
+ int err;
+
+ /*
+ * Allocate a verb request message
+ */
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req) {
+ return -ENOMEM;
+ }
+
+ /*
+ * Initialize the WR
+ */
+ c2_wr_set_id(&wr, CCWR_QP_DESTROY);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.qp_handle = qp->adapter_handle;
+
+ /*
+ * reference the request struct. dereferenced in the int handler.
+ */
+ vq_req_get(c2dev, vq_req);
+
+ spin_lock_irqsave(&qp->lock, flags);
+ if (qp->cm_id && qp->state == IB_QPS_RTS) {
+ pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
+ "qp=%p, cm_id=%p\n",qp,qp->cm_id);
+ /* Generate a CLOSE event */
+ vq_req->qp = qp;
+ vq_req->cm_id = qp->cm_id;
+ vq_req->event = IW_CM_EVENT_CLOSE;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ /*
+ * Send WR to adapter
+ */
+ err = vq_send_wr(c2dev, (union c2wr *) & wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ /*
+ * Wait for reply from adapter
+ */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err) {
+ goto bail0;
+ }
+
+ /*
+ * Process reply
+ */
+ reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ spin_lock_irqsave(&qp->lock, flags);
+ if (qp->cm_id) {
+ qp->cm_id->rem_ref(qp->cm_id);
+ qp->cm_id = NULL;
+ }
+ spin_unlock_irqrestore(&qp->lock, flags);
+
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
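+/*
+ * QPN allocation below uses the classic idr_pre_get() /
+ * idr_get_new_above() idiom: preallocate IDR memory outside the
+ * lock, then retry the locked insert whenever it returns -EAGAIN.
+ */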
+static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
+{
+ int ret;
+
+ do {
+ spin_lock_irq(&c2dev->qp_table.lock);
+ ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
+ c2dev->qp_table.last++, &qp->qpn);
+ spin_unlock_irq(&c2dev->qp_table.lock);
+ } while ((ret == -EAGAIN) &&
+ idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
+ return ret;
+}
+
+static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
+{
+ spin_lock_irq(&c2dev->qp_table.lock);
+ idr_remove(&c2dev->qp_table.idr, qpn);
+ spin_unlock_irq(&c2dev->qp_table.lock);
+}
+
+struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
+{
+ unsigned long flags;
+ struct c2_qp *qp;
+
+ spin_lock_irqsave(&c2dev->qp_table.lock, flags);
+ qp = idr_find(&c2dev->qp_table.idr, qpn);
+ spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
+ return qp;
+}
+
+int c2_alloc_qp(struct c2_dev *c2dev,
+ struct c2_pd *pd,
+ struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
+{
+ struct c2wr_qp_create_req wr;
+ struct c2wr_qp_create_rep *reply;
+ struct c2_vq_req *vq_req;
+ struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
+ struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
+ unsigned long peer_pa;
+ u32 q_size, msg_size, mmap_size;
+ void __iomem *mmap;
+ int err;
+
+ err = c2_alloc_qpn(c2dev, qp);
+ if (err)
+ return err;
+ qp->ibqp.qp_num = qp->qpn;
+ qp->ibqp.qp_type = IB_QPT_RC;
+
+ /* Allocate the SQ and RQ shared pointers */
+ qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+ &qp->sq_mq.shared_dma, GFP_KERNEL);
+ if (!qp->sq_mq.shared) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+ &qp->rq_mq.shared_dma, GFP_KERNEL);
+ if (!qp->rq_mq.shared) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ /* Allocate the verbs request */
+ vq_req = vq_req_alloc(c2dev);
+ if (vq_req == NULL) {
+ err = -ENOMEM;
+ goto bail2;
+ }
+
+ /* Initialize the work request */
+ memset(&wr, 0, sizeof(wr));
+ c2_wr_set_id(&wr, CCWR_QP_CREATE);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+ wr.sq_cq_handle = send_cq->adapter_handle;
+ wr.rq_cq_handle = recv_cq->adapter_handle;
+ wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
+ wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
+ wr.srq_handle = 0;
+ wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
+ QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
+ wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
+ wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
+ wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
+ wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
+ wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
+ wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
+ wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
+ wr.pd_id = pd->pd_id;
+ wr.user_context = (unsigned long) qp;
+
+ vq_req_get(c2dev, vq_req);
+
+ /* Send the WR to the adapter */
+ err = vq_send_wr(c2dev, (union c2wr *) & wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail3;
+ }
+
+ /* Wait for the verb reply */
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err) {
+ goto bail3;
+ }
+
+ /* Process the reply */
+ reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail3;
+ }
+
+ if ((err = c2_wr_get_result(reply)) != 0) {
+ goto bail4;
+ }
+
+ /* Fill in the kernel QP struct */
+ atomic_set(&qp->refcount, 1);
+ qp->adapter_handle = reply->qp_handle;
+ qp->state = IB_QPS_RESET;
+ qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
+ qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
+ qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
+
+ /* Initialize the SQ MQ */
+ q_size = be32_to_cpu(reply->sq_depth);
+ msg_size = be32_to_cpu(reply->sq_msg_size);
+ peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
+ mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
+ mmap = ioremap_nocache(peer_pa, mmap_size);
+ if (!mmap) {
+ err = -ENOMEM;
+ goto bail5;
+ }
+
+ c2_mq_req_init(&qp->sq_mq,
+ be32_to_cpu(reply->sq_mq_index),
+ q_size,
+ msg_size,
+ mmap + sizeof(struct c2_mq_shared), /* pool start */
+ mmap, /* peer */
+ C2_MQ_ADAPTER_TARGET);
+
+ /* Initialize the RQ mq */
+ q_size = be32_to_cpu(reply->rq_depth);
+ msg_size = be32_to_cpu(reply->rq_msg_size);
+ peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
+ mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
+ mmap = ioremap_nocache(peer_pa, mmap_size);
+ if (!mmap) {
+ err = -ENOMEM;
+ goto bail6;
+ }
+
+ c2_mq_req_init(&qp->rq_mq,
+ be32_to_cpu(reply->rq_mq_index),
+ q_size,
+ msg_size,
+ mmap + sizeof(struct c2_mq_shared), /* pool start */
+ mmap, /* peer */
+ C2_MQ_ADAPTER_TARGET);
+
+ vq_repbuf_free(c2dev, reply);
+ vq_req_free(c2dev, vq_req);
+
+ return 0;
+
+ bail6:
+ iounmap(qp->sq_mq.peer);
+ bail5:
+ destroy_qp(c2dev, qp);
+ bail4:
+ vq_repbuf_free(c2dev, reply);
+ bail3:
+ vq_req_free(c2dev, vq_req);
+ bail2:
+ c2_free_mqsp(qp->rq_mq.shared);
+ bail1:
+ c2_free_mqsp(qp->sq_mq.shared);
+ bail0:
+ c2_free_qpn(c2dev, qp->qpn);
+ return err;
+}
+
+void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
+{
+ struct c2_cq *send_cq;
+ struct c2_cq *recv_cq;
+
+ send_cq = to_c2cq(qp->ibqp.send_cq);
+ recv_cq = to_c2cq(qp->ibqp.recv_cq);
+
+ /*
+ * Lock CQs here, so that CQ polling code can do QP lookup
+ * without taking a lock.
+ */
+ spin_lock_irq(&send_cq->lock);
+ if (send_cq != recv_cq)
+ spin_lock(&recv_cq->lock);
+
+ c2_free_qpn(c2dev, qp->qpn);
+
+ if (send_cq != recv_cq)
+ spin_unlock(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+
+ /*
+ * Destroy the qp in the rnic...
+ */
+ destroy_qp(c2dev, qp);
+
+ /*
+ * Mark any unreaped CQEs as null and void.
+ */
+ c2_cq_clean(c2dev, qp, send_cq->cqn);
+ if (send_cq != recv_cq)
+ c2_cq_clean(c2dev, qp, recv_cq->cqn);
+ /*
+ * Unmap the MQs and return the shared pointers
+ * to the message pool.
+ */
+ iounmap(qp->sq_mq.peer);
+ iounmap(qp->rq_mq.peer);
+ c2_free_mqsp(qp->sq_mq.shared);
+ c2_free_mqsp(qp->rq_mq.shared);
+
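+ /*
+ * Drop the reference taken at allocation time and wait for any
+ * remaining holders (presumably event or CQ paths that looked
+ * the QP up) to release theirs.
+ */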
+ atomic_dec(&qp->refcount);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+}
+
+/*
+ * Function: move_sgl
+ *
+ * Description:
+ * Move an SGL from the user's work request struct into a CCIL Work Request
+ * message, swapping to WR byte order and ensuring the total length doesn't
+ * overflow.
+ *
+ * IN:
+ * dst - ptr to CCIL Work Request message SGL memory.
+ * src - ptr to the consumer's SGL memory.
+ *
+ * OUT: none
+ *
+ * Return:
+ * CCIL status codes.
+ */
+static int
+move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
+ u8 * actual_count)
+{
+ u32 tot = 0; /* running total */
+ u8 acount = 0; /* running total non-0 len sge's */
+
+ while (count > 0) {
+ /*
+ * If the addition of this SGE causes the
+ * total SGL length to exceed 2^32-1, then
+ * fail-n-bail.
+ *
+ * If the current total plus the next element length
+ * wraps, the unsigned sum comes out less than the
+ * current total...
+ */
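+ /*
+ * Worked example: tot = 0xfffffff0 plus length 0x20 wraps
+ * (mod 2^32) to 0x10, which is less than tot, so the
+ * overflow is caught here.
+ */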
+ if ((tot + src->length) < tot) {
+ return -EINVAL;
+ }
+ /*
+ * Bug: 1456 (as well as 1498 & 1643)
+ * Skip over any sge's supplied with len=0
+ */
+ if (src->length) {
+ tot += src->length;
+ dst->stag = cpu_to_be32(src->lkey);
+ dst->to = cpu_to_be64(src->addr);
+ dst->length = cpu_to_be32(src->length);
+ dst++;
+ acount++;
+ }
+ src++;
+ count--;
+ }
+
+ if (acount == 0) {
+ /*
+ * Bug: 1476 (as well as 1498, 1456 and 1643)
+ * Setup the SGL in the WR to make it easier for the RNIC.
+ * This way, the FW doesn't have to deal with special cases.
+ * Setting length=0 should be sufficient.
+ */
+ dst->stag = 0;
+ dst->to = 0;
+ dst->length = 0;
+ }
+
+ *p_len = tot;
+ *actual_count = acount;
+ return 0;
+}
+
+/*
+ * Function: c2_activity (private function)
+ *
+ * Description:
+ * Post an mq index to the host->adapter activity fifo.
+ *
+ * IN:
+ * c2dev - ptr to c2dev structure
+ * mq_index - mq index to post
+ * shared - value most recently written to shared
+ *
+ * OUT:
+ *
+ * Return:
+ * none
+ */
+static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
+{
+ /*
+ * First read the register to see if the FIFO is full, and if so,
+ * spin until it's not. This isn't perfect -- there is no
+ * synchronization among the clients of the register, but in
+ * practice it prevents multiple CPUs from hammering the bus
+ * with PCI RETRY. Note that when this does happen, the card
+ * cannot get on the bus and the card and system hang in a
+ * deadlock -- thus the need for this code. [TOT]
+ */
+ while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(0);
+ }
+
+ __raw_writel(C2_HINT_MAKE(mq_index, shared),
+ c2dev->regs + PCI_BAR0_ADAPTER_HINT);
+}
+
+/*
+ * Function: qp_wr_post
+ *
+ * Description:
+ * This function allocates an MQ msg, then moves the host-copy of
+ * the completed WR into msg. Then it posts the message.
+ *
+ * IN:
+ * q - ptr to user MQ.
+ * wr - ptr to host-copy of the WR.
+ * qp - ptr to user qp
+ * size - Number of bytes to post. Assumed to be divisible by 4.
+ *
+ * OUT: none
+ *
+ * Return:
+ * CCIL status codes.
+ */
+static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
+{
+ union c2wr *msg;
+
+ msg = c2_mq_alloc(q);
+ if (msg == NULL) {
+ return -EINVAL;
+ }
+#ifdef CCMSGMAGIC
+ ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
+#endif
+
+ /*
+ * Since all header fields in the WR are the same as the
+ * CQE, set the following so the adapter need not.
+ */
+ c2_wr_set_result(wr, CCERR_PENDING);
+
+ /*
+ * Copy the wr down to the adapter
+ */
+ memcpy((void *) msg, (void *) wr, size);
+
+ c2_mq_produce(q);
+ return 0;
+}
+
+
+int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct c2_dev *c2dev = to_c2dev(ibqp->device);
+ struct c2_qp *qp = to_c2qp(ibqp);
+ union c2wr wr;
+ int err = 0;
+
+ u32 flags;
+ u32 tot_len;
+ u8 actual_sge_count;
+ u32 msg_size;
+
+ if (qp->state > IB_QPS_RTS)
+ return -EINVAL;
+
+ while (ib_wr) {
+
+ flags = 0;
+ wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
+ if (ib_wr->send_flags & IB_SEND_SIGNALED) {
+ flags |= SQ_SIGNALED;
+ }
+
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED) {
+ c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
+ msg_size = sizeof(struct c2wr_send_req);
+ } else {
+ c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
+ msg_size = sizeof(struct c2wr_send_req);
+ }
+
+ wr.sqwr.send.remote_stag = 0;
+ msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
+ if (ib_wr->num_sge > qp->send_sgl_depth) {
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->send_flags & IB_SEND_FENCE) {
+ flags |= SQ_READ_FENCE;
+ }
+ err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
+ ib_wr->sg_list,
+ ib_wr->num_sge,
+ &tot_len, &actual_sge_count);
+ wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
+ c2_wr_set_sge_count(&wr, actual_sge_count);
+ break;
+ case IB_WR_RDMA_WRITE:
+ c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
+ msg_size = sizeof(struct c2wr_rdma_write_req) +
+ (sizeof(struct c2_data_addr) * ib_wr->num_sge);
+ if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->send_flags & IB_SEND_FENCE) {
+ flags |= SQ_READ_FENCE;
+ }
+ wr.sqwr.rdma_write.remote_stag =
+ cpu_to_be32(ib_wr->wr.rdma.rkey);
+ wr.sqwr.rdma_write.remote_to =
+ cpu_to_be64(ib_wr->wr.rdma.remote_addr);
+ err = move_sgl((struct c2_data_addr *)
+ & (wr.sqwr.rdma_write.data),
+ ib_wr->sg_list,
+ ib_wr->num_sge,
+ &tot_len, &actual_sge_count);
+ wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
+ c2_wr_set_sge_count(&wr, actual_sge_count);
+ break;
+ case IB_WR_RDMA_READ:
+ c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
+ msg_size = sizeof(struct c2wr_rdma_read_req);
+
+ /* iWARP only supports 1 sge for RDMA reads */
+ if (ib_wr->num_sge > 1) {
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Move the local and remote stag/to/len into the WR.
+ */
+ wr.sqwr.rdma_read.local_stag =
+ cpu_to_be32(ib_wr->sg_list->lkey);
+ wr.sqwr.rdma_read.local_to =
+ cpu_to_be64(ib_wr->sg_list->addr);
+ wr.sqwr.rdma_read.remote_stag =
+ cpu_to_be32(ib_wr->wr.rdma.rkey);
+ wr.sqwr.rdma_read.remote_to =
+ cpu_to_be64(ib_wr->wr.rdma.remote_addr);
+ wr.sqwr.rdma_read.length =
+ cpu_to_be32(ib_wr->sg_list->length);
+ break;
+ default:
+ /* error */
+ msg_size = 0;
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * If we had an error on the last wr build, then
+ * break out. Possible errors include bogus WR
+ * type, and a bogus SGL length...
+ */
+ if (err) {
+ break;
+ }
+
+ /*
+ * Store flags
+ */
+ c2_wr_set_flags(&wr, flags);
+
+ /*
+ * Post the puppy!
+ */
+ err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
+ if (err) {
+ break;
+ }
+
+ /*
+ * Enqueue mq index to activity FIFO.
+ */
+ c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
+
+ ib_wr = ib_wr->next;
+ }
+
+ if (err)
+ *bad_wr = ib_wr;
+ return err;
+}
+
+int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct c2_dev *c2dev = to_c2dev(ibqp->device);
+ struct c2_qp *qp = to_c2qp(ibqp);
+ union c2wr wr;
+ int err = 0;
+
+ if (qp->state > IB_QPS_RTS)
+ return -EINVAL;
+
+ /*
+ * Try and post each work request
+ */
+ while (ib_wr) {
+ u32 tot_len;
+ u8 actual_sge_count;
+
+ if (ib_wr->num_sge > qp->recv_sgl_depth) {
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Create local host-copy of the WR
+ */
+ wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
+ c2_wr_set_id(&wr, CCWR_RECV);
+ c2_wr_set_flags(&wr, 0);
+
+ /* sge_count is limited to eight bits. */
+ BUG_ON(ib_wr->num_sge >= 256);
+ err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
+ ib_wr->sg_list,
+ ib_wr->num_sge, &tot_len, &actual_sge_count);
+ c2_wr_set_sge_count(&wr, actual_sge_count);
+
+ /*
+ * If we had an error on the last wr build, then
+ * break out. Possible errors include bogus WR
+ * type, and a bogus SGL length...
+ */
+ if (err) {
+ break;
+ }
+
+ err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
+ if (err) {
+ break;
+ }
+
+ /*
+ * Enqueue mq index to activity FIFO
+ */
+ c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
+
+ ib_wr = ib_wr->next;
+ }
+
+ if (err)
+ *bad_wr = ib_wr;
+ return err;
+}
+
+void __devinit c2_init_qp_table(struct c2_dev *c2dev)
+{
+ spin_lock_init(&c2dev->qp_table.lock);
+ idr_init(&c2dev->qp_table.idr);
+}
+
+void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
+{
+ idr_destroy(&c2dev->qp_table.idr);
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
new file mode 100644
index 00000000000..1c3c9d65ece
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/inet.h>
+
+#include <linux/route.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <rdma/ib_smi.h>
+#include "c2.h"
+#include "c2_vq.h"
+
+/* Device capabilities */
+#define C2_MIN_PAGESIZE 1024
+
+#define C2_MAX_MRS 32768
+#define C2_MAX_QPS 16000
+#define C2_MAX_WQE_SZ 256
+#define C2_MAX_QP_WR ((128*1024)/C2_MAX_WQE_SZ)
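+/* i.e. (128*1024)/256 = 512 outstanding work requests per queue */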
+#define C2_MAX_SGES 4
+#define C2_MAX_SGE_RD 1
+#define C2_MAX_CQS 32768
+#define C2_MAX_CQES 4096
+#define C2_MAX_PDS 16384
+
+/*
+ * Send the adapter INIT message to the amso1100
+ */
+static int c2_adapter_init(struct c2_dev *c2dev)
+{
+ struct c2wr_init_req wr;
+ int err;
+
+ memset(&wr, 0, sizeof(wr));
+ c2_wr_set_id(&wr, CCWR_INIT);
+ wr.hdr.context = 0;
+ wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
+ wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
+ wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
+ wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
+ wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
+ wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
+
+ /* Post the init message */
+ err = vq_send_wr(c2dev, (union c2wr *) & wr);
+
+ return err;
+}
+
+/*
+ * Send the adapter TERM message to the amso1100
+ */
+static void c2_adapter_term(struct c2_dev *c2dev)
+{
+ struct c2wr_init_req wr;
+
+ memset(&wr, 0, sizeof(wr));
+ c2_wr_set_id(&wr, CCWR_TERM);
+ wr.hdr.context = 0;
+
+ /* Post the term message */
+ vq_send_wr(c2dev, (union c2wr *) & wr);
+ c2dev->init = 0;
+
+ return;
+}
+
+/*
+ * Query the adapter
+ */
+static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
+{
+ struct c2_vq_req *vq_req;
+ struct c2wr_rnic_query_req wr;
+ struct c2wr_rnic_query_rep *reply;
+ int err;
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
+ wr.hdr.context = (unsigned long) vq_req;
+ wr.rnic_handle = c2dev->adapter_handle;
+
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, (union c2wr *) &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail1;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail1;
+
+ reply =
+ (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ err = c2_errno(reply);
+ if (err)
+ goto bail2;
+
+ props->fw_ver =
+ ((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
+ ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
+ (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
+ memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
+ props->max_mr_size = 0xFFFFFFFF;
+ props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
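+ /* ~(1024-1) == 0xfffffc00: every power-of-two page size >= 1KB */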
+ props->vendor_id = be32_to_cpu(reply->vendor_id);
+ props->vendor_part_id = be32_to_cpu(reply->part_number);
+ props->hw_ver = be32_to_cpu(reply->hw_version);
+ props->max_qp = be32_to_cpu(reply->max_qps);
+ props->max_qp_wr = be32_to_cpu(reply->max_qp_depth);
+ props->device_cap_flags = c2dev->device_cap_flags;
+ props->max_sge = C2_MAX_SGES;
+ props->max_sge_rd = C2_MAX_SGE_RD;
+ props->max_cq = be32_to_cpu(reply->max_cqs);
+ props->max_cqe = be32_to_cpu(reply->max_cq_depth);
+ props->max_mr = be32_to_cpu(reply->max_mrs);
+ props->max_pd = be32_to_cpu(reply->max_pds);
+ props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird);
+ props->max_ee_rd_atom = 0;
+ props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird);
+ props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
+ props->max_ee_init_rd_atom = 0;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_ee = 0;
+ props->max_rdd = 0;
+ props->max_mw = be32_to_cpu(reply->max_mws);
+ props->max_raw_ipv6_qp = 0;
+ props->max_raw_ethy_qp = 0;
+ props->max_mcast_grp = 0;
+ props->max_mcast_qp_attach = 0;
+ props->max_total_mcast_qp_attach = 0;
+ props->max_ah = 0;
+ props->max_fmr = 0;
+ props->max_map_per_fmr = 0;
+ props->max_srq = 0;
+ props->max_srq_wr = 0;
+ props->max_srq_sge = 0;
+ props->max_pkeys = 0;
+ props->local_ca_ack_delay = 0;
+
+ bail2:
+ vq_repbuf_free(c2dev, reply);
+
+ bail1:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+/*
+ * Add an IP address to the RNIC interface
+ */
+int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+{
+ struct c2_vq_req *vq_req;
+ struct c2wr_rnic_setconfig_req *wr;
+ struct c2wr_rnic_setconfig_rep *reply;
+ struct c2_netaddr netaddr;
+ int err, len;
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ len = sizeof(struct c2_netaddr);
+ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
+ if (!wr) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
+ wr->hdr.context = (unsigned long) vq_req;
+ wr->rnic_handle = c2dev->adapter_handle;
+ wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
+
+ netaddr.ip_addr = inaddr;
+ netaddr.netmask = inmask;
+ netaddr.mtu = 0;
+
+ memcpy(wr->data, &netaddr, len);
+
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, (union c2wr *) wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail1;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail1;
+
+ reply =
+ (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ err = c2_errno(reply);
+ vq_repbuf_free(c2dev, reply);
+
+ bail1:
+ kfree(wr);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+/*
+ * Delete an IP address from the RNIC interface
+ */
+int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
+{
+ struct c2_vq_req *vq_req;
+ struct c2wr_rnic_setconfig_req *wr;
+ struct c2wr_rnic_setconfig_rep *reply;
+ struct c2_netaddr netaddr;
+ int err, len;
+
+ vq_req = vq_req_alloc(c2dev);
+ if (!vq_req)
+ return -ENOMEM;
+
+ len = sizeof(struct c2_netaddr);
+ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
+ if (!wr) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
+ wr->hdr.context = (unsigned long) vq_req;
+ wr->rnic_handle = c2dev->adapter_handle;
+ wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
+
+ netaddr.ip_addr = inaddr;
+ netaddr.netmask = inmask;
+ netaddr.mtu = 0;
+
+ memcpy(wr->data, &netaddr, len);
+
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, (union c2wr *) wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail1;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err)
+ goto bail1;
+
+ reply =
+ (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ err = c2_errno(reply);
+ vq_repbuf_free(c2dev, reply);
+
+ bail1:
+ kfree(wr);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+/*
+ * Open a single RNIC instance to use with all
+ * low-level OpenIB calls
+ */
+static int c2_rnic_open(struct c2_dev *c2dev)
+{
+ struct c2_vq_req *vq_req;
+ union c2wr wr;
+ struct c2wr_rnic_open_rep *reply;
+ int err;
+
+ vq_req = vq_req_alloc(c2dev);
+ if (vq_req == NULL) {
+ return -ENOMEM;
+ }
+
+ memset(&wr, 0, sizeof(wr));
+ c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
+ wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
+ wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
+ wr.rnic_open.req.port_num = cpu_to_be16(0);
+ wr.rnic_open.req.user_context = (unsigned long) c2dev;
+
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err) {
+ goto bail0;
+ }
+
+ reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ if ((err = c2_errno(reply)) != 0) {
+ goto bail1;
+ }
+
+ c2dev->adapter_handle = reply->rnic_handle;
+
+ bail1:
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+/*
+ * Close the RNIC instance
+ */
+static int c2_rnic_close(struct c2_dev *c2dev)
+{
+ struct c2_vq_req *vq_req;
+ union c2wr wr;
+ struct c2wr_rnic_close_rep *reply;
+ int err;
+
+ vq_req = vq_req_alloc(c2dev);
+ if (vq_req == NULL) {
+ return -ENOMEM;
+ }
+
+ memset(&wr, 0, sizeof(wr));
+ c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
+ wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
+ wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
+
+ vq_req_get(c2dev, vq_req);
+
+ err = vq_send_wr(c2dev, &wr);
+ if (err) {
+ vq_req_put(c2dev, vq_req);
+ goto bail0;
+ }
+
+ err = vq_wait_for_reply(c2dev, vq_req);
+ if (err) {
+ goto bail0;
+ }
+
+ reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
+ if (!reply) {
+ err = -ENOMEM;
+ goto bail0;
+ }
+
+ if ((err = c2_errno(reply)) != 0) {
+ goto bail1;
+ }
+
+ c2dev->adapter_handle = 0;
+
+ bail1:
+ vq_repbuf_free(c2dev, reply);
+ bail0:
+ vq_req_free(c2dev, vq_req);
+ return err;
+}
+
+/*
+ * Called by c2_probe to initialize the RNIC. This principally
+ * involves initializing the various limits and resource pools that
+ * comprise the RNIC instance.
+ */
+int c2_rnic_init(struct c2_dev *c2dev)
+{
+ int err;
+ u32 qsize, msgsize;
+ void *q1_pages;
+ void *q2_pages;
+ void __iomem *mmio_regs;
+
+ /* Device capabilities */
+ c2dev->device_cap_flags =
+ (IB_DEVICE_RESIZE_MAX_WR |
+ IB_DEVICE_CURR_QP_STATE_MOD |
+ IB_DEVICE_SYS_IMAGE_GUID |
+ IB_DEVICE_ZERO_STAG |
+ IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
+
+ /* Allocate the qptr_array */
+ c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+ if (!c2dev->qptr_array) {
+ return -ENOMEM;
+ }
+
+ /* Initialize the qptr_array */
+ memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+ c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
+ c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
+ c2dev->qptr_array[2] = (void *) &c2dev->aeq;
+
+ /* Initialize data structures */
+ init_waitqueue_head(&c2dev->req_vq_wo);
+ spin_lock_init(&c2dev->vqlock);
+ spin_lock_init(&c2dev->lock);
+
+ /* Allocate MQ shared pointer pool for kernel clients. User
+ * mode client pools are hung off the user context
+ */
+ err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
+ if (err) {
+ goto bail0;
+ }
+
+ /* Allocate shared pointers for Q0, Q1, and Q2 from
+ * the shared pointer pool.
+ */
+
+ c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+ &c2dev->hint_count_dma,
+ GFP_KERNEL);
+ c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+ &c2dev->req_vq.shared_dma,
+ GFP_KERNEL);
+ c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+ &c2dev->rep_vq.shared_dma,
+ GFP_KERNEL);
+ c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+ &c2dev->aeq.shared_dma, GFP_KERNEL);
+ if (!c2dev->hint_count || !c2dev->req_vq.shared ||
+ !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+
+ mmio_regs = c2dev->kva;
+ /* Initialize the Verbs Request Queue */
+ c2_mq_req_init(&c2dev->req_vq, 0,
+ be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
+ be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
+ mmio_regs +
+ be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
+ mmio_regs +
+ be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
+ C2_MQ_ADAPTER_TARGET);
+
+ /* Initialize the Verbs Reply Queue */
+ qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
+ msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
+ q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+ if (!q1_pages) {
+ err = -ENOMEM;
+ goto bail1;
+ }
+ c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
+ (void *)q1_pages, qsize * msgsize,
+ DMA_FROM_DEVICE);
+ pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+ pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
+ (u64)c2dev->rep_vq.host_dma);
+ c2_mq_rep_init(&c2dev->rep_vq,
+ 1,
+ qsize,
+ msgsize,
+ q1_pages,
+ mmio_regs +
+ be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
+ C2_MQ_HOST_TARGET);
+
+ /* Initialize the Asynchronous Event Queue */
+ qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
+ msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
+ q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+ if (!q2_pages) {
+ err = -ENOMEM;
+ goto bail2;
+ }
+ c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
+ (void *)q2_pages, qsize * msgsize,
+ DMA_FROM_DEVICE);
+ pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+ pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
+ (u64)c2dev->rep_vq.host_dma);
+ c2_mq_rep_init(&c2dev->aeq,
+ 2,
+ qsize,
+ msgsize,
+ q2_pages,
+ mmio_regs +
+ be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
+ C2_MQ_HOST_TARGET);
+
+ /* Initialize the verbs request allocator */
+ err = vq_init(c2dev);
+ if (err)
+ goto bail3;
+
+ /* Enable interrupts on the adapter */
+ writel(0, c2dev->regs + C2_IDIS);
+
+ /* create the WR init message */
+ err = c2_adapter_init(c2dev);
+ if (err)
+ goto bail4;
+ c2dev->init++;
+
+ /* open an adapter instance */
+ err = c2_rnic_open(c2dev);
+ if (err)
+ goto bail4;
+
+ /* Cache the adapter limits */
+ if (c2_rnic_query(c2dev, &c2dev->props))
+ goto bail5;
+
+ /* Initialize the PD pool */
+ err = c2_init_pd_table(c2dev);
+ if (err)
+ goto bail5;
+
+ /* Initialize the QP pool */
+ c2_init_qp_table(c2dev);
+ return 0;
+
+ bail5:
+ c2_rnic_close(c2dev);
+ bail4:
+ vq_term(c2dev);
+ bail3:
+ dma_unmap_single(c2dev->ibdev.dma_device,
+ pci_unmap_addr(&c2dev->aeq, mapping),
+ c2dev->aeq.q_size * c2dev->aeq.msg_size,
+ DMA_FROM_DEVICE);
+ kfree(q2_pages);
+ bail2:
+ dma_unmap_single(c2dev->ibdev.dma_device,
+ pci_unmap_addr(&c2dev->rep_vq, mapping),
+ c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+ DMA_FROM_DEVICE);
+ kfree(q1_pages);
+ bail1:
+ c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
+ bail0:
+ vfree(c2dev->qptr_array);
+
+ return err;
+}
+
+/*
+ * Called by c2_remove to cleanup the RNIC resources.
+ */
+void c2_rnic_term(struct c2_dev *c2dev)
+{
+
+ /* Close the open adapter instance */
+ c2_rnic_close(c2dev);
+
+ /* Send the TERM message to the adapter */
+ c2_adapter_term(c2dev);
+
+ /* Disable interrupts on the adapter */
+ writel(1, c2dev->regs + C2_IDIS);
+
+ /* Free the QP pool */
+ c2_cleanup_qp_table(c2dev);
+
+ /* Free the PD pool */
+ c2_cleanup_pd_table(c2dev);
+
+ /* Free the verbs request allocator */
+ vq_term(c2dev);
+
+ /* Unmap and free the asynchronous event queue */
+ dma_unmap_single(c2dev->ibdev.dma_device,
+ pci_unmap_addr(&c2dev->aeq, mapping),
+ c2dev->aeq.q_size * c2dev->aeq.msg_size,
+ DMA_FROM_DEVICE);
+ kfree(c2dev->aeq.msg_pool.host);
+
+ /* Unmap and free the verbs reply queue */
+ dma_unmap_single(c2dev->ibdev.dma_device,
+ pci_unmap_addr(&c2dev->rep_vq, mapping),
+ c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+ DMA_FROM_DEVICE);
+ kfree(c2dev->rep_vq.msg_pool.host);
+
+ /* Free the MQ shared pointer pool */
+ c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
+
+ /* Free the qptr_array */
+ vfree(c2dev->qptr_array);
+
+ return;
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_status.h b/drivers/infiniband/hw/amso1100/c2_status.h
new file mode 100644
index 00000000000..6ee4aa92d87
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_status.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _C2_STATUS_H_
+#define _C2_STATUS_H_
+
+/*
+ * Verbs Status Codes
+ */
+enum c2_status {
+ C2_OK = 0, /* This must be zero */
+ CCERR_INSUFFICIENT_RESOURCES = 1,
+ CCERR_INVALID_MODIFIER = 2,
+ CCERR_INVALID_MODE = 3,
+ CCERR_IN_USE = 4,
+ CCERR_INVALID_RNIC = 5,
+ CCERR_INTERRUPTED_OPERATION = 6,
+ CCERR_INVALID_EH = 7,
+ CCERR_INVALID_CQ = 8,
+ CCERR_CQ_EMPTY = 9,
+ CCERR_NOT_IMPLEMENTED = 10,
+ CCERR_CQ_DEPTH_TOO_SMALL = 11,
+ CCERR_PD_IN_USE = 12,
+ CCERR_INVALID_PD = 13,
+ CCERR_INVALID_SRQ = 14,
+ CCERR_INVALID_ADDRESS = 15,
+ CCERR_INVALID_NETMASK = 16,
+ CCERR_INVALID_QP = 17,
+ CCERR_INVALID_QP_STATE = 18,
+ CCERR_TOO_MANY_WRS_POSTED = 19,
+ CCERR_INVALID_WR_TYPE = 20,
+ CCERR_INVALID_SGL_LENGTH = 21,
+ CCERR_INVALID_SQ_DEPTH = 22,
+ CCERR_INVALID_RQ_DEPTH = 23,
+ CCERR_INVALID_ORD = 24,
+ CCERR_INVALID_IRD = 25,
+ CCERR_QP_ATTR_CANNOT_CHANGE = 26,
+ CCERR_INVALID_STAG = 27,
+ CCERR_QP_IN_USE = 28,
+ CCERR_OUTSTANDING_WRS = 29,
+ CCERR_STAG_IN_USE = 30,
+ CCERR_INVALID_STAG_INDEX = 31,
+ CCERR_INVALID_SGL_FORMAT = 32,
+ CCERR_ADAPTER_TIMEOUT = 33,
+ CCERR_INVALID_CQ_DEPTH = 34,
+ CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
+ CCERR_INVALID_EP = 36,
+ CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
+ CCERR_FLUSHED = 38,
+ CCERR_INVALID_WQE = 39,
+ CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
+ CCERR_REMOTE_TERMINATION_ERROR = 41,
+ CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
+ CCERR_ACCESS_VIOLATION = 43,
+ CCERR_INVALID_PD_ID = 44,
+ CCERR_WRAP_ERROR = 45,
+ CCERR_INV_STAG_ACCESS_ERROR = 46,
+ CCERR_ZERO_RDMA_READ_RESOURCES = 47,
+ CCERR_QP_NOT_PRIVILEGED = 48,
+ CCERR_STAG_STATE_NOT_INVALID = 49,
+ CCERR_INVALID_PAGE_SIZE = 50,
+ CCERR_INVALID_BUFFER_SIZE = 51,
+ CCERR_INVALID_PBE = 52,
+ CCERR_INVALID_FBO = 53,
+ CCERR_INVALID_LENGTH = 54,
+ CCERR_INVALID_ACCESS_RIGHTS = 55,
+ CCERR_PBL_TOO_BIG = 56,
+ CCERR_INVALID_VA = 57,
+ CCERR_INVALID_REGION = 58,
+ CCERR_INVALID_WINDOW = 59,
+ CCERR_TOTAL_LENGTH_TOO_BIG = 60,
+ CCERR_INVALID_QP_ID = 61,
+ CCERR_ADDR_IN_USE = 62,
+ CCERR_ADDR_NOT_AVAIL = 63,
+ CCERR_NET_DOWN = 64,
+ CCERR_NET_UNREACHABLE = 65,
+ CCERR_CONN_ABORTED = 66,
+ CCERR_CONN_RESET = 67,
+ CCERR_NO_BUFS = 68,
+ CCERR_CONN_TIMEDOUT = 69,
+ CCERR_CONN_REFUSED = 70,
+ CCERR_HOST_UNREACHABLE = 71,
+ CCERR_INVALID_SEND_SGL_DEPTH = 72,
+ CCERR_INVALID_RECV_SGL_DEPTH = 73,
+ CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
+ CCERR_INSUFFICIENT_PRIVILEGES = 75,
+ CCERR_STACK_ERROR = 76,
+ CCERR_INVALID_VERSION = 77,
+ CCERR_INVALID_MTU = 78,
+ CCERR_INVALID_IMAGE = 79,
+	CCERR_PENDING = 98,	/* not an error; used internally by adapter */
+	CCERR_DEFER = 99,	/* not an error; used internally by adapter */
+ CCERR_FAILED_WRITE = 100,
+ CCERR_FAILED_ERASE = 101,
+ CCERR_FAILED_VERIFICATION = 102,
+ CCERR_NOT_FOUND = 103,
+
+};
+
+/*
+ * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
+ */
+enum c2_connect_status {
+ C2_CONN_STATUS_SUCCESS = C2_OK,
+ C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
+ C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
+ C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
+ C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
+ C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
+ C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
+ C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
+ C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
+ C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
+ C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
+};
+
+/*
+ * Flash programming status codes.
+ */
+enum c2_flash_status {
+ C2_FLASH_STATUS_SUCCESS = 0x0000,
+ C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
+ C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
+ C2_FLASH_STATUS_ECLBS = 0x0400,
+ C2_FLASH_STATUS_PSLBS = 0x0800,
+ C2_FLASH_STATUS_VPENS = 0x1000,
+};
+
+#endif /* _C2_STATUS_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_user.h b/drivers/infiniband/hw/amso1100/c2_user.h
new file mode 100644
index 00000000000..7e9e7ad6546
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_user.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef C2_USER_H
+#define C2_USER_H
+
+#include <linux/types.h>
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
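+/*
+ * For example (illustrative only; 'buf' is a hypothetical userspace
+ * pointer), a userspace library would pass a buffer address as:
+ *
+ *	cmd.arm_db_page = (__u64) (unsigned long) buf;
+ */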
+
+struct c2_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 uarc_size;
+};
+
+struct c2_alloc_pd_resp {
+ __u32 pdn;
+ __u32 reserved;
+};
+
+struct c2_create_cq {
+ __u32 lkey;
+ __u32 pdn;
+ __u64 arm_db_page;
+ __u64 set_db_page;
+ __u32 arm_db_index;
+ __u32 set_db_index;
+};
+
+struct c2_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct c2_create_qp {
+ __u32 lkey;
+ __u32 reserved;
+ __u64 sq_db_page;
+ __u64 rq_db_page;
+ __u32 sq_db_index;
+ __u32 rq_db_index;
+};
+
+#endif /* C2_USER_H */
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
new file mode 100644
index 00000000000..40caeb5f41b
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "c2_vq.h"
+#include "c2_provider.h"
+
+/*
+ * Verbs Request Objects:
+ *
+ * VQ Request Objects are allocated by the kernel verbs handlers.
+ * They contain a wait object, a refcnt, an atomic bool indicating that the
+ * adapter has replied, and a copy of the verb reply work request.
+ * A pointer to the VQ Request Object is passed down in the context
+ * field of the work request message, and reflected back by the adapter
+ * in the verbs reply message. The function handle_vq() in the interrupt
+ * path will use this pointer to:
+ * 1) append a copy of the verbs reply message
+ * 2) mark that the reply is ready
+ * 3) wake up the kernel verbs handler blocked awaiting the reply.
+ *
+ * The kernel verbs handlers do a "get" to put a 2nd reference on the
+ * VQ Request object. If the kernel verbs handler exits before the adapter
+ * can respond, this extra reference will keep the VQ Request object around
+ * until the adapter's reply can be processed. We need this because a
+ * pointer to this object is stuffed into the context field of the verbs
+ * work request message and reflected back in the reply message, where
+ * the interrupt handler (handle_vq()) uses it to wake up the kernel
+ * verb handler blocked awaiting the verb reply. handle_vq() therefore
+ * does a "put" on the object when it is done accessing it.
+ * NOTE: If we could guarantee that the kernel verb handler never bails
+ * before getting the reply, we would not need these refcnts.
+ *
+ * VQ Request objects are freed by the kernel verbs handlers only
+ * after the verb has been processed, or when the adapter fails and
+ * does not reply.
+ *
+ * Verbs Reply Buffers:
+ *
+ * VQ Reply bufs are local host memory copies of an outstanding Verb
+ * Request reply message. They are always allocated by the kernel verbs
+ * handlers, and _may_ be freed by either the kernel verbs handler -or-
+ * the interrupt handler. The kernel verbs handler _must_ free the
+ * repbuf, then free the vq request object, in that order.
+ */
+
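+/*
+ * Illustrative sketch only (compiled out, not part of the driver): the
+ * typical lifecycle a kernel verbs handler follows using the helpers
+ * below. CCWR_RNIC_QUERY is just an example WR id; real callers (e.g.
+ * c2_rnic.c) fill in type-specific WR fields before sending.
+ */
+#if 0
+static int vq_lifecycle_example(struct c2_dev *c2dev)
+{
+	struct c2_vq_req *vq_req;
+	union c2wr wr;
+	int err;
+
+	vq_req = vq_req_alloc(c2dev);
+	if (!vq_req)
+		return -ENOMEM;
+
+	memset(&wr, 0, sizeof(wr));
+	c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
+	wr.hdr.context = (u64) (unsigned long) vq_req;
+
+	/* 2nd reference keeps vq_req alive for handle_vq() */
+	vq_req_get(c2dev, vq_req);
+
+	err = vq_send_wr(c2dev, &wr);
+	if (err) {
+		/* no reply will come; drop the reply-path reference */
+		vq_req_put(c2dev, vq_req);
+		goto bail;
+	}
+
+	err = vq_wait_for_reply(c2dev, vq_req);
+	if (err)
+		goto bail;	/* handle_vq()'s put will release the 2nd ref */
+
+	/* free the reply buffer first, then the request object */
+	vq_repbuf_free(c2dev, (void *) (unsigned long) vq_req->reply_msg);
+bail:
+	vq_req_free(c2dev, vq_req);
+	return err;
+}
+#endif
+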
+int vq_init(struct c2_dev *c2dev)
+{
+ sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
+ (char) ('0' + c2dev->devnum));
+ c2dev->host_msg_cache =
+ kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (c2dev->host_msg_cache == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void vq_term(struct c2_dev *c2dev)
+{
+ kmem_cache_destroy(c2dev->host_msg_cache);
+}
+
+/* vq_req_alloc - allocate a VQ Request Object and initialize it.
+ * The refcnt is set to 1.
+ */
+struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
+{
+ struct c2_vq_req *r;
+
+ r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
+ if (r) {
+ init_waitqueue_head(&r->wait_object);
+ r->reply_msg = (u64) NULL;
+ r->event = 0;
+ r->cm_id = NULL;
+ r->qp = NULL;
+ atomic_set(&r->refcnt, 1);
+ atomic_set(&r->reply_ready, 0);
+ }
+ return r;
+}
+
+
+/* vq_req_free - free the VQ Request Object. It is assumed the verbs handler
+ * has already freed the VQ Reply Buffer, if one existed.
+ */
+void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
+{
+ r->reply_msg = (u64) NULL;
+ if (atomic_dec_and_test(&r->refcnt)) {
+ kfree(r);
+ }
+}
+
+/* vq_req_get - reference a VQ Request Object. Done
+ * only in the kernel verbs handlers.
+ */
+void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
+{
+ atomic_inc(&r->refcnt);
+}
+
+
+/* vq_req_put - dereference and potentially free a VQ Request Object.
+ *
+ * This is only called by handle_vq() on the
+ * interrupt when it is done processing
+ * a verb reply message. If the associated
+ * kernel verbs handler has already bailed,
+ * then this put will actually free the VQ
+ * Request object _and_ the VQ Reply Buffer
+ * if it exists.
+ */
+void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
+{
+ if (atomic_dec_and_test(&r->refcnt)) {
+ if (r->reply_msg != (u64) NULL)
+ vq_repbuf_free(c2dev,
+ (void *) (unsigned long) r->reply_msg);
+ kfree(r);
+ }
+}
+
+
+/*
+ * vq_repbuf_alloc - allocate a VQ Reply Buffer.
+ */
+void *vq_repbuf_alloc(struct c2_dev *c2dev)
+{
+ return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC);
+}
+
+/*
+ * vq_send_wr - post a verbs request message to the Verbs Request Queue.
+ * If a message is not available in the MQ, then block until one is available.
+ * NOTE: handle_mq() in the interrupt context will wake up threads blocked
+ * here.
+ * When the adapter drains the Verbs Request Queue, it inserts MQ
+ * index 0 into the adapter->host activity fifo and interrupts the host.
+ */
+int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
+{
+ void *msg;
+ wait_queue_t __wait;
+
+ /*
+ * grab adapter vq lock
+ */
+ spin_lock(&c2dev->vqlock);
+
+ /*
+ * allocate msg
+ */
+ msg = c2_mq_alloc(&c2dev->req_vq);
+
+ /*
+	 * If we cannot get a msg, then we'll wait.
+	 * When a message becomes available, the interrupt handler will
+	 * wake_up() any waiters.
+ */
+ while (msg == NULL) {
+ pr_debug("%s:%d no available msg in VQ, waiting...\n",
+ __FUNCTION__, __LINE__);
+ init_waitqueue_entry(&__wait, current);
+ add_wait_queue(&c2dev->req_vq_wo, &__wait);
+ spin_unlock(&c2dev->vqlock);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!c2_mq_full(&c2dev->req_vq)) {
+ break;
+ }
+ if (!signal_pending(current)) {
+ schedule_timeout(1 * HZ); /* 1 second... */
+ continue;
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&c2dev->req_vq_wo, &__wait);
+ return -EINTR;
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&c2dev->req_vq_wo, &__wait);
+ spin_lock(&c2dev->vqlock);
+ msg = c2_mq_alloc(&c2dev->req_vq);
+ }
+
+ /*
+ * copy wr into adapter msg
+ */
+ memcpy(msg, wr, c2dev->req_vq.msg_size);
+
+ /*
+ * post msg
+ */
+ c2_mq_produce(&c2dev->req_vq);
+
+ /*
+ * release adapter vq lock
+ */
+ spin_unlock(&c2dev->vqlock);
+ return 0;
+}
+
+
+/*
+ * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
+ */
+int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
+{
+ if (!wait_event_timeout(req->wait_object,
+ atomic_read(&req->reply_ready),
+ 60*HZ))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/*
+ * vq_repbuf_free - Free a Verbs Reply Buffer.
+ */
+void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
+{
+ kmem_cache_free(c2dev->host_msg_cache, reply);
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.h b/drivers/infiniband/hw/amso1100/c2_vq.h
new file mode 100644
index 00000000000..33805627a60
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_vq.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _C2_VQ_H_
+#define _C2_VQ_H_
+#include <linux/sched.h>
+#include "c2.h"
+#include "c2_wr.h"
+#include "c2_provider.h"
+
+struct c2_vq_req {
+ u64 reply_msg; /* ptr to reply msg */
+ wait_queue_head_t wait_object; /* wait object for vq reqs */
+ atomic_t reply_ready; /* set when reply is ready */
+ atomic_t refcnt; /* used to cancel WRs... */
+ int event;
+ struct iw_cm_id *cm_id;
+ struct c2_qp *qp;
+};
+
+extern int vq_init(struct c2_dev *c2dev);
+extern void vq_term(struct c2_dev *c2dev);
+
+extern struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
+extern void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
+extern void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
+extern void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
+extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
+
+extern void *vq_repbuf_alloc(struct c2_dev *c2dev);
+extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
+
+extern int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
+#endif /* _C2_VQ_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h
new file mode 100644
index 00000000000..3ec6c43bb0e
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_wr.h
@@ -0,0 +1,1520 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _C2_WR_H_
+#define _C2_WR_H_
+
+#ifdef CCDEBUG
+#define CCWR_MAGIC 0xb07700b0
+#endif
+
+#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
+
+/* Maximum allowed size in bytes of private_data exchange
+ * on connect.
+ */
+#define C2_MAX_PRIVATE_DATA_SIZE 200
+
+/*
+ * These types are shared among the adapter, host, and CCIL consumer.
+ */
+enum c2_cq_notification_type {
+ C2_CQ_NOTIFICATION_TYPE_NONE = 1,
+ C2_CQ_NOTIFICATION_TYPE_NEXT,
+ C2_CQ_NOTIFICATION_TYPE_NEXT_SE
+};
+
+enum c2_setconfig_cmd {
+ C2_CFG_ADD_ADDR = 1,
+ C2_CFG_DEL_ADDR = 2,
+ C2_CFG_ADD_ROUTE = 3,
+ C2_CFG_DEL_ROUTE = 4
+};
+
+enum c2_getconfig_cmd {
+ C2_GETCONFIG_ROUTES = 1,
+ C2_GETCONFIG_ADDRS
+};
+
+/*
+ * CCIL Work Request Identifiers
+ */
+enum c2wr_ids {
+ CCWR_RNIC_OPEN = 1,
+ CCWR_RNIC_QUERY,
+ CCWR_RNIC_SETCONFIG,
+ CCWR_RNIC_GETCONFIG,
+ CCWR_RNIC_CLOSE,
+ CCWR_CQ_CREATE,
+ CCWR_CQ_QUERY,
+ CCWR_CQ_MODIFY,
+ CCWR_CQ_DESTROY,
+ CCWR_QP_CONNECT,
+ CCWR_PD_ALLOC,
+ CCWR_PD_DEALLOC,
+ CCWR_SRQ_CREATE,
+ CCWR_SRQ_QUERY,
+ CCWR_SRQ_MODIFY,
+ CCWR_SRQ_DESTROY,
+ CCWR_QP_CREATE,
+ CCWR_QP_QUERY,
+ CCWR_QP_MODIFY,
+ CCWR_QP_DESTROY,
+ CCWR_NSMR_STAG_ALLOC,
+ CCWR_NSMR_REGISTER,
+ CCWR_NSMR_PBL,
+ CCWR_STAG_DEALLOC,
+ CCWR_NSMR_REREGISTER,
+ CCWR_SMR_REGISTER,
+ CCWR_MR_QUERY,
+ CCWR_MW_ALLOC,
+ CCWR_MW_QUERY,
+ CCWR_EP_CREATE,
+ CCWR_EP_GETOPT,
+ CCWR_EP_SETOPT,
+ CCWR_EP_DESTROY,
+ CCWR_EP_BIND,
+ CCWR_EP_CONNECT,
+ CCWR_EP_LISTEN,
+ CCWR_EP_SHUTDOWN,
+ CCWR_EP_LISTEN_CREATE,
+ CCWR_EP_LISTEN_DESTROY,
+ CCWR_EP_QUERY,
+ CCWR_CR_ACCEPT,
+ CCWR_CR_REJECT,
+ CCWR_CONSOLE,
+ CCWR_TERM,
+ CCWR_FLASH_INIT,
+ CCWR_FLASH,
+ CCWR_BUF_ALLOC,
+ CCWR_BUF_FREE,
+ CCWR_FLASH_WRITE,
+ CCWR_INIT, /* WARNING: Don't move this ever again! */
+
+
+
+ /* Add new IDs here */
+
+
+
+ /*
+ * WARNING: CCWR_LAST must always be the last verbs id defined!
+ * All the preceding IDs are fixed, and must not change.
+ * You can add new IDs, but must not remove or reorder
+ * any IDs. If you do, YOU will ruin any hope of
+	 * compatibility between versions.
+ */
+ CCWR_LAST,
+
+ /*
+ * Start over at 1 so that arrays indexed by user wr id's
+ * begin at 1. This is OK since the verbs and user wr id's
+ * are always used on disjoint sets of queues.
+ */
+ /*
+ * The order of the CCWR_SEND_XX verbs must
+ * match the order of the RDMA_OPs
+ */
+ CCWR_SEND = 1,
+ CCWR_SEND_INV,
+ CCWR_SEND_SE,
+ CCWR_SEND_SE_INV,
+ CCWR_RDMA_WRITE,
+ CCWR_RDMA_READ,
+ CCWR_RDMA_READ_INV,
+ CCWR_MW_BIND,
+ CCWR_NSMR_FASTREG,
+ CCWR_STAG_INVALIDATE,
+ CCWR_RECV,
+ CCWR_NOP,
+ CCWR_UNIMPL,
+/* WARNING: This must always be the last user wr id defined! */
+};
+#define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2)
+
+/*
+ * SQ/RQ Work Request Types
+ */
+enum c2_wr_type {
+ C2_WR_TYPE_SEND = CCWR_SEND,
+ C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
+ C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
+ C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
+ C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
+ C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
+ C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
+ C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
+ C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
+ C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
+ C2_WR_TYPE_RECV = CCWR_RECV,
+ C2_WR_TYPE_NOP = CCWR_NOP,
+};
+
+struct c2_netaddr {
+ u32 ip_addr;
+ u32 netmask;
+ u32 mtu;
+};
+
+struct c2_route {
+ u32 ip_addr; /* 0 indicates the default route */
+ u32 netmask; /* netmask associated with dst */
+ u32 flags;
+ union {
+ u32 ipaddr; /* address of the nexthop interface */
+ u8 enaddr[6];
+ } nexthop;
+};
+
+/*
+ * A Scatter Gather Entry.
+ */
+struct c2_data_addr {
+ u32 stag;
+ u32 length;
+ u64 to;
+};
+
+/*
+ * MR and MW flags used by the consumer, RI, and RNIC.
+ */
+enum c2_mm_flags {
+ MEM_REMOTE = 0x0001, /* allow mw binds with remote access. */
+ MEM_VA_BASED = 0x0002, /* Not Zero-based */
+ MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */
+ MEM_LOCAL_READ = 0x0008, /* allow local reads */
+ MEM_LOCAL_WRITE = 0x0010, /* allow local writes */
+ MEM_REMOTE_READ = 0x0020, /* allow remote reads */
+ MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */
+ MEM_WINDOW_BIND = 0x0080, /* binds allowed */
+ MEM_SHARED = 0x0100, /* set if MR is shared */
+ MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
+};
+
+/*
+ * CCIL API ACF flags defined in terms of the low level mem flags.
+ * This minimizes translation needed in the user API
+ */
+enum c2_acf {
+ C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
+ C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
+ C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
+ C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
+ C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
+};
+
+/*
+ * Image types of objects written to flash
+ */
+#define C2_FLASH_IMG_BITFILE 1
+#define C2_FLASH_IMG_OPTION_ROM 2
+#define C2_FLASH_IMG_VPD 3
+
+/*
+ * To fix bug 1815 we define the maximum allowable size of the
+ * terminate message (per the IETF spec; refer to the IETF
+ * protocol specification, section 12.1.6, page 64).
+ * The message is prefixed by 20 bytes of DDP info.
+ *
+ * Then the message has 6 bytes for the terminate control
+ * and DDP segment length info plus a DDP header (either
+ * 14 or 18 bytes) plus 28 bytes for the RDMA header.
+ * Thus the max size is:
+ *	20 + (6 + 18 + 28) = 72
+ */
+#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
+
+/*
+ * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h
+ */
+#define WR_BUILD_STR_LEN 64
+
+/*
+ * WARNING: All of these structs need to align any 64bit types on
+ * 64 bit boundaries! 64bit types include u64.
+ */
+
+/*
+ * Clustercore Work Request Header. Be sensitive to field layout
+ * and alignment.
+ */
+struct c2wr_hdr {
+ /* wqe_count is part of the cqe. It is put here so the
+ * adapter can write to it while the wr is pending without
+ * clobbering part of the wr. This word need not be dma'd
+ * from the host to adapter by libccil, but we copy it anyway
+ * to make the memcpy to the adapter better aligned.
+ */
+ u32 wqe_count;
+
+ /* Put these fields next so that later 32- and 64-bit
+ * quantities are naturally aligned.
+ */
+ u8 id;
+ u8 result; /* adapter -> host */
+ u8 sge_count; /* host -> adapter */
+ u8 flags; /* host -> adapter */
+
+ u64 context;
+#ifdef CCMSGMAGIC
+ u32 magic;
+ u32 pad;
+#endif
+} __attribute__((packed));
+
+/*
+ *------------------------ RNIC ------------------------
+ */
+
+/*
+ * WR_RNIC_OPEN
+ */
+
+/*
+ * Flags for the RNIC WRs
+ */
+enum c2_rnic_flags {
+ RNIC_IRD_STATIC = 0x0001,
+ RNIC_ORD_STATIC = 0x0002,
+ RNIC_QP_STATIC = 0x0004,
+ RNIC_SRQ_SUPPORTED = 0x0008,
+ RNIC_PBL_BLOCK_MODE = 0x0010,
+ RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
+ RNIC_CQ_OVF_DETECTED = 0x0040,
+ RNIC_PRIV_MODE = 0x0080
+};
+
+struct c2wr_rnic_open_req {
+ struct c2wr_hdr hdr;
+ u64 user_context;
+ u16 flags; /* See enum c2_rnic_flags */
+ u16 port_num;
+} __attribute__((packed));
+
+struct c2wr_rnic_open_rep {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+} __attribute__((packed));
+
+union c2wr_rnic_open {
+ struct c2wr_rnic_open_req req;
+ struct c2wr_rnic_open_rep rep;
+} __attribute__((packed));
+
+struct c2wr_rnic_query_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+} __attribute__((packed));
+
+/*
+ * WR_RNIC_QUERY
+ */
+struct c2wr_rnic_query_rep {
+ struct c2wr_hdr hdr;
+ u64 user_context;
+ u32 vendor_id;
+ u32 part_number;
+ u32 hw_version;
+ u32 fw_ver_major;
+ u32 fw_ver_minor;
+ u32 fw_ver_patch;
+ char fw_ver_build_str[WR_BUILD_STR_LEN];
+ u32 max_qps;
+ u32 max_qp_depth;
+ u32 max_srq_depth;
+ u32 max_send_sgl_depth;
+ u32 max_rdma_sgl_depth;
+ u32 max_cqs;
+ u32 max_cq_depth;
+ u32 max_cq_event_handlers;
+ u32 max_mrs;
+ u32 max_pbl_depth;
+ u32 max_pds;
+ u32 max_global_ird;
+ u32 max_global_ord;
+ u32 max_qp_ird;
+ u32 max_qp_ord;
+ u32 flags;
+ u32 max_mws;
+ u32 pbe_range_low;
+ u32 pbe_range_high;
+ u32 max_srqs;
+ u32 page_size;
+} __attribute__((packed));
+
+union c2wr_rnic_query {
+ struct c2wr_rnic_query_req req;
+ struct c2wr_rnic_query_rep rep;
+} __attribute__((packed));
+
+/*
+ * WR_RNIC_GETCONFIG
+ */
+
+struct c2wr_rnic_getconfig_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 option; /* see c2_getconfig_cmd_t */
+ u64 reply_buf;
+ u32 reply_buf_len;
+} __attribute__((packed)) ;
+
+struct c2wr_rnic_getconfig_rep {
+ struct c2wr_hdr hdr;
+ u32 option; /* see c2_getconfig_cmd_t */
+ u32 count_len; /* length of the number of addresses configured */
+} __attribute__((packed)) ;
+
+union c2wr_rnic_getconfig {
+ struct c2wr_rnic_getconfig_req req;
+ struct c2wr_rnic_getconfig_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ * WR_RNIC_SETCONFIG
+ */
+struct c2wr_rnic_setconfig_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 option; /* See c2_setconfig_cmd_t */
+ /* variable data and pad. See c2_netaddr and c2_route */
+ u8 data[0];
+} __attribute__((packed)) ;
+
+struct c2wr_rnic_setconfig_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_rnic_setconfig {
+ struct c2wr_rnic_setconfig_req req;
+ struct c2wr_rnic_setconfig_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ * WR_RNIC_CLOSE
+ */
+struct c2wr_rnic_close_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+} __attribute__((packed)) ;
+
+struct c2wr_rnic_close_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_rnic_close {
+ struct c2wr_rnic_close_req req;
+ struct c2wr_rnic_close_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ *------------------------ CQ ------------------------
+ */
+struct c2wr_cq_create_req {
+ struct c2wr_hdr hdr;
+ u64 shared_ht;
+ u64 user_context;
+ u64 msg_pool;
+ u32 rnic_handle;
+ u32 msg_size;
+ u32 depth;
+} __attribute__((packed)) ;
+
+struct c2wr_cq_create_rep {
+ struct c2wr_hdr hdr;
+ u32 mq_index;
+ u32 adapter_shared;
+ u32 cq_handle;
+} __attribute__((packed)) ;
+
+union c2wr_cq_create {
+ struct c2wr_cq_create_req req;
+ struct c2wr_cq_create_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_cq_modify_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 cq_handle;
+ u32 new_depth;
+ u64 new_msg_pool;
+} __attribute__((packed)) ;
+
+struct c2wr_cq_modify_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_cq_modify {
+ struct c2wr_cq_modify_req req;
+ struct c2wr_cq_modify_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_cq_destroy_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 cq_handle;
+} __attribute__((packed)) ;
+
+struct c2wr_cq_destroy_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_cq_destroy {
+ struct c2wr_cq_destroy_req req;
+ struct c2wr_cq_destroy_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ *------------------------ PD ------------------------
+ */
+struct c2wr_pd_alloc_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 pd_id;
+} __attribute__((packed)) ;
+
+struct c2wr_pd_alloc_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_pd_alloc {
+ struct c2wr_pd_alloc_req req;
+ struct c2wr_pd_alloc_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_pd_dealloc_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 pd_id;
+} __attribute__((packed)) ;
+
+struct c2wr_pd_dealloc_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_pd_dealloc {
+ struct c2wr_pd_dealloc_req req;
+ struct c2wr_pd_dealloc_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ *------------------------ SRQ ------------------------
+ */
+struct c2wr_srq_create_req {
+ struct c2wr_hdr hdr;
+ u64 shared_ht;
+ u64 user_context;
+ u32 rnic_handle;
+ u32 srq_depth;
+ u32 srq_limit;
+ u32 sgl_depth;
+ u32 pd_id;
+} __attribute__((packed)) ;
+
+struct c2wr_srq_create_rep {
+ struct c2wr_hdr hdr;
+ u32 srq_depth;
+ u32 sgl_depth;
+ u32 msg_size;
+ u32 mq_index;
+ u32 mq_start;
+ u32 srq_handle;
+} __attribute__((packed)) ;
+
+union c2wr_srq_create {
+ struct c2wr_srq_create_req req;
+ struct c2wr_srq_create_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_srq_destroy_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 srq_handle;
+} __attribute__((packed)) ;
+
+struct c2wr_srq_destroy_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_srq_destroy {
+ struct c2wr_srq_destroy_req req;
+ struct c2wr_srq_destroy_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ *------------------------ QP ------------------------
+ */
+enum c2wr_qp_flags {
+ QP_RDMA_READ = 0x00000001, /* RDMA read enabled? */
+ QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */
+ QP_MW_BIND = 0x00000004, /* MWs enabled */
+ QP_ZERO_STAG = 0x00000008, /* enabled? */
+ QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */
+ QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */
+ /* enabled? */
+};
+
+struct c2wr_qp_create_req {
+ struct c2wr_hdr hdr;
+ u64 shared_sq_ht;
+ u64 shared_rq_ht;
+ u64 user_context;
+ u32 rnic_handle;
+ u32 sq_cq_handle;
+ u32 rq_cq_handle;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 srq_handle;
+ u32 srq_limit;
+ u32 flags; /* see enum c2wr_qp_flags */
+ u32 send_sgl_depth;
+ u32 recv_sgl_depth;
+ u32 rdma_write_sgl_depth;
+ u32 ord;
+ u32 ird;
+ u32 pd_id;
+} __attribute__((packed)) ;
+
+struct c2wr_qp_create_rep {
+ struct c2wr_hdr hdr;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sgl_depth;
+ u32 recv_sgl_depth;
+ u32 rdma_write_sgl_depth;
+ u32 ord;
+ u32 ird;
+ u32 sq_msg_size;
+ u32 sq_mq_index;
+ u32 sq_mq_start;
+ u32 rq_msg_size;
+ u32 rq_mq_index;
+ u32 rq_mq_start;
+ u32 qp_handle;
+} __attribute__((packed)) ;
+
+union c2wr_qp_create {
+ struct c2wr_qp_create_req req;
+ struct c2wr_qp_create_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_qp_query_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 qp_handle;
+} __attribute__((packed)) ;
+
+struct c2wr_qp_query_rep {
+ struct c2wr_hdr hdr;
+ u64 user_context;
+ u32 rnic_handle;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sgl_depth;
+ u32 rdma_write_sgl_depth;
+ u32 recv_sgl_depth;
+ u32 ord;
+ u32 ird;
+ u16 qp_state;
+ u16 flags; /* see c2wr_qp_flags_t */
+ u32 qp_id;
+ u32 local_addr;
+ u32 remote_addr;
+ u16 local_port;
+ u16 remote_port;
+ u32 terminate_msg_length; /* 0 if not present */
+ u8 data[0];
+ /* Terminate Message in-line here. */
+} __attribute__((packed)) ;
+
+union c2wr_qp_query {
+ struct c2wr_qp_query_req req;
+ struct c2wr_qp_query_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_qp_modify_req {
+ struct c2wr_hdr hdr;
+ u64 stream_msg;
+ u32 stream_msg_length;
+ u32 rnic_handle;
+ u32 qp_handle;
+ u32 next_qp_state;
+ u32 ord;
+ u32 ird;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 llp_ep_handle;
+} __attribute__((packed)) ;
+
+struct c2wr_qp_modify_rep {
+ struct c2wr_hdr hdr;
+ u32 ord;
+ u32 ird;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 sq_msg_size;
+ u32 sq_mq_index;
+ u32 sq_mq_start;
+ u32 rq_msg_size;
+ u32 rq_mq_index;
+ u32 rq_mq_start;
+} __attribute__((packed)) ;
+
+union c2wr_qp_modify {
+ struct c2wr_qp_modify_req req;
+ struct c2wr_qp_modify_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_qp_destroy_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 qp_handle;
+} __attribute__((packed)) ;
+
+struct c2wr_qp_destroy_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_qp_destroy {
+ struct c2wr_qp_destroy_req req;
+ struct c2wr_qp_destroy_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can
+ * only be posted when a QP is in IDLE state. After the connect request is
+ * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
+ * No synchronous reply from adapter to this WR. The results of
+ * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS
+ * See c2wr_ae_active_connect_results_t
+ */
+struct c2wr_qp_connect_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 qp_handle;
+ u32 remote_addr;
+ u16 remote_port;
+ u16 pad;
+ u32 private_data_length;
+ u8 private_data[0]; /* Private data in-line. */
+} __attribute__((packed)) ;
+
+struct c2wr_qp_connect {
+ struct c2wr_qp_connect_req req;
+ /* no synchronous reply. */
+} __attribute__((packed)) ;
+
+
+/*
+ *------------------------ MM ------------------------
+ */
+
+struct c2wr_nsmr_stag_alloc_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 pbl_depth;
+ u32 pd_id;
+ u32 flags;
+} __attribute__((packed)) ;
+
+struct c2wr_nsmr_stag_alloc_rep {
+ struct c2wr_hdr hdr;
+ u32 pbl_depth;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+union c2wr_nsmr_stag_alloc {
+ struct c2wr_nsmr_stag_alloc_req req;
+ struct c2wr_nsmr_stag_alloc_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_nsmr_register_req {
+ struct c2wr_hdr hdr;
+ u64 va;
+ u32 rnic_handle;
+ u16 flags;
+ u8 stag_key;
+ u8 pad;
+ u32 pd_id;
+ u32 pbl_depth;
+ u32 pbe_size;
+ u32 fbo;
+ u32 length;
+ u32 addrs_length;
+ /* array of paddrs (must be aligned on a 64bit boundary) */
+ u64 paddrs[0];
+} __attribute__((packed)) ;
+
+struct c2wr_nsmr_register_rep {
+ struct c2wr_hdr hdr;
+ u32 pbl_depth;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+union c2wr_nsmr_register {
+ struct c2wr_nsmr_register_req req;
+ struct c2wr_nsmr_register_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_nsmr_pbl_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 flags;
+ u32 stag_index;
+ u32 addrs_length;
+ /* array of paddrs (must be aligned on a 64bit boundary) */
+ u64 paddrs[0];
+} __attribute__((packed)) ;
+
+struct c2wr_nsmr_pbl_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_nsmr_pbl {
+ struct c2wr_nsmr_pbl_req req;
+ struct c2wr_nsmr_pbl_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_mr_query_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+struct c2wr_mr_query_rep {
+ struct c2wr_hdr hdr;
+ u8 stag_key;
+ u8 pad[3];
+ u32 pd_id;
+ u32 flags;
+ u32 pbl_depth;
+} __attribute__((packed)) ;
+
+union c2wr_mr_query {
+ struct c2wr_mr_query_req req;
+ struct c2wr_mr_query_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_mw_query_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+struct c2wr_mw_query_rep {
+ struct c2wr_hdr hdr;
+ u8 stag_key;
+ u8 pad[3];
+ u32 pd_id;
+ u32 flags;
+} __attribute__((packed)) ;
+
+union c2wr_mw_query {
+ struct c2wr_mw_query_req req;
+ struct c2wr_mw_query_rep rep;
+} __attribute__((packed)) ;
+
+
+struct c2wr_stag_dealloc_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+struct c2wr_stag_dealloc_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed)) ;
+
+union c2wr_stag_dealloc {
+ struct c2wr_stag_dealloc_req req;
+ struct c2wr_stag_dealloc_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_nsmr_reregister_req {
+ struct c2wr_hdr hdr;
+ u64 va;
+ u32 rnic_handle;
+ u16 flags;
+ u8 stag_key;
+ u8 pad;
+ u32 stag_index;
+ u32 pd_id;
+ u32 pbl_depth;
+ u32 pbe_size;
+ u32 fbo;
+ u32 length;
+ u32 addrs_length;
+ u32 pad1;
+ /* array of paddrs (must be aligned on a 64bit boundary) */
+ u64 paddrs[0];
+} __attribute__((packed)) ;
+
+struct c2wr_nsmr_reregister_rep {
+ struct c2wr_hdr hdr;
+ u32 pbl_depth;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+union c2wr_nsmr_reregister {
+ struct c2wr_nsmr_reregister_req req;
+ struct c2wr_nsmr_reregister_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_smr_register_req {
+ struct c2wr_hdr hdr;
+ u64 va;
+ u32 rnic_handle;
+ u16 flags;
+ u8 stag_key;
+ u8 pad;
+ u32 stag_index;
+ u32 pd_id;
+} __attribute__((packed)) ;
+
+struct c2wr_smr_register_rep {
+ struct c2wr_hdr hdr;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+union c2wr_smr_register {
+ struct c2wr_smr_register_req req;
+ struct c2wr_smr_register_rep rep;
+} __attribute__((packed)) ;
+
+struct c2wr_mw_alloc_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 pd_id;
+} __attribute__((packed)) ;
+
+struct c2wr_mw_alloc_rep {
+ struct c2wr_hdr hdr;
+ u32 stag_index;
+} __attribute__((packed)) ;
+
+union c2wr_mw_alloc {
+ struct c2wr_mw_alloc_req req;
+ struct c2wr_mw_alloc_rep rep;
+} __attribute__((packed)) ;
+
+/*
+ *------------------------ WRs -----------------------
+ */
+
+struct c2wr_user_hdr {
+ struct c2wr_hdr hdr; /* Has status and WR Type */
+} __attribute__((packed)) ;
+
+enum c2_qp_state {
+ C2_QP_STATE_IDLE = 0x01,
+ C2_QP_STATE_CONNECTING = 0x02,
+ C2_QP_STATE_RTS = 0x04,
+ C2_QP_STATE_CLOSING = 0x08,
+ C2_QP_STATE_TERMINATE = 0x10,
+ C2_QP_STATE_ERROR = 0x20,
+};
+
+/* Completion queue entry. */
+struct c2wr_ce {
+ struct c2wr_hdr hdr; /* Has status and WR Type */
+ u64 qp_user_context; /* c2_user_qp_t * */
+ u32 qp_state; /* Current QP State */
+ u32 handle; /* QPID or EP Handle */
+ u32 bytes_rcvd; /* valid for RECV WCs */
+ u32 stag;
+} __attribute__((packed)) ;
+
+
+/*
+ * Flags used for all post-sq WRs. These must fit in the flags
+ * field of the struct c2wr_hdr (eight bits).
+ */
+enum {
+ SQ_SIGNALED = 0x01,
+ SQ_READ_FENCE = 0x02,
+ SQ_FENCE = 0x04,
+};
+
+/*
+ * Common fields for all post-sq WRs. Namely the standard header and a
+ * secondary header with fields common to all post-sq WRs.
+ */
+struct c2_sq_hdr {
+ struct c2wr_user_hdr user_hdr;
+} __attribute__((packed));
+
+/*
+ * Same as above but for post-rq WRs.
+ */
+struct c2_rq_hdr {
+ struct c2wr_user_hdr user_hdr;
+} __attribute__((packed));
+
+/*
+ * use the same struct for all sends.
+ */
+struct c2wr_send_req {
+ struct c2_sq_hdr sq_hdr;
+ u32 sge_len;
+ u32 remote_stag;
+ u8 data[0]; /* SGE array */
+} __attribute__((packed));
+
+union c2wr_send {
+ struct c2wr_send_req req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+struct c2wr_rdma_write_req {
+ struct c2_sq_hdr sq_hdr;
+ u64 remote_to;
+ u32 remote_stag;
+ u32 sge_len;
+ u8 data[0]; /* SGE array */
+} __attribute__((packed));
+
+union c2wr_rdma_write {
+ struct c2wr_rdma_write_req req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+struct c2wr_rdma_read_req {
+ struct c2_sq_hdr sq_hdr;
+ u64 local_to;
+ u64 remote_to;
+ u32 local_stag;
+ u32 remote_stag;
+ u32 length;
+} __attribute__((packed));
+
+union c2wr_rdma_read {
+ struct c2wr_rdma_read_req req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+struct c2wr_mw_bind_req {
+ struct c2_sq_hdr sq_hdr;
+ u64 va;
+ u8 stag_key;
+ u8 pad[3];
+ u32 mw_stag_index;
+ u32 mr_stag_index;
+ u32 length;
+ u32 flags;
+} __attribute__((packed));
+
+union c2wr_mw_bind {
+ struct c2wr_mw_bind_req req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+struct c2wr_nsmr_fastreg_req {
+ struct c2_sq_hdr sq_hdr;
+ u64 va;
+ u8 stag_key;
+ u8 pad[3];
+ u32 stag_index;
+ u32 pbe_size;
+ u32 fbo;
+ u32 length;
+ u32 addrs_length;
+ /* array of paddrs (must be aligned on a 64bit boundary) */
+ u64 paddrs[0];
+} __attribute__((packed));
+
+union c2wr_nsmr_fastreg {
+ struct c2wr_nsmr_fastreg_req req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+struct c2wr_stag_invalidate_req {
+ struct c2_sq_hdr sq_hdr;
+ u8 stag_key;
+ u8 pad[3];
+ u32 stag_index;
+} __attribute__((packed));
+
+union c2wr_stag_invalidate {
+ struct c2wr_stag_invalidate_req req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+union c2wr_sqwr {
+ struct c2_sq_hdr sq_hdr;
+ struct c2wr_send_req send;
+ struct c2wr_send_req send_se;
+ struct c2wr_send_req send_inv;
+ struct c2wr_send_req send_se_inv;
+ struct c2wr_rdma_write_req rdma_write;
+ struct c2wr_rdma_read_req rdma_read;
+ struct c2wr_mw_bind_req mw_bind;
+ struct c2wr_nsmr_fastreg_req nsmr_fastreg;
+ struct c2wr_stag_invalidate_req stag_inv;
+} __attribute__((packed));
+
+
+/*
+ * RQ WRs
+ */
+struct c2wr_rqwr {
+ struct c2_rq_hdr rq_hdr;
+ u8 data[0]; /* array of SGEs */
+} __attribute__((packed));
+
+union c2wr_recv {
+ struct c2wr_rqwr req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+/*
+ * All AEs start with this header. Most AEs only need to convey the
+ * information in the header. Some, like LLP connection events, need
+ * more info. The union typedef c2wr_ae_t has all the possible AEs.
+ *
+ * hdr.context is the user_context from the rnic_open WR, or NULL if this
+ * is not affiliated with an rnic.
+ *
+ * hdr.id is the AE identifier (eg; CCAE_REMOTE_SHUTDOWN,
+ * CCAE_LLP_CLOSE_COMPLETE)
+ *
+ * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
+ *
+ * user_context is the context passed down when the host created the resource.
+ */
+struct c2wr_ae_hdr {
+ struct c2wr_hdr hdr;
+ u64 user_context; /* user context for this res. */
+ u32 resource_type; /* see enum c2_resource_indicator */
+ u32 resource; /* handle for resource */
+ u32 qp_state; /* current QP State */
+} __attribute__((packed));
+
+/*
+ * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
+ * the adapter moves the QP into RTS state
+ */
+struct c2wr_ae_active_connect_results {
+ struct c2wr_ae_hdr ae_hdr;
+ u32 laddr;
+ u32 raddr;
+ u16 lport;
+ u16 rport;
+ u32 private_data_length;
+ u8 private_data[0]; /* data is in-line in the msg. */
+} __attribute__((packed));
+
+/*
+ * When connections are established by the stack (and the private data
+ * MPA frame is received), the adapter will generate an event to the host.
+ * The details of the connection, any private data, and the new connection
+ * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the
+ * AE queue:
+ */
+struct c2wr_ae_connection_request {
+ struct c2wr_ae_hdr ae_hdr;
+ u32 cr_handle; /* connreq handle (sock ptr) */
+ u32 laddr;
+ u32 raddr;
+ u16 lport;
+ u16 rport;
+ u32 private_data_length;
+ u8 private_data[0]; /* data is in-line in the msg. */
+} __attribute__((packed));
+
+union c2wr_ae {
+ struct c2wr_ae_hdr ae_generic;
+ struct c2wr_ae_active_connect_results ae_active_connect_results;
+ struct c2wr_ae_connection_request ae_connection_request;
+} __attribute__((packed));
+
+struct c2wr_init_req {
+ struct c2wr_hdr hdr;
+ u64 hint_count;
+ u64 q0_host_shared;
+ u64 q1_host_shared;
+ u64 q1_host_msg_pool;
+ u64 q2_host_shared;
+ u64 q2_host_msg_pool;
+} __attribute__((packed));
+
+struct c2wr_init_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed));
+
+union c2wr_init {
+ struct c2wr_init_req req;
+ struct c2wr_init_rep rep;
+} __attribute__((packed));
+
+/*
+ * For upgrading flash.
+ */
+
+struct c2wr_flash_init_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+} __attribute__((packed));
+
+struct c2wr_flash_init_rep {
+ struct c2wr_hdr hdr;
+ u32 adapter_flash_buf_offset;
+ u32 adapter_flash_len;
+} __attribute__((packed));
+
+union c2wr_flash_init {
+ struct c2wr_flash_init_req req;
+ struct c2wr_flash_init_rep rep;
+} __attribute__((packed));
+
+struct c2wr_flash_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 len;
+} __attribute__((packed));
+
+struct c2wr_flash_rep {
+ struct c2wr_hdr hdr;
+ u32 status;
+} __attribute__((packed));
+
+union c2wr_flash {
+ struct c2wr_flash_req req;
+ struct c2wr_flash_rep rep;
+} __attribute__((packed));
+
+struct c2wr_buf_alloc_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 size;
+} __attribute__((packed));
+
+struct c2wr_buf_alloc_rep {
+ struct c2wr_hdr hdr;
+ u32 offset; /* 0 if mem not available */
+ u32 size; /* 0 if mem not available */
+} __attribute__((packed));
+
+union c2wr_buf_alloc {
+ struct c2wr_buf_alloc_req req;
+ struct c2wr_buf_alloc_rep rep;
+} __attribute__((packed));
+
+struct c2wr_buf_free_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 offset; /* Must match value from alloc */
+ u32 size; /* Must match value from alloc */
+} __attribute__((packed));
+
+struct c2wr_buf_free_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed));
+
+union c2wr_buf_free {
+ struct c2wr_buf_free_req req;
+ struct c2wr_ce rep;
+} __attribute__((packed));
+
+struct c2wr_flash_write_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 offset;
+ u32 size;
+ u32 type;
+ u32 flags;
+} __attribute__((packed));
+
+struct c2wr_flash_write_rep {
+ struct c2wr_hdr hdr;
+ u32 status;
+} __attribute__((packed));
+
+union c2wr_flash_write {
+ struct c2wr_flash_write_req req;
+ struct c2wr_flash_write_rep rep;
+} __attribute__((packed));
+
+/*
+ * Messages for LLP connection setup.
+ */
+
+/*
+ * Listen Request. This allocates a listening endpoint to allow passive
+ * connection setup. Newly established LLP connections are passed up
+ * via an AE. See c2wr_ae_connection_request_t
+ */
+struct c2wr_ep_listen_create_req {
+ struct c2wr_hdr hdr;
+ u64 user_context; /* returned in AEs. */
+ u32 rnic_handle;
+ u32 local_addr; /* local addr, or 0 */
+ u16 local_port; /* 0 means "pick one" */
+ u16 pad;
+	u32 backlog;		/* traditional tcp listen backlog */
+} __attribute__((packed));
+
+struct c2wr_ep_listen_create_rep {
+ struct c2wr_hdr hdr;
+ u32 ep_handle; /* handle to new listening ep */
+ u16 local_port; /* resulting port... */
+ u16 pad;
+} __attribute__((packed));
+
+union c2wr_ep_listen_create {
+ struct c2wr_ep_listen_create_req req;
+ struct c2wr_ep_listen_create_rep rep;
+} __attribute__((packed));
+
+struct c2wr_ep_listen_destroy_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 ep_handle;
+} __attribute__((packed));
+
+struct c2wr_ep_listen_destroy_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed));
+
+union c2wr_ep_listen_destroy {
+ struct c2wr_ep_listen_destroy_req req;
+ struct c2wr_ep_listen_destroy_rep rep;
+} __attribute__((packed));
+
+struct c2wr_ep_query_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 ep_handle;
+} __attribute__((packed));
+
+struct c2wr_ep_query_rep {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 local_addr;
+ u32 remote_addr;
+ u16 local_port;
+ u16 remote_port;
+} __attribute__((packed));
+
+union c2wr_ep_query {
+ struct c2wr_ep_query_req req;
+ struct c2wr_ep_query_rep rep;
+} __attribute__((packed));
+
+
+/*
+ * The host passes this down to indicate acceptance of a pending iWARP
+ * connection. The cr_handle was obtained from the CONNECTION_REQUEST
+ * AE passed up by the adapter. See c2wr_ae_connection_request_t.
+ */
+struct c2wr_cr_accept_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 qp_handle; /* QP to bind to this LLP conn */
+ u32 ep_handle; /* LLP handle to accept */
+ u32 private_data_length;
+ u8 private_data[0]; /* data in-line in msg. */
+} __attribute__((packed));
+
+/*
+ * adapter sends reply when private data is successfully submitted to
+ * the LLP.
+ */
+struct c2wr_cr_accept_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed));
+
+union c2wr_cr_accept {
+ struct c2wr_cr_accept_req req;
+ struct c2wr_cr_accept_rep rep;
+} __attribute__((packed));
+
+/*
+ * The host sends this down if a given iWARP connection request was
+ * rejected by the consumer. The cr_handle was obtained from a
+ * previous c2wr_ae_connection_request_t AE sent by the adapter.
+ */
+struct c2wr_cr_reject_req {
+ struct c2wr_hdr hdr;
+ u32 rnic_handle;
+ u32 ep_handle; /* LLP handle to reject */
+} __attribute__((packed));
+
+/*
+ * Dunno if this is needed, but we'll add it for now. The adapter will
+ * send the reject_reply after the LLP endpoint has been destroyed.
+ */
+struct c2wr_cr_reject_rep {
+ struct c2wr_hdr hdr;
+} __attribute__((packed));
+
+union c2wr_cr_reject {
+ struct c2wr_cr_reject_req req;
+ struct c2wr_cr_reject_rep rep;
+} __attribute__((packed));
+
+/*
+ * console command. Used to implement a debug console over the verbs
+ * request and reply queues.
+ */
+
+/*
+ * Console request message. It contains:
+ * - message hdr with id = CCWR_CONSOLE
+ * - the physaddr/len of host memory to be used for the reply.
+ * - the command string. eg: "netstat -s" or "zoneinfo"
+ */
+struct c2wr_console_req {
+ struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
+ u64 reply_buf; /* pinned host buf for reply */
+ u32 reply_buf_len; /* length of reply buffer */
+ u8 command[0]; /* NUL terminated ascii string */
+ /* containing the command req */
+} __attribute__((packed));
+
+/*
+ * flags used in the console reply.
+ */
+enum c2_console_flags {
+ CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */
+} __attribute__((packed));
+
+/*
+ * Console reply message.
+ * hdr.result contains the c2_status_t error if the reply was _not_ generated,
+ * or C2_OK if the reply was generated.
+ */
+struct c2wr_console_rep {
+ struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
+ u32 flags;
+} __attribute__((packed));
+
+union c2wr_console {
+ struct c2wr_console_req req;
+ struct c2wr_console_rep rep;
+} __attribute__((packed));
+
+
+/*
+ * Giant union with all WRs. Makes life easier...
+ */
+union c2wr {
+ struct c2wr_hdr hdr;
+ struct c2wr_user_hdr user_hdr;
+ union c2wr_rnic_open rnic_open;
+ union c2wr_rnic_query rnic_query;
+ union c2wr_rnic_getconfig rnic_getconfig;
+ union c2wr_rnic_setconfig rnic_setconfig;
+ union c2wr_rnic_close rnic_close;
+ union c2wr_cq_create cq_create;
+ union c2wr_cq_modify cq_modify;
+ union c2wr_cq_destroy cq_destroy;
+ union c2wr_pd_alloc pd_alloc;
+ union c2wr_pd_dealloc pd_dealloc;
+ union c2wr_srq_create srq_create;
+ union c2wr_srq_destroy srq_destroy;
+ union c2wr_qp_create qp_create;
+ union c2wr_qp_query qp_query;
+ union c2wr_qp_modify qp_modify;
+ union c2wr_qp_destroy qp_destroy;
+ struct c2wr_qp_connect qp_connect;
+ union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
+ union c2wr_nsmr_register nsmr_register;
+ union c2wr_nsmr_pbl nsmr_pbl;
+ union c2wr_mr_query mr_query;
+ union c2wr_mw_query mw_query;
+ union c2wr_stag_dealloc stag_dealloc;
+ union c2wr_sqwr sqwr;
+ struct c2wr_rqwr rqwr;
+ struct c2wr_ce ce;
+ union c2wr_ae ae;
+ union c2wr_init init;
+ union c2wr_ep_listen_create ep_listen_create;
+ union c2wr_ep_listen_destroy ep_listen_destroy;
+ union c2wr_cr_accept cr_accept;
+ union c2wr_cr_reject cr_reject;
+ union c2wr_console console;
+ union c2wr_flash_init flash_init;
+ union c2wr_flash flash;
+ union c2wr_buf_alloc buf_alloc;
+ union c2wr_buf_free buf_free;
+ union c2wr_flash_write flash_write;
+} __attribute__((packed));
+
+
+/*
+ * Accessors for the wr fields that are packed together tightly to
+ * reduce the wr message size. The wr arguments are void* so that
+ * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
+ * in the struct c2wr union can be passed in.
+ */
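+/*
+ * Illustrative usage (not part of this header): any WR variant can be
+ * passed without a cast, e.g.
+ *
+ *	union c2wr wr;
+ *	c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
+ *	if (c2_wr_get_result(&wr) != C2_OK)
+ *		...
+ */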
+static __inline__ u8 c2_wr_get_id(void *wr)
+{
+ return ((struct c2wr_hdr *) wr)->id;
+}
+static __inline__ void c2_wr_set_id(void *wr, u8 id)
+{
+ ((struct c2wr_hdr *) wr)->id = id;
+}
+static __inline__ u8 c2_wr_get_result(void *wr)
+{
+ return ((struct c2wr_hdr *) wr)->result;
+}
+static __inline__ void c2_wr_set_result(void *wr, u8 result)
+{
+ ((struct c2wr_hdr *) wr)->result = result;
+}
+static __inline__ u8 c2_wr_get_flags(void *wr)
+{
+ return ((struct c2wr_hdr *) wr)->flags;
+}
+static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
+{
+ ((struct c2wr_hdr *) wr)->flags = flags;
+}
+static __inline__ u8 c2_wr_get_sge_count(void *wr)
+{
+ return ((struct c2wr_hdr *) wr)->sge_count;
+}
+static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
+{
+ ((struct c2wr_hdr *) wr)->sge_count = sge_count;
+}
+static __inline__ u32 c2_wr_get_wqe_count(void *wr)
+{
+ return ((struct c2wr_hdr *) wr)->wqe_count;
+}
+static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
+{
+ ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
+}
+
+#endif /* _C2_WR_H_ */
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig
new file mode 100644
index 00000000000..922389b6439
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/Kconfig
@@ -0,0 +1,16 @@
+config INFINIBAND_EHCA
+ tristate "eHCA support"
+ depends on IBMEBUS && INFINIBAND
+ ---help---
+ This driver supports the IBM pSeries eHCA InfiniBand adapter.
+
+ To compile the driver as a module, choose M here. The module
+ will be called ib_ehca.
+
+config INFINIBAND_EHCA_SCALING
+ bool "Scaling support (EXPERIMENTAL)"
+ depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL
+ ---help---
+ eHCA scaling support schedules the CQ callbacks to different CPUs.
+
+ To enable this feature choose Y here.
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/infiniband/hw/ehca/Makefile
new file mode 100644
index 00000000000..74d284e46a4
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/Makefile
@@ -0,0 +1,16 @@
+# Authors: Heiko J Schick <schickhj@de.ibm.com>
+# Christoph Raisch <raisch@de.ibm.com>
+# Joachim Fenkes <fenkes@de.ibm.com>
+#
+# Copyright (c) 2005 IBM Corporation
+#
+# All rights reserved.
+#
+# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
+
+obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
+
+ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
+ ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
+ ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
+
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
new file mode 100644
index 00000000000..3bac197f901
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -0,0 +1,271 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * address vector functions
+ *
+ * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Khadija Souissi <souissik@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/current.h>
+
+#include "ehca_tools.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+
+static struct kmem_cache *av_cache;
+
+struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ int ret;
+ struct ehca_av *av;
+ struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+ ib_device);
+
+ av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+ if (!av) {
+ ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
+ pd, ah_attr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ av->av.sl = ah_attr->sl;
+ av->av.dlid = ah_attr->dlid;
+ av->av.slid_path_bits = ah_attr->src_path_bits;
+
+ if (ehca_static_rate < 0) {
+ int ah_mult = ib_rate_to_mult(ah_attr->static_rate);
+ int ehca_mult =
+ ib_rate_to_mult(shca->sport[ah_attr->port_num - 1].rate);
+
+ if (ah_mult >= ehca_mult)
+ av->av.ipd = 0;
+ else
+ av->av.ipd = (ah_mult > 0) ?
+ ((ehca_mult - 1) / ah_mult) : 0;
+ } else
+ av->av.ipd = ehca_static_rate;
+
+ av->av.lnh = ah_attr->ah_flags;
+ av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
+ av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
+ ah_attr->grh.traffic_class);
+ av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
+ ah_attr->grh.flow_label);
+ av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
+ ah_attr->grh.hop_limit);
+ av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
+ /* set sgid in grh.word_1 */
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ int rc;
+ struct ib_port_attr port_attr;
+ union ib_gid gid;
+ memset(&port_attr, 0, sizeof(port_attr));
+ rc = ehca_query_port(pd->device, ah_attr->port_num,
+ &port_attr);
+ if (rc) { /* invalid port number */
+ ret = -EINVAL;
+ ehca_err(pd->device, "Invalid port number "
+ "ehca_query_port() returned %x "
+ "pd=%p ah_attr=%p", rc, pd, ah_attr);
+ goto create_ah_exit1;
+ }
+ memset(&gid, 0, sizeof(gid));
+ rc = ehca_query_gid(pd->device,
+ ah_attr->port_num,
+ ah_attr->grh.sgid_index, &gid);
+ if (rc) {
+ ret = -EINVAL;
+ ehca_err(pd->device, "Failed to retrieve sgid "
+ "ehca_query_gid() returned %x "
+ "pd=%p ah_attr=%p", rc, pd, ah_attr);
+ goto create_ah_exit1;
+ }
+ memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
+ }
+ /* for the time being we use a hard-coded PMTU of 2048 bytes */
+ av->av.pmtu = 4;
+
+ /* dgid comes in grh.word_3 */
+ memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
+ sizeof(ah_attr->grh.dgid));
+
+ return &av->ib_ah;
+
+create_ah_exit1:
+ kmem_cache_free(av_cache, av);
+
+ return ERR_PTR(ret);
+}
+
+int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+{
+ struct ehca_av *av;
+ struct ehca_ud_av new_ehca_av;
+ struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
+ u32 cur_pid = current->tgid;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ memset(&new_ehca_av, 0, sizeof(new_ehca_av));
+ new_ehca_av.sl = ah_attr->sl;
+ new_ehca_av.dlid = ah_attr->dlid;
+ new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
+ new_ehca_av.ipd = ah_attr->static_rate;
+ new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
+ (ah_attr->ah_flags & IB_AH_GRH) > 0);
+ new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
+ ah_attr->grh.traffic_class);
+ new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
+ ah_attr->grh.flow_label);
+ new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
+ ah_attr->grh.hop_limit);
+ new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
+
+ /* set sgid in grh.word_1 */
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ int rc;
+ struct ib_port_attr port_attr;
+ union ib_gid gid;
+ memset(&port_attr, 0, sizeof(port_attr));
+ rc = ehca_query_port(ah->device, ah_attr->port_num,
+ &port_attr);
+ if (rc) { /* invalid port number */
+ ehca_err(ah->device, "Invalid port number "
+ "ehca_query_port() returned %x "
+ "ah=%p ah_attr=%p port_num=%x",
+ rc, ah, ah_attr, ah_attr->port_num);
+ return -EINVAL;
+ }
+ memset(&gid, 0, sizeof(gid));
+ rc = ehca_query_gid(ah->device,
+ ah_attr->port_num,
+ ah_attr->grh.sgid_index, &gid);
+ if (rc) {
+ ehca_err(ah->device, "Failed to retrieve sgid "
+ "ehca_query_gid() returned %x "
+ "ah=%p ah_attr=%p port_num=%x "
+ "sgid_index=%x",
+ rc, ah, ah_attr, ah_attr->port_num,
+ ah_attr->grh.sgid_index);
+ return -EINVAL;
+ }
+ memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
+ }
+
+ new_ehca_av.pmtu = 4; /* see also comment in create_ah() */
+
+ memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
+ sizeof(ah_attr->grh.dgid));
+
+ av = container_of(ah, struct ehca_av, ib_ah);
+ av->av = new_ehca_av;
+
+ return 0;
+}
+
+int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
+{
+ struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
+ struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
+ u32 cur_pid = current->tgid;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
+ sizeof(ah_attr->grh.dgid));
+ ah_attr->sl = av->av.sl;
+
+ ah_attr->dlid = av->av.dlid;
+
+ ah_attr->src_path_bits = av->av.slid_path_bits;
+ ah_attr->static_rate = av->av.ipd;
+ ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
+ ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
+ av->av.grh.word_0);
+ ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
+ av->av.grh.word_0);
+ ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
+ av->av.grh.word_0);
+
+ return 0;
+}
+
+int ehca_destroy_ah(struct ib_ah *ah)
+{
+ struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
+ u32 cur_pid = current->tgid;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
+
+ return 0;
+}
+
+int ehca_init_av_cache(void)
+{
+ av_cache = kmem_cache_create("ehca_cache_av",
+ sizeof(struct ehca_av), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!av_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_av_cache(void)
+{
+ if (av_cache)
+ kmem_cache_destroy(av_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
new file mode 100644
index 00000000000..1c722032319
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -0,0 +1,345 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Struct definition for eHCA internal structures
+ *
+ * Authors: Heiko J Schick <schickhj@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_CLASSES_H__
+#define __EHCA_CLASSES_H__
+
+#include "ehca_classes.h"
+#include "ipz_pt_fn.h"
+
+struct ehca_module;
+struct ehca_qp;
+struct ehca_cq;
+struct ehca_eq;
+struct ehca_mr;
+struct ehca_mw;
+struct ehca_pd;
+struct ehca_av;
+
+#ifdef CONFIG_PPC64
+#include "ehca_classes_pSeries.h"
+#endif
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "ehca_irq.h"
+
+struct ehca_eq {
+ u32 length;
+ struct ipz_queue ipz_queue;
+ struct ipz_eq_handle ipz_eq_handle;
+ struct work_struct work;
+ struct h_galpas galpas;
+ int is_initialized;
+ struct ehca_pfeq pf;
+ spinlock_t spinlock;
+ struct tasklet_struct interrupt_task;
+ u32 ist;
+};
+
+struct ehca_sport {
+ struct ib_cq *ibcq_aqp1;
+ struct ib_qp *ibqp_aqp1;
+ enum ib_rate rate;
+ enum ib_port_state port_state;
+};
+
+struct ehca_shca {
+ struct ib_device ib_device;
+ struct ibmebus_dev *ibmebus_dev;
+ u8 num_ports;
+ int hw_level;
+ struct list_head shca_list;
+ struct ipz_adapter_handle ipz_hca_handle;
+ struct ehca_sport sport[2];
+ struct ehca_eq eq;
+ struct ehca_eq neq;
+ struct ehca_mr *maxmr;
+ struct ehca_pd *pd;
+ struct h_galpas galpas;
+};
+
+struct ehca_pd {
+ struct ib_pd ib_pd;
+ struct ipz_pd fw_pd;
+ u32 ownpid;
+};
+
+struct ehca_qp {
+ struct ib_qp ib_qp;
+ u32 qp_type;
+ struct ipz_queue ipz_squeue;
+ struct ipz_queue ipz_rqueue;
+ struct h_galpas galpas;
+ u32 qkey;
+ u32 real_qp_num;
+ u32 token;
+ spinlock_t spinlock_s;
+ spinlock_t spinlock_r;
+ u32 sq_max_inline_data_size;
+ struct ipz_qp_handle ipz_qp_handle;
+ struct ehca_pfqp pf;
+ struct ib_qp_init_attr init_attr;
+ u64 uspace_squeue;
+ u64 uspace_rqueue;
+ u64 uspace_fwh;
+ struct ehca_cq *send_cq;
+ struct ehca_cq *recv_cq;
+ unsigned int sqerr_purgeflag;
+ struct hlist_node list_entries;
+};
+
+/* must be power of 2 */
+#define QP_HASHTAB_LEN 8
+
+struct ehca_cq {
+ struct ib_cq ib_cq;
+ struct ipz_queue ipz_queue;
+ struct h_galpas galpas;
+ spinlock_t spinlock;
+ u32 cq_number;
+ u32 token;
+ u32 nr_of_entries;
+ struct ipz_cq_handle ipz_cq_handle;
+ struct ehca_pfcq pf;
+ spinlock_t cb_lock;
+ u64 uspace_queue;
+ u64 uspace_fwh;
+ struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
+ struct list_head entry;
+ u32 nr_callbacks;
+ spinlock_t task_lock;
+ u32 ownpid;
+};
+
+enum ehca_mr_flag {
+ EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
+ EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
+};
+
+struct ehca_mr {
+ union {
+ struct ib_mr ib_mr; /* must always be first in ehca_mr */
+ struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
+ } ib;
+ spinlock_t mrlock;
+
+ enum ehca_mr_flag flags;
+ u32 num_pages; /* number of MR pages */
+ u32 num_4k; /* number of 4k "page" portions to form MR */
+ int acl; /* ACL (stored here for usage in reregister) */
+ u64 *start; /* virtual start address (stored here for */
+ /* usage in reregister) */
+ u64 size; /* size (stored here for usage in reregister) */
+ u32 fmr_page_size; /* page size for FMR */
+ u32 fmr_max_pages; /* max pages for FMR */
+ u32 fmr_max_maps; /* max outstanding maps for FMR */
+ u32 fmr_map_cnt; /* map counter for FMR */
+ /* fw specific data */
+ struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
+ struct h_galpas galpas;
+ /* data for userspace bridge */
+ u32 nr_of_pages;
+ void *pagearray;
+};
+
+struct ehca_mw {
+ struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
+ spinlock_t mwlock;
+
+ u8 never_bound; /* indication MW was never bound */
+ struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
+ struct h_galpas galpas;
+};
+
+enum ehca_mr_pgi_type {
+ EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
+ * ehca_rereg_phys_mr,
+ * ehca_reg_internal_maxmr */
+ EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
+ EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
+};
+
+struct ehca_mr_pginfo {
+ enum ehca_mr_pgi_type type;
+ u64 num_pages;
+ u64 page_cnt;
+ u64 num_4k; /* number of 4k "page" portions */
+ u64 page_4k_cnt; /* counter for 4k "page" portions */
+ u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */
+
+ /* type EHCA_MR_PGI_PHYS section */
+ int num_phys_buf;
+ struct ib_phys_buf *phys_buf_array;
+ u64 next_buf;
+
+ /* type EHCA_MR_PGI_USER section */
+ struct ib_umem *region;
+ struct ib_umem_chunk *next_chunk;
+ u64 next_nmap;
+
+ /* type EHCA_MR_PGI_FMR section */
+ u64 *page_list;
+ u64 next_listelem;
+ /* next_4k also used within EHCA_MR_PGI_FMR */
+};
+
+/* output parameters for MR/FMR hipz calls */
+struct ehca_mr_hipzout_parms {
+ struct ipz_mrmw_handle handle;
+ u32 lkey;
+ u32 rkey;
+ u64 len;
+ u64 vaddr;
+ u32 acl;
+};
+
+/* output parameters for MW hipz calls */
+struct ehca_mw_hipzout_parms {
+ struct ipz_mrmw_handle handle;
+ u32 rkey;
+};
+
+struct ehca_av {
+ struct ib_ah ib_ah;
+ struct ehca_ud_av av;
+};
+
+struct ehca_ucontext {
+ struct ib_ucontext ib_ucontext;
+};
+
+struct ehca_module *ehca_module_new(void);
+
+int ehca_module_delete(struct ehca_module *me);
+
+int ehca_eq_ctor(struct ehca_eq *eq);
+
+int ehca_eq_dtor(struct ehca_eq *eq);
+
+struct ehca_shca *ehca_shca_new(void);
+
+int ehca_shca_delete(struct ehca_shca *me);
+
+struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
+
+int ehca_init_pd_cache(void);
+void ehca_cleanup_pd_cache(void);
+int ehca_init_cq_cache(void);
+void ehca_cleanup_cq_cache(void);
+int ehca_init_qp_cache(void);
+void ehca_cleanup_qp_cache(void);
+int ehca_init_av_cache(void);
+void ehca_cleanup_av_cache(void);
+int ehca_init_mrmw_cache(void);
+void ehca_cleanup_mrmw_cache(void);
+
+extern spinlock_t ehca_qp_idr_lock;
+extern spinlock_t ehca_cq_idr_lock;
+extern struct idr ehca_qp_idr;
+extern struct idr ehca_cq_idr;
+
+extern int ehca_static_rate;
+extern int ehca_port_act_time;
+extern int ehca_use_hp_mr;
+
+struct ipzu_queue_resp {
+ u64 queue; /* points to first queue entry */
+ u32 qe_size; /* queue entry size */
+ u32 act_nr_of_sg;
+ u32 queue_length; /* queue length allocated in bytes */
+ u32 pagesize;
+ u32 toggle_state;
+ u32 dummy; /* padding for 8 byte alignment */
+};
+
+struct ehca_create_cq_resp {
+ u32 cq_number;
+ u32 token;
+ struct ipzu_queue_resp ipz_queue;
+ struct h_galpas galpas;
+};
+
+struct ehca_create_qp_resp {
+ u32 qp_num;
+ u32 token;
+ u32 qp_type;
+ u32 qkey;
+ /* qp_num assigned by ehca: sqp0/1 may have been assigned different numbers */
+ u32 real_qp_num;
+ u32 dummy; /* padding for 8 byte alignment */
+ struct ipzu_queue_resp ipz_squeue;
+ struct ipzu_queue_resp ipz_rqueue;
+ struct h_galpas galpas;
+};
+
+struct ehca_alloc_cq_parms {
+ u32 nr_cqe;
+ u32 act_nr_of_entries;
+ u32 act_pages;
+ struct ipz_eq_handle eq_handle;
+};
+
+struct ehca_alloc_qp_parms {
+ int servicetype;
+ int sigtype;
+ int daqp_ctrl;
+ int max_send_sge;
+ int max_recv_sge;
+ int ud_av_l_key_ctl;
+
+ u16 act_nr_send_wqes;
+ u16 act_nr_recv_wqes;
+ u8 act_nr_recv_sges;
+ u8 act_nr_send_sges;
+
+ u32 nr_rq_pages;
+ u32 nr_sq_pages;
+
+ struct ipz_eq_handle ipz_eq_handle;
+ struct ipz_pd pd;
+};
+
+int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
+int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
+
+#endif
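
QP_HASHTAB_LEN carries the annotation "must be power of 2" because the CQ code in ehca_cq.c below selects a bucket with qp_num & (QP_HASHTAB_LEN - 1), which equals qp_num % QP_HASHTAB_LEN only when the table length is a power of two. A standalone sketch of that equivalence:

#include <assert.h>

#define QP_HASHTAB_LEN 8	/* power of 2, as the comment requires */

static unsigned int qp_bucket(unsigned int qp_num)
{
	/* cheap AND instead of a modulo; valid only for power-of-2 sizes */
	return qp_num & (QP_HASHTAB_LEN - 1);
}

int main(void)
{
	unsigned int qp_num;

	for (qp_num = 0; qp_num < 1024; qp_num++)
		assert(qp_bucket(qp_num) == qp_num % QP_HASHTAB_LEN);
	return 0;
}
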
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
new file mode 100644
index 00000000000..5665f213b81
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -0,0 +1,236 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * pSeries interface definitions
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_CLASSES_PSERIES_H__
+#define __EHCA_CLASSES_PSERIES_H__
+
+#include "hcp_phyp.h"
+#include "ipz_pt_fn.h"
+
+
+struct ehca_pfqp {
+ struct ipz_qpt sqpt;
+ struct ipz_qpt rqpt;
+};
+
+struct ehca_pfcq {
+ struct ipz_qpt qpt;
+ u32 cqnr;
+};
+
+struct ehca_pfeq {
+ struct ipz_qpt qpt;
+ struct h_galpa galpa;
+ u32 eqnr;
+};
+
+struct ipz_adapter_handle {
+ u64 handle;
+};
+
+struct ipz_cq_handle {
+ u64 handle;
+};
+
+struct ipz_eq_handle {
+ u64 handle;
+};
+
+struct ipz_qp_handle {
+ u64 handle;
+};
+struct ipz_mrmw_handle {
+ u64 handle;
+};
+
+struct ipz_pd {
+ u32 value;
+};
+
+struct hcp_modify_qp_control_block {
+ u32 qkey; /* 00 */
+ u32 rdd; /* reliable datagram domain */
+ u32 send_psn; /* 02 */
+ u32 receive_psn; /* 03 */
+ u32 prim_phys_port; /* 04 */
+ u32 alt_phys_port; /* 05 */
+ u32 prim_p_key_idx; /* 06 */
+ u32 alt_p_key_idx; /* 07 */
+ u32 rdma_atomic_ctrl; /* 08 */
+ u32 qp_state; /* 09 */
+ u32 reserved_10; /* 10 */
+ u32 rdma_nr_atomic_resp_res; /* 11 */
+ u32 path_migration_state; /* 12 */
+ u32 rdma_atomic_outst_dest_qp; /* 13 */
+ u32 dest_qp_nr; /* 14 */
+ u32 min_rnr_nak_timer_field; /* 15 */
+ u32 service_level; /* 16 */
+ u32 send_grh_flag; /* 17 */
+ u32 retry_count; /* 18 */
+ u32 timeout; /* 19 */
+ u32 path_mtu; /* 20 */
+ u32 max_static_rate; /* 21 */
+ u32 dlid; /* 22 */
+ u32 rnr_retry_count; /* 23 */
+ u32 source_path_bits; /* 24 */
+ u32 traffic_class; /* 25 */
+ u32 hop_limit; /* 26 */
+ u32 source_gid_idx; /* 27 */
+ u32 flow_label; /* 28 */
+ u32 reserved_29; /* 29 */
+ union { /* 30 */
+ u64 dw[2];
+ u8 byte[16];
+ } dest_gid;
+ u32 service_level_al; /* 34 */
+ u32 send_grh_flag_al; /* 35 */
+ u32 retry_count_al; /* 36 */
+ u32 timeout_al; /* 37 */
+ u32 max_static_rate_al; /* 38 */
+ u32 dlid_al; /* 39 */
+ u32 rnr_retry_count_al; /* 40 */
+ u32 source_path_bits_al; /* 41 */
+ u32 traffic_class_al; /* 42 */
+ u32 hop_limit_al; /* 43 */
+ u32 source_gid_idx_al; /* 44 */
+ u32 flow_label_al; /* 45 */
+ u32 reserved_46; /* 46 */
+ u32 reserved_47; /* 47 */
+ union { /* 48 */
+ u64 dw[2];
+ u8 byte[16];
+ } dest_gid_al;
+ u32 max_nr_outst_send_wr; /* 52 */
+ u32 max_nr_outst_recv_wr; /* 53 */
+ u32 disable_ete_credit_check; /* 54 */
+ u32 qp_number; /* 55 */
+ u64 send_queue_handle; /* 56 */
+ u64 recv_queue_handle; /* 58 */
+ u32 actual_nr_sges_in_sq_wqe; /* 60 */
+ u32 actual_nr_sges_in_rq_wqe; /* 61 */
+ u32 qp_enable; /* 62 */
+ u32 curr_srq_limit; /* 63 */
+ u64 qp_aff_asyn_ev_log_reg; /* 64 */
+ u64 shared_rq_hndl; /* 66 */
+ u64 trigg_doorbell_qp_hndl; /* 68 */
+ u32 reserved_70_127[58]; /* 70 */
+};
+
+#define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0)
+#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2)
+#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3)
+#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4)
+#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5)
+#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6)
+#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7)
+#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8)
+#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9)
+#define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11)
+#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12)
+#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13)
+#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14)
+#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15)
+#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16)
+#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17)
+#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18)
+#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19)
+#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20)
+#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21)
+#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22)
+#define MQPCB_DLID EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23)
+#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31)
+#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24)
+#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31)
+#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25)
+#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26)
+#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27)
+#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28)
+#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31)
+#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30)
+#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31)
+#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31)
+#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32)
+#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31)
+#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33)
+#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
+#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34)
+#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31)
+#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35)
+#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36)
+#define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37)
+#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
+#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38)
+#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31)
+#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39)
+#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40)
+#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41)
+#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31)
+#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42)
+#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31)
+#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44)
+#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45)
+#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46)
+#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31)
+#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47)
+#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31)
+#define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31)
+#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48)
+#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31)
+#define MQPCB_MASK_CURR_SQR_LIMIT EHCA_BMASK_IBM(49,49)
+#define MQPCB_CURR_SQR_LIMIT EHCA_BMASK_IBM(15,31)
+#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50)
+#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51)
+
+#endif /* __EHCA_CLASSES_PSERIES_H__ */
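
The MQPCB_* definitions above use EHCA_BMASK_IBM(from,to), which names bit ranges in IBM (MSB-0) order: bit 0 is the most significant bit of the word. The helper below is an illustrative 32-bit rendering of that convention, assuming plain MSB-0 semantics; it is not the ehca_tools.h implementation.

#include <stdint.h>
#include <stdio.h>

/* build the LSB-0 mask for IBM (MSB-0) bit range from..to in a 32-bit word */
static uint32_t ibm_mask32(unsigned int from, unsigned int to)
{
	unsigned int width = to - from + 1;
	unsigned int shift = 31 - to;	/* LSB-0 shift of the field */
	uint32_t field = (width == 32) ? 0xffffffffu
				       : ((1u << width) - 1);

	return field << shift;
}

int main(void)
{
	/* IBM bits 24..31 are the least significant byte: 0x000000ff */
	printf("mask(24,31) = 0x%08x\n", ibm_mask32(24, 31));
	/* IBM bit 0..0 selects the most significant bit: 0x80000000 */
	printf("mask(0,0)   = 0x%08x\n", ibm_mask32(0, 0));
	return 0;
}
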
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
new file mode 100644
index 00000000000..458fe19648a
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -0,0 +1,428 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Completion queue handling
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Khadija Souissi <souissi@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_iverbs.h"
+#include "ehca_classes.h"
+#include "ehca_irq.h"
+#include "hcp_if.h"
+
+static struct kmem_cache *cq_cache;
+
+int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
+{
+ unsigned int qp_num = qp->real_qp_num;
+ unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
+ unsigned long spl_flags;
+
+ spin_lock_irqsave(&cq->spinlock, spl_flags);
+ hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
+ spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+
+ ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
+ cq->cq_number, qp_num);
+
+ return 0;
+}
+
+int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
+{
+ int ret = -EINVAL;
+ unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
+ struct hlist_node *iter;
+ struct ehca_qp *qp;
+ unsigned long spl_flags;
+
+ spin_lock_irqsave(&cq->spinlock, spl_flags);
+ hlist_for_each(iter, &cq->qp_hashtab[key]) {
+ qp = hlist_entry(iter, struct ehca_qp, list_entries);
+ if (qp->real_qp_num == real_qp_num) {
+ hlist_del(iter);
+ ehca_dbg(cq->ib_cq.device,
+ "removed qp from cq .cq_num=%x real_qp_num=%x",
+ cq->cq_number, real_qp_num);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+ if (ret)
+ ehca_err(cq->ib_cq.device,
+ "qp not found cq_num=%x real_qp_num=%x",
+ cq->cq_number, real_qp_num);
+
+ return ret;
+}
+
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
+{
+ struct ehca_qp *ret = NULL;
+ unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
+ struct hlist_node *iter;
+ struct ehca_qp *qp;
+ hlist_for_each(iter, &cq->qp_hashtab[key]) {
+ qp = hlist_entry(iter, struct ehca_qp, list_entries);
+ if (qp->real_qp_num == real_qp_num) {
+ ret = qp;
+ break;
+ }
+ }
+ return ret;
+}
+
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ static const u32 additional_cqe = 20;
+ struct ib_cq *cq;
+ struct ehca_cq *my_cq;
+ struct ehca_shca *shca =
+ container_of(device, struct ehca_shca, ib_device);
+ struct ipz_adapter_handle adapter_handle;
+ struct ehca_alloc_cq_parms param; /* h_call's out parameters */
+ struct h_galpa gal;
+ void *vpage;
+ u32 counter;
+ u64 rpage, cqx_fec, h_ret;
+ int ipz_rc, ret, i;
+ unsigned long flags;
+
+ if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
+ return ERR_PTR(-EINVAL);
+
+ my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+ if (!my_cq) {
+ ehca_err(device, "Out of memory for ehca_cq struct device=%p",
+ device);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(my_cq, 0, sizeof(struct ehca_cq));
+ memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
+
+ spin_lock_init(&my_cq->spinlock);
+ spin_lock_init(&my_cq->cb_lock);
+ spin_lock_init(&my_cq->task_lock);
+ my_cq->ownpid = current->tgid;
+
+ cq = &my_cq->ib_cq;
+
+ adapter_handle = shca->ipz_hca_handle;
+ param.eq_handle = shca->eq.ipz_eq_handle;
+
+ do {
+ if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
+ cq = ERR_PTR(-ENOMEM);
+ ehca_err(device, "Can't reserve idr nr. device=%p",
+ device);
+ goto create_cq_exit1;
+ }
+
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ } while (ret == -EAGAIN);
+
+ if (ret) {
+ cq = ERR_PTR(-ENOMEM);
+ ehca_err(device, "Can't allocate new idr entry. device=%p",
+ device);
+ goto create_cq_exit1;
+ }
+
+ /*
+ * The CQ's maximum depth is 4GB-64, but we need an additional 20
+ * entries as a buffer for receiving error CQEs.
+ */
+ param.nr_cqe = cqe + additional_cqe;
+ h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
+
+ if (h_ret != H_SUCCESS) {
+ ehca_err(device, "hipz_h_alloc_resource_cq() failed "
+ "h_ret=%lx device=%p", h_ret, device);
+ cq = ERR_PTR(ehca2ib_return_code(h_ret));
+ goto create_cq_exit2;
+ }
+
+ ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
+ EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0);
+ if (!ipz_rc) {
+ ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p",
+ ipz_rc, device);
+ cq = ERR_PTR(-EINVAL);
+ goto create_cq_exit3;
+ }
+
+ for (counter = 0; counter < param.act_pages; counter++) {
+ vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
+ if (!vpage) {
+ ehca_err(device, "ipz_qpageit_get_inc() "
+ "returns NULL device=%p", device);
+ cq = ERR_PTR(-EAGAIN);
+ goto create_cq_exit4;
+ }
+ rpage = virt_to_abs(vpage);
+
+ h_ret = hipz_h_register_rpage_cq(adapter_handle,
+ my_cq->ipz_cq_handle,
+ &my_cq->pf,
+ 0,
+ 0,
+ rpage,
+ 1,
+ my_cq->galpas.
+ kernel);
+
+ if (h_ret < H_SUCCESS) {
+ ehca_err(device, "hipz_h_register_rpage_cq() failed "
+ "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i "
+ "act_pages=%i", my_cq, my_cq->cq_number,
+ h_ret, counter, param.act_pages);
+ cq = ERR_PTR(-EINVAL);
+ goto create_cq_exit4;
+ }
+
+ if (counter == (param.act_pages - 1)) {
+ vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
+ if ((h_ret != H_SUCCESS) || vpage) {
+ ehca_err(device, "Registration of pages not "
+ "complete ehca_cq=%p cq_num=%x "
+ "h_ret=%lx", my_cq, my_cq->cq_number,
+ h_ret);
+ cq = ERR_PTR(-EAGAIN);
+ goto create_cq_exit4;
+ }
+ } else {
+ if (h_ret != H_PAGE_REGISTERED) {
+ ehca_err(device, "Registration of page failed "
+ "ehca_cq=%p cq_num=%x h_ret=%lx"
+ "counter=%i act_pages=%i",
+ my_cq, my_cq->cq_number,
+ h_ret, counter, param.act_pages);
+ cq = ERR_PTR(-ENOMEM);
+ goto create_cq_exit4;
+ }
+ }
+ }
+
+ ipz_qeit_reset(&my_cq->ipz_queue);
+
+ gal = my_cq->galpas.kernel;
+ cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
+ ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+ my_cq, my_cq->cq_number, cqx_fec);
+
+ my_cq->ib_cq.cqe = my_cq->nr_of_entries =
+ param.act_nr_of_entries - additional_cqe;
+ my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
+
+ for (i = 0; i < QP_HASHTAB_LEN; i++)
+ INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
+
+ if (context) {
+ struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
+ struct ehca_create_cq_resp resp;
+ struct vm_area_struct *vma;
+ memset(&resp, 0, sizeof(resp));
+ resp.cq_number = my_cq->cq_number;
+ resp.token = my_cq->token;
+ resp.ipz_queue.qe_size = ipz_queue->qe_size;
+ resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
+ resp.ipz_queue.queue_length = ipz_queue->queue_length;
+ resp.ipz_queue.pagesize = ipz_queue->pagesize;
+ resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
+ ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
+ ipz_queue->queue_length,
+ (void**)&resp.ipz_queue.queue,
+ &vma);
+ if (ret) {
+ ehca_err(device, "Could not mmap queue pages");
+ cq = ERR_PTR(ret);
+ goto create_cq_exit4;
+ }
+ my_cq->uspace_queue = resp.ipz_queue.queue;
+ resp.galpas = my_cq->galpas;
+ ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
+ (void**)&resp.galpas.kernel.fw_handle,
+ &vma);
+ if (ret) {
+ ehca_err(device, "Could not mmap fw_handle");
+ cq = ERR_PTR(ret);
+ goto create_cq_exit5;
+ }
+ my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ ehca_err(device, "Copy to udata failed.");
+ cq = ERR_PTR(-EFAULT);
+ goto create_cq_exit6;
+ }
+ }
+
+ return cq;
+
+create_cq_exit6:
+ ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+
+create_cq_exit5:
+ ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
+
+create_cq_exit4:
+ ipz_queue_dtor(&my_cq->ipz_queue);
+
+create_cq_exit3:
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
+ if (h_ret != H_SUCCESS)
+ ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
+ "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
+
+create_cq_exit2:
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ idr_remove(&ehca_cq_idr, my_cq->token);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+create_cq_exit1:
+ kmem_cache_free(cq_cache, my_cq);
+
+ return cq;
+}
+
+int ehca_destroy_cq(struct ib_cq *cq)
+{
+ u64 h_ret;
+ int ret;
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ int cq_num = my_cq->cq_number;
+ struct ib_device *device = cq->device;
+ struct ehca_shca *shca = container_of(device, struct ehca_shca,
+ ib_device);
+ struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+ u32 cur_pid = current->tgid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ while (my_cq->nr_callbacks)
+ yield();
+
+ idr_remove(&ehca_cq_idr, my_cq->token);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+ ehca_err(device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_cq->ownpid);
+ return -EINVAL;
+ }
+
+ /* un-mmap if vma alloc */
+ if (my_cq->uspace_queue) {
+ ret = ehca_munmap(my_cq->uspace_queue,
+ my_cq->ipz_queue.queue_length);
+ if (ret)
+ ehca_err(device, "Could not munmap queue ehca_cq=%p "
+ "cq_num=%x", my_cq, cq_num);
+ ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
+ if (ret)
+ ehca_err(device, "Could not munmap fwh ehca_cq=%p "
+ "cq_num=%x", my_cq, cq_num);
+ }
+
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
+ if (h_ret == H_R_STATE) {
+ /* cq in err: read err data and destroy it forcibly */
+ ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%lx in err "
+ "state. Try to delete it forcibly.",
+ my_cq, cq_num, my_cq->ipz_cq_handle.handle);
+ ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
+ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
+ if (h_ret == H_SUCCESS)
+ ehca_dbg(device, "cq_num=%x deleted successfully.",
+ cq_num);
+ }
+ if (h_ret != H_SUCCESS) {
+ ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx "
+ "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
+ return ehca2ib_return_code(h_ret);
+ }
+ ipz_queue_dtor(&my_cq->ipz_queue);
+ kmem_cache_free(cq_cache, my_cq);
+
+ return 0;
+}
+
+int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
+{
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ u32 cur_pid = current->tgid;
+
+ if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
+ ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_cq->ownpid);
+ return -EINVAL;
+ }
+
+ /* TODO: proper resize needs to be done */
+ ehca_err(cq->device, "not implemented yet");
+
+ return -EFAULT;
+}
+
+int ehca_init_cq_cache(void)
+{
+ cq_cache = kmem_cache_create("ehca_cache_cq",
+ sizeof(struct ehca_cq), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!cq_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_cq_cache(void)
+{
+ if (cq_cache)
+ kmem_cache_destroy(cq_cache);
+}
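
ehca_create_cq() above acquires its resources in stages (idr token, firmware CQ, queue pages, userspace mappings) and unwinds through the create_cq_exit labels in reverse order on failure, so a failure at any stage releases exactly what was set up before it. A minimal standalone sketch of the idiom, with malloc placeholders standing in for the driver's stages:

#include <stdlib.h>

static int create_object(void)
{
	void *a, *b, *c;
	int ret = -1;

	a = malloc(16);		/* stage 1: e.g. reserve an idr token */
	if (!a)
		goto exit0;

	b = malloc(64);		/* stage 2: e.g. allocate the firmware CQ */
	if (!b)
		goto exit1;

	c = malloc(256);	/* stage 3: e.g. construct the queue pages */
	if (!c)
		goto exit2;

	free(c);		/* success path (demo only) */
	free(b);
	free(a);
	return 0;

exit2:
	free(b);		/* undo stage 2 */
exit1:
	free(a);		/* undo stage 1 */
exit0:
	return ret;
}

int main(void)
{
	return create_object();
}
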
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
new file mode 100644
index 00000000000..5281dec66f1
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -0,0 +1,185 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Event queue handling
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Khadija Souissi <souissi@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "ehca_irq.h"
+#include "ehca_iverbs.h"
+#include "ehca_qes.h"
+#include "hcp_if.h"
+#include "ipz_pt_fn.h"
+
+int ehca_create_eq(struct ehca_shca *shca,
+ struct ehca_eq *eq,
+ const enum ehca_eq_type type, const u32 length)
+{
+ u64 ret;
+ u32 nr_pages;
+ u32 i;
+ void *vpage;
+ struct ib_device *ib_dev = &shca->ib_device;
+
+ spin_lock_init(&eq->spinlock);
+ eq->is_initialized = 0;
+
+ if (type != EHCA_EQ && type != EHCA_NEQ) {
+ ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
+ return -EINVAL;
+ }
+ if (!length) {
+ ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
+ return -EINVAL;
+ }
+
+ ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
+ &eq->pf,
+ type,
+ length,
+ &eq->ipz_eq_handle,
+ &eq->length,
+ &nr_pages, &eq->ist);
+
+ if (ret != H_SUCCESS) {
+ ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
+ return -EINVAL;
+ }
+
+ ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages,
+ EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0);
+ if (!ret) {
+ ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
+ goto create_eq_exit1;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ u64 rpage;
+
+ if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
+ ret = H_RESOURCE;
+ goto create_eq_exit2;
+ }
+
+ rpage = virt_to_abs(vpage);
+ ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
+ eq->ipz_eq_handle,
+ &eq->pf,
+ 0, 0, rpage, 1);
+
+ if (i == (nr_pages - 1)) {
+ /* last page */
+ vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
+ if (ret != H_SUCCESS || vpage)
+ goto create_eq_exit2;
+ } else {
+ if (ret != H_PAGE_REGISTERED || !vpage)
+ goto create_eq_exit2;
+ }
+ }
+
+ ipz_qeit_reset(&eq->ipz_queue);
+
+ /* register interrupt handlers and initialize work queues */
+ if (type == EHCA_EQ) {
+ ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq,
+ SA_INTERRUPT, "ehca_eq",
+ (void *)shca);
+ if (ret < 0)
+ ehca_err(ib_dev, "Can't map interrupt handler.");
+
+ tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
+ } else if (type == EHCA_NEQ) {
+ ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq,
+ SA_INTERRUPT, "ehca_neq",
+ (void *)shca);
+ if (ret < 0)
+ ehca_err(ib_dev, "Can't map interrupt handler.");
+
+ tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
+ }
+
+ eq->is_initialized = 1;
+
+ return 0;
+
+create_eq_exit2:
+ ipz_queue_dtor(&eq->ipz_queue);
+
+create_eq_exit1:
+ hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+
+ return -EINVAL;
+}
+
+void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
+{
+ unsigned long flags;
+ void *eqe;
+
+ spin_lock_irqsave(&eq->spinlock, flags);
+ eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
+ spin_unlock_irqrestore(&eq->spinlock, flags);
+
+ return eqe;
+}
+
+int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
+{
+ unsigned long flags;
+ u64 h_ret;
+
+ spin_lock_irqsave(&eq->spinlock, flags);
+ ibmebus_free_irq(NULL, eq->ist, (void *)shca);
+
+ h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+
+ spin_unlock_irqrestore(&eq->spinlock, flags);
+
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "Can't free EQ resources.");
+ return -EINVAL;
+ }
+ ipz_queue_dtor(&eq->ipz_queue);
+
+ return 0;
+}
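
Both the CQ and EQ constructors follow the same rpage-registration handshake: every page except the last must be acknowledged with H_PAGE_REGISTERED, and only the final page returns H_SUCCESS. The sketch below simulates that contract in isolation; the fake hypercall and the numeric value used for H_PAGE_REGISTERED are stand-ins, not the hcp_if.h definitions.

#include <assert.h>

#define H_SUCCESS          0
#define H_PAGE_REGISTERED  15	/* illustrative value */

/* pretend hypervisor: H_SUCCESS only once the final page arrives */
static long fake_register_rpage(unsigned int page, unsigned int nr_pages)
{
	return (page == nr_pages - 1) ? H_SUCCESS : H_PAGE_REGISTERED;
}

static int register_all_pages(unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		long ret = fake_register_rpage(i, nr_pages);

		if (i == nr_pages - 1) {	/* last page */
			if (ret != H_SUCCESS)
				return -1;
		} else {
			if (ret != H_PAGE_REGISTERED)
				return -1;
		}
	}
	return 0;
}

int main(void)
{
	assert(register_all_pages(4) == 0);
	return 0;
}
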
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
new file mode 100644
index 00000000000..5eae6ac4842
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -0,0 +1,241 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * HCA query functions
+ *
+ * Authors: Heiko J Schick <schickhj@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_tools.h"
+#include "hcp_if.h"
+
+int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
+{
+ int ret = 0;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+ ib_device);
+ struct hipz_query_hca *rblock;
+
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!rblock) {
+ ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+ return -ENOMEM;
+ }
+
+ if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "Can't query device properties");
+ ret = -EINVAL;
+ goto query_device1;
+ }
+
+ memset(props, 0, sizeof(struct ib_device_attr));
+ props->fw_ver = rblock->hw_ver;
+ props->max_mr_size = rblock->max_mr_size;
+ props->vendor_id = rblock->vendor_id >> 8;
+ props->vendor_part_id = rblock->vendor_part_id >> 16;
+ props->hw_ver = rblock->hw_ver;
+ props->max_qp = min_t(int, rblock->max_qp, INT_MAX);
+ props->max_qp_wr = min_t(int, rblock->max_wqes_wq, INT_MAX);
+ props->max_sge = min_t(int, rblock->max_sge, INT_MAX);
+ props->max_sge_rd = min_t(int, rblock->max_sge_rd, INT_MAX);
+ props->max_cq = min_t(int, rblock->max_cq, INT_MAX);
+ props->max_cqe = min_t(int, rblock->max_cqe, INT_MAX);
+ props->max_mr = min_t(int, rblock->max_mr, INT_MAX);
+ props->max_mw = min_t(int, rblock->max_mw, INT_MAX);
+ props->max_pd = min_t(int, rblock->max_pd, INT_MAX);
+ props->max_ah = min_t(int, rblock->max_ah, INT_MAX);
+ props->max_fmr = min_t(int, rblock->max_mr, INT_MAX);
+ props->max_srq = 0;
+ props->max_srq_wr = 0;
+ props->max_srq_sge = 0;
+ props->max_pkeys = 16;
+ props->local_ca_ack_delay
+ = rblock->local_ca_ack_delay;
+ props->max_raw_ipv6_qp
+ = min_t(int, rblock->max_raw_ipv6_qp, INT_MAX);
+ props->max_raw_ethy_qp
+ = min_t(int, rblock->max_raw_ethy_qp, INT_MAX);
+ props->max_mcast_grp
+ = min_t(int, rblock->max_mcast_grp, INT_MAX);
+ props->max_mcast_qp_attach
+ = min_t(int, rblock->max_mcast_qp_attach, INT_MAX);
+ props->max_total_mcast_qp_attach
+ = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
+
+query_device1:
+ kfree(rblock);
+
+ return ret;
+}
+
+int ehca_query_port(struct ib_device *ibdev,
+ u8 port, struct ib_port_attr *props)
+{
+ int ret = 0;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+ ib_device);
+ struct hipz_query_port *rblock;
+
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!rblock) {
+ ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+ return -ENOMEM;
+ }
+
+ if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "Can't query port properties");
+ ret = -EINVAL;
+ goto query_port1;
+ }
+
+ memset(props, 0, sizeof(struct ib_port_attr));
+ props->state = rblock->state;
+
+ switch (rblock->max_mtu) {
+ case 0x1:
+ props->active_mtu = props->max_mtu = IB_MTU_256;
+ break;
+ case 0x2:
+ props->active_mtu = props->max_mtu = IB_MTU_512;
+ break;
+ case 0x3:
+ props->active_mtu = props->max_mtu = IB_MTU_1024;
+ break;
+ case 0x4:
+ props->active_mtu = props->max_mtu = IB_MTU_2048;
+ break;
+ case 0x5:
+ props->active_mtu = props->max_mtu = IB_MTU_4096;
+ break;
+ default:
+ ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
+ rblock->max_mtu);
+ break;
+ }
+
+ props->gid_tbl_len = rblock->gid_tbl_len;
+ props->max_msg_sz = rblock->max_msg_sz;
+ props->bad_pkey_cntr = rblock->bad_pkey_cntr;
+ props->qkey_viol_cntr = rblock->qkey_viol_cntr;
+ props->pkey_tbl_len = rblock->pkey_tbl_len;
+ props->lid = rblock->lid;
+ props->sm_lid = rblock->sm_lid;
+ props->lmc = rblock->lmc;
+ props->sm_sl = rblock->sm_sl;
+ props->subnet_timeout = rblock->subnet_timeout;
+ props->init_type_reply = rblock->init_type_reply;
+
+ props->active_width = IB_WIDTH_12X;
+ props->active_speed = 0x1;
+
+query_port1:
+ kfree(rblock);
+
+ return ret;
+}
+
+int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+ int ret = 0;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+ struct hipz_query_port *rblock;
+
+ if (index >= 16) {
+ ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+ return -EINVAL;
+ }
+
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!rblock) {
+ ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+ return -ENOMEM;
+ }
+
+ if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "Can't query port properties");
+ ret = -EINVAL;
+ goto query_pkey1;
+ }
+
+ memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
+
+query_pkey1:
+ kfree(rblock);
+
+ return ret;
+}
+
+int ehca_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ int ret = 0;
+ struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
+ ib_device);
+ struct hipz_query_port *rblock;
+
+ if (index > 255) {
+ ehca_err(&shca->ib_device, "Invalid index: %x.", index);
+ return -EINVAL;
+ }
+
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!rblock) {
+ ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+ return -ENOMEM;
+ }
+
+ if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "Can't query port properties");
+ ret = -EINVAL;
+ goto query_gid1;
+ }
+
+ memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
+ memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
+
+query_gid1:
+ kfree(rblock);
+
+ return ret;
+}
+
+int ehca_modify_port(struct ib_device *ibdev,
+ u8 port, int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ /* Not implemented yet */
+ return -EFAULT;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
new file mode 100644
index 00000000000..2a65b5be197
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -0,0 +1,760 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Functions for EQs, NEQs and interrupts
+ *
+ * Authors: Heiko J Schick <schickhj@de.ibm.com>
+ * Khadija Souissi <souissi@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "ehca_irq.h"
+#include "ehca_iverbs.h"
+#include "ehca_tools.h"
+#include "hcp_if.h"
+#include "hipz_fns.h"
+
+#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
+#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
+#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7)
+#define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31)
+#define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31)
+#define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63)
+#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63)
+
+#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
+#define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7)
+#define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15)
+#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
+
+#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
+#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
+
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
+static void queue_comp_task(struct ehca_cq *__cq);
+
+static struct ehca_comp_pool* pool;
+static struct notifier_block comp_pool_callback_nb;
+
+#endif
+
+static inline void comp_event_callback(struct ehca_cq *cq)
+{
+ if (!cq->ib_cq.comp_handler)
+ return;
+
+ spin_lock(&cq->cb_lock);
+ cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
+ spin_unlock(&cq->cb_lock);
+
+ return;
+}
+
+static void print_error_data(struct ehca_shca *shca, void *data,
+ u64 *rblock, int length)
+{
+ u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
+ u64 resource = rblock[1];
+
+ switch (type) {
+ case 0x1: /* Queue Pair */
+ {
+ struct ehca_qp *qp = (struct ehca_qp*)data;
+
+ /* only print error data if AER is set */
+ if (rblock[6] == 0)
+ return;
+
+ ehca_err(&shca->ib_device,
+ "QP 0x%x (resource=%lx) has errors.",
+ qp->ib_qp.qp_num, resource);
+ break;
+ }
+ case 0x4: /* Completion Queue */
+ {
+ struct ehca_cq *cq = (struct ehca_cq*)data;
+
+ ehca_err(&shca->ib_device,
+ "CQ 0x%x (resource=%lx) has errors.",
+ cq->cq_number, resource);
+ break;
+ }
+ default:
+ ehca_err(&shca->ib_device,
+ "Unknown errror type: %lx on %s.",
+ type, shca->ib_device.name);
+ break;
+ }
+
+ ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
+ ehca_err(&shca->ib_device, "EHCA ----- error data begin "
+ "---------------------------------------------------");
+ ehca_dmp(rblock, length, "resource=%lx", resource);
+ ehca_err(&shca->ib_device, "EHCA ----- error data end "
+ "----------------------------------------------------");
+
+ return;
+}
+
+int ehca_error_data(struct ehca_shca *shca, void *data,
+ u64 resource)
+{
+
+ unsigned long ret;
+ u64 *rblock;
+ unsigned long block_count;
+
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!rblock) {
+ ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
+ ret = -ENOMEM;
+ goto error_data1;
+ }
+
+ ret = hipz_h_error_data(shca->ipz_hca_handle,
+ resource,
+ rblock,
+ &block_count);
+
+ if (ret == H_R_STATE) {
+ ehca_err(&shca->ib_device,
+ "No error data is available: %lx.", resource);
+ } else if (ret == H_SUCCESS) {
+ int length;
+
+ length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
+
+ if (length > PAGE_SIZE)
+ length = PAGE_SIZE;
+
+ print_error_data(shca, data, rblock, length);
+ } else {
+ ehca_err(&shca->ib_device,
+ "Error data could not be fetched: %lx", resource);
+ }
+
+ kfree(rblock);
+
+error_data1:
+ return ret;
+
+}
+
+static void qp_event_callback(struct ehca_shca *shca,
+ u64 eqe,
+ enum ib_event_type event_type)
+{
+ struct ib_event event;
+ struct ehca_qp *qp;
+ unsigned long flags;
+ u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
+
+ spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ qp = idr_find(&ehca_qp_idr, token);
+ spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+
+ if (!qp)
+ return;
+
+ ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
+
+ if (!qp->ib_qp.event_handler)
+ return;
+
+ event.device = &shca->ib_device;
+ event.event = event_type;
+ event.element.qp = &qp->ib_qp;
+
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+
+ return;
+}
+
+static void cq_event_callback(struct ehca_shca *shca,
+ u64 eqe)
+{
+ struct ehca_cq *cq;
+ unsigned long flags;
+ u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
+
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ cq = idr_find(&ehca_cq_idr, token);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ if (!cq)
+ return;
+
+ ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
+
+ return;
+}
+
+static void parse_identifier(struct ehca_shca *shca, u64 eqe)
+{
+ u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
+
+ switch (identifier) {
+ case 0x02: /* path migrated */
+ qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
+ break;
+ case 0x03: /* communication established */
+ qp_event_callback(shca, eqe, IB_EVENT_COMM_EST);
+ break;
+ case 0x04: /* send queue drained */
+ qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED);
+ break;
+ case 0x05: /* QP error */
+ case 0x06: /* QP error */
+ qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL);
+ break;
+ case 0x07: /* CQ error */
+ case 0x08: /* CQ error */
+ cq_event_callback(shca, eqe);
+ break;
+ case 0x09: /* MRMWPTE error */
+ ehca_err(&shca->ib_device, "MRMWPTE error.");
+ break;
+ case 0x0A: /* port event */
+ ehca_err(&shca->ib_device, "Port event.");
+ break;
+ case 0x0B: /* MR access error */
+ ehca_err(&shca->ib_device, "MR access error.");
+ break;
+ case 0x0C: /* EQ error */
+ ehca_err(&shca->ib_device, "EQ error.");
+ break;
+ case 0x0D: /* P/Q_Key mismatch */
+ ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
+ break;
+ case 0x10: /* sampling complete */
+ ehca_err(&shca->ib_device, "Sampling complete.");
+ break;
+ case 0x11: /* unaffiliated access error */
+ ehca_err(&shca->ib_device, "Unaffiliated access error.");
+ break;
+ case 0x12: /* path migrating error */
+ ehca_err(&shca->ib_device, "Path migration error.");
+ break;
+ case 0x13: /* interface trace stopped */
+ ehca_err(&shca->ib_device, "Interface trace stopped.");
+ break;
+ case 0x14: /* first error capture info available */
+ default:
+ ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
+ identifier, shca->ib_device.name);
+ break;
+ }
+
+ return;
+}
+
+static void parse_ec(struct ehca_shca *shca, u64 eqe)
+{
+ struct ib_event event;
+ u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
+ u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
+
+ switch (ec) {
+ case 0x30: /* port availability change */
+ if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
+ ehca_info(&shca->ib_device,
+ "port %x is active.", port);
+ event.device = &shca->ib_device;
+ event.event = IB_EVENT_PORT_ACTIVE;
+ event.element.port_num = port;
+ shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+ ib_dispatch_event(&event);
+ } else {
+ ehca_info(&shca->ib_device,
+ "port %x is inactive.", port);
+ event.device = &shca->ib_device;
+ event.event = IB_EVENT_PORT_ERR;
+ event.element.port_num = port;
+ shca->sport[port - 1].port_state = IB_PORT_DOWN;
+ ib_dispatch_event(&event);
+ }
+ break;
+ case 0x31:
+ /* port configuration change
+ * disruptive change is caused by
+ * LID, PKEY or SM change
+ */
+ ehca_warn(&shca->ib_device,
+ "disruptive port %x configuration change", port);
+
+ ehca_info(&shca->ib_device,
+ "port %x is inactive.", port);
+ event.device = &shca->ib_device;
+ event.event = IB_EVENT_PORT_ERR;
+ event.element.port_num = port;
+ shca->sport[port - 1].port_state = IB_PORT_DOWN;
+ ib_dispatch_event(&event);
+
+ ehca_info(&shca->ib_device,
+ "port %x is active.", port);
+ event.device = &shca->ib_device;
+ event.event = IB_EVENT_PORT_ACTIVE;
+ event.element.port_num = port;
+ shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+ ib_dispatch_event(&event);
+ break;
+ case 0x32: /* adapter malfunction */
+ ehca_err(&shca->ib_device, "Adapter malfunction.");
+ break;
+ case 0x33: /* trace stopped */
+ ehca_err(&shca->ib_device, "Traced stopped.");
+ break;
+ default:
+ ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
+ ec, shca->ib_device.name);
+ break;
+ }
+
+ return;
+}
+
+static inline void reset_eq_pending(struct ehca_cq *cq)
+{
+ u64 CQx_EP;
+ struct h_galpa gal = cq->galpas.kernel;
+
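+ /* clear the CQ's EQ-pending bit; the read-back presumably forces
+ * the store out to the adapter */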
+ hipz_galpa_store_cq(gal, cqx_ep, 0x0);
+ CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
+
+ return;
+}
+
+irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct ehca_shca *shca = (struct ehca_shca*)dev_id;
+
+ tasklet_hi_schedule(&shca->neq.interrupt_task);
+
+ return IRQ_HANDLED;
+}
+
+void ehca_tasklet_neq(unsigned long data)
+{
+ struct ehca_shca *shca = (struct ehca_shca*)data;
+ struct ehca_eqe *eqe;
+ u64 ret;
+
+ eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+
+ while (eqe) {
+ if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
+ parse_ec(shca, eqe->entry);
+
+ eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+ }
+
+ ret = hipz_h_reset_event(shca->ipz_hca_handle,
+ shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
+
+ if (ret != H_SUCCESS)
+ ehca_err(&shca->ib_device, "Can't clear notification events.");
+
+ return;
+}
+
+irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct ehca_shca *shca = (struct ehca_shca*)dev_id;
+
+ tasklet_hi_schedule(&shca->eq.interrupt_task);
+
+ return IRQ_HANDLED;
+}
+
+void ehca_tasklet_eq(unsigned long data)
+{
+ struct ehca_shca *shca = (struct ehca_shca*)data;
+ struct ehca_eqe *eqe;
+ int int_state;
+ int query_cnt = 0;
+
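+ /*
+ * Drain the EQ; on hw_level >= 2 also re-check the adapter's
+ * interrupt state so EQEs arriving between polls are not lost.
+ */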
+ do {
+ eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+
+ if ((shca->hw_level >= 2) && eqe)
+ int_state = 1;
+ else
+ int_state = 0;
+
+ while ((int_state == 1) || eqe) {
+ while (eqe) {
+ u64 eqe_value = eqe->entry;
+
+ ehca_dbg(&shca->ib_device,
+ "eqe_value=%lx", eqe_value);
+
+ /* TODO: better structure */
+ if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
+ eqe_value)) {
+ unsigned long flags;
+ u32 token;
+ struct ehca_cq *cq;
+
+ ehca_dbg(&shca->ib_device,
+ "... completion event");
+ token =
+ EHCA_BMASK_GET(EQE_CQ_TOKEN,
+ eqe_value);
+ spin_lock_irqsave(&ehca_cq_idr_lock,
+ flags);
+ cq = idr_find(&ehca_cq_idr, token);
+
+ if (cq == NULL) {
+ spin_unlock(&ehca_cq_idr_lock);
+ break;
+ }
+
+ reset_eq_pending(cq);
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+ queue_comp_task(cq);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock,
+ flags);
+#else
+ spin_unlock_irqrestore(&ehca_cq_idr_lock,
+ flags);
+ comp_event_callback(cq);
+#endif
+ } else {
+ ehca_dbg(&shca->ib_device,
+ "... non completion event");
+ parse_identifier(shca, eqe_value);
+ }
+ eqe =
+ (struct ehca_eqe *)ehca_poll_eq(shca,
+ &shca->eq);
+ }
+
+ if (shca->hw_level >= 2) {
+ int_state =
+ hipz_h_query_int_state(shca->ipz_hca_handle,
+ shca->eq.ist);
+ query_cnt++;
+ iosync();
+ if (query_cnt >= 100) {
+ query_cnt = 0;
+ int_state = 0;
+ }
+ }
+ eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+
+ }
+ } while (int_state != 0);
+
+ return;
+}
+
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+
+static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
+{
+ unsigned long flags_last_cpu;
+
+ if (ehca_debug_level)
+ ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+
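+ /* advance last_cpu round-robin over the online CPU map */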
+ spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
+ pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
+ if (pool->last_cpu == NR_CPUS)
+ pool->last_cpu = first_cpu(cpu_online_map);
+ spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
+
+ return pool->last_cpu;
+}
+
+static void __queue_comp_task(struct ehca_cq *__cq,
+ struct ehca_cpu_comp_task *cct)
+{
+ unsigned long flags_cct;
+ unsigned long flags_cq;
+
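+ /* only the first pending callback puts the CQ on this CPU's list;
+ * later ones just bump the counter */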
+ spin_lock_irqsave(&cct->task_lock, flags_cct);
+ spin_lock_irqsave(&__cq->task_lock, flags_cq);
+
+ if (__cq->nr_callbacks == 0) {
+ __cq->nr_callbacks++;
+ list_add_tail(&__cq->entry, &cct->cq_list);
+ cct->cq_jobs++;
+ wake_up(&cct->wait_queue);
+ } else
+ __cq->nr_callbacks++;
+
+ spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
+ spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+}
+
+static void queue_comp_task(struct ehca_cq *__cq)
+{
+ int cpu;
+ int cpu_id;
+ struct ehca_cpu_comp_task *cct;
+
+ cpu = get_cpu();
+ cpu_id = find_next_online_cpu(pool);
+
+ BUG_ON(!cpu_online(cpu_id));
+
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+
+ if (cct->cq_jobs > 0) {
+ cpu_id = find_next_online_cpu(pool);
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
+ }
+
+ __queue_comp_task(__cq, cct);
+
+ put_cpu();
+
+ return;
+}
+
+static void run_comp_task(struct ehca_cpu_comp_task* cct)
+{
+ struct ehca_cq *cq;
+ unsigned long flags_cct;
+ unsigned long flags_cq;
+
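+ /* the task lock is dropped while a callback runs so new CQs
+ * can be queued meanwhile */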
+ spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+ while (!list_empty(&cct->cq_list)) {
+ cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
+ spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+ comp_event_callback(cq);
+ spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+ spin_lock_irqsave(&cq->task_lock, flags_cq);
+ cq->nr_callbacks--;
+ if (cq->nr_callbacks == 0) {
+ list_del_init(cct->cq_list.next);
+ cct->cq_jobs--;
+ }
+ spin_unlock_irqrestore(&cq->task_lock, flags_cq);
+
+ }
+
+ spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+
+ return;
+}
+
+static int comp_task(void *__cct)
+{
+ struct ehca_cpu_comp_task* cct = __cct;
+ DECLARE_WAITQUEUE(wait, current);
+
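+ /* per-CPU kernel thread: sleep until a CQ is queued or the
+ * thread is stopped */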
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ add_wait_queue(&cct->wait_queue, &wait);
+
+ if (list_empty(&cct->cq_list))
+ schedule();
+ else
+ __set_current_state(TASK_RUNNING);
+
+ remove_wait_queue(&cct->wait_queue, &wait);
+
+ if (!list_empty(&cct->cq_list))
+ run_comp_task(__cct);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
+ int cpu)
+{
+ struct ehca_cpu_comp_task *cct;
+
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+ spin_lock_init(&cct->task_lock);
+ INIT_LIST_HEAD(&cct->cq_list);
+ init_waitqueue_head(&cct->wait_queue);
+ cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+
+ return cct->task;
+}
+
+static void destroy_comp_task(struct ehca_comp_pool *pool,
+ int cpu)
+{
+ struct ehca_cpu_comp_task *cct;
+ struct task_struct *task;
+ unsigned long flags_cct;
+
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+
+ spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+ task = cct->task;
+ cct->task = NULL;
+ cct->cq_jobs = 0;
+
+ spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+
+ if (task)
+ kthread_stop(task);
+
+ return;
+}
+
+static void take_over_work(struct ehca_comp_pool *pool,
+ int cpu)
+{
+ struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+ LIST_HEAD(list);
+ struct ehca_cq *cq;
+ unsigned long flags_cct;
+
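+ /* splice the pending CQs off the dead CPU's list and requeue
+ * them on the CPU running this callback */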
+ spin_lock_irqsave(&cct->task_lock, flags_cct);
+
+ list_splice_init(&cct->cq_list, &list);
+
+ while(!list_empty(&list)) {
+ cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
+
+ list_del(&cq->entry);
+ __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
+ smp_processor_id()));
+ }
+
+ spin_unlock_irqrestore(&cct->task_lock, flags_cct);
+
+}
+
+static int comp_pool_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct ehca_cpu_comp_task *cct;
+
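+ /* CPU hotplug notifier: create, bind or tear down the per-CPU
+ * completion task */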
+ switch (action) {
+ case CPU_UP_PREPARE:
+ ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
+ if(!create_comp_task(pool, cpu)) {
+ ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
+ return NOTIFY_BAD;
+ }
+ break;
+ case CPU_UP_CANCELED:
+ ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+ kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+ destroy_comp_task(pool, cpu);
+ break;
+ case CPU_ONLINE:
+ ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
+ cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
+ kthread_bind(cct->task, cpu);
+ wake_up_process(cct->task);
+ break;
+ case CPU_DOWN_PREPARE:
+ ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
+ break;
+ case CPU_DOWN_FAILED:
+ ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
+ break;
+ case CPU_DEAD:
+ ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
+ destroy_comp_task(pool, cpu);
+ take_over_work(pool, cpu);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+#endif
+
+int ehca_create_comp_pool(void)
+{
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+ int cpu;
+ struct task_struct *task;
+
+ pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
+ if (pool == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&pool->last_cpu_lock);
+ pool->last_cpu = any_online_cpu(cpu_online_map);
+
+ pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
+ if (pool->cpu_comp_tasks == NULL) {
+ kfree(pool);
+ return -EINVAL;
+ }
+
+ for_each_online_cpu(cpu) {
+ task = create_comp_task(pool, cpu);
+ if (task) {
+ kthread_bind(task, cpu);
+ wake_up_process(task);
+ }
+ }
+
+ comp_pool_callback_nb.notifier_call = comp_pool_callback;
+ comp_pool_callback_nb.priority = 0;
+ register_cpu_notifier(&comp_pool_callback_nb);
+#endif
+
+ return 0;
+}
+
+void ehca_destroy_comp_pool(void)
+{
+#ifdef CONFIG_INFINIBAND_EHCA_SCALING
+ int i;
+
+ unregister_cpu_notifier(&comp_pool_callback_nb);
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_online(i))
+ destroy_comp_task(pool, i);
+ }
+#endif
+
+ return;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h
new file mode 100644
index 00000000000..85bf1fe16fe
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_irq.h
@@ -0,0 +1,77 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Function definitions and structs for EQs, NEQs and interrupts
+ *
+ * Authors: Heiko J Schick <schickhj@de.ibm.com>
+ * Khadija Souissi <souissi@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_IRQ_H
+#define __EHCA_IRQ_H
+
+struct ehca_shca;
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/atomic.h>
+
+int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
+
+irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs);
+void ehca_tasklet_neq(unsigned long data);
+
+irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs);
+void ehca_tasklet_eq(unsigned long data);
+
+struct ehca_cpu_comp_task {
+ wait_queue_head_t wait_queue;
+ struct list_head cq_list;
+ struct task_struct *task;
+ spinlock_t task_lock;
+ int cq_jobs;
+};
+
+struct ehca_comp_pool {
+ struct ehca_cpu_comp_task *cpu_comp_tasks;
+ int last_cpu;
+ spinlock_t last_cpu_lock;
+};
+
+int ehca_create_comp_pool(void);
+void ehca_destroy_comp_pool(void);
+
+#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
new file mode 100644
index 00000000000..319c39d47f3
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -0,0 +1,182 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Function definitions for internal functions
+ *
+ * Authors: Heiko J Schick <schickhj@de.ibm.com>
+ * Dietmar Decker <ddecker@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __EHCA_IVERBS_H__
+#define __EHCA_IVERBS_H__
+
+#include "ehca_classes.h"
+
+int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
+
+int ehca_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+
+int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
+
+int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+
+int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
+ struct ib_port_modify *props);
+
+struct ib_pd *ehca_alloc_pd(struct ib_device *device,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int ehca_dealloc_pd(struct ib_pd *pd);
+
+struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+
+int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+
+int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+
+int ehca_destroy_ah(struct ib_ah *ah);
+
+struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
+
+struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ int mr_access_flags, u64 *iova_start);
+
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
+ struct ib_umem *region,
+ int mr_access_flags, struct ib_udata *udata);
+
+int ehca_rereg_phys_mr(struct ib_mr *mr,
+ int mr_rereg_mask,
+ struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf, int mr_access_flags, u64 *iova_start);
+
+int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
+
+int ehca_dereg_mr(struct ib_mr *mr);
+
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd);
+
+int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind);
+
+int ehca_dealloc_mw(struct ib_mw *mw);
+
+struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
+ int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+
+int ehca_map_phys_fmr(struct ib_fmr *fmr,
+ u64 *page_list, int list_len, u64 iova);
+
+int ehca_unmap_fmr(struct list_head *fmr_list);
+
+int ehca_dealloc_fmr(struct ib_fmr *fmr);
+
+enum ehca_eq_type {
+ EHCA_EQ = 0, /* Event Queue */
+ EHCA_NEQ /* Notification Event Queue */
+};
+
+int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
+ enum ehca_eq_type type, const u32 length);
+
+int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
+
+void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
+
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int ehca_destroy_cq(struct ib_cq *cq);
+
+int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+
+int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
+
+int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
+
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
+
+struct ib_qp *ehca_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int ehca_destroy_qp(struct ib_qp *qp);
+
+int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata);
+
+int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+
+int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
+ struct ib_send_wr **bad_send_wr);
+
+int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
+
+u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
+ struct ib_qp_init_attr *qp_init_attr);
+
+int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
+ struct ib_udata *udata);
+
+int ehca_dealloc_ucontext(struct ib_ucontext *context);
+
+int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+void ehca_poll_eqs(unsigned long data);
+
+int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
+ struct vm_area_struct **vma);
+
+int ehca_mmap_register(u64 physical,void **mapped,
+ struct vm_area_struct **vma);
+
+int ehca_munmap(unsigned long addr, size_t len);
+
+#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
new file mode 100644
index 00000000000..2380994418a
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -0,0 +1,818 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Module start/stop and HCA detection
+ *
+ * Authors: Heiko J Schick <schickhj@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Joachim Fenkes <fenkes@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "ehca_iverbs.h"
+#include "ehca_mrmw.h"
+#include "ehca_tools.h"
+#include "hcp_if.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
+MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
+MODULE_VERSION("SVNEHCA_0016");
+
+int ehca_open_aqp1 = 0;
+int ehca_debug_level = 0;
+int ehca_hw_level = 0;
+int ehca_nr_ports = 2;
+int ehca_use_hp_mr = 0;
+int ehca_port_act_time = 30;
+int ehca_poll_all_eqs = 1;
+int ehca_static_rate = -1;
+
+module_param_named(open_aqp1, ehca_open_aqp1, int, 0);
+module_param_named(debug_level, ehca_debug_level, int, 0);
+module_param_named(hw_level, ehca_hw_level, int, 0);
+module_param_named(nr_ports, ehca_nr_ports, int, 0);
+module_param_named(use_hp_mr, ehca_use_hp_mr, int, 0);
+module_param_named(port_act_time, ehca_port_act_time, int, 0);
+module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, 0);
+module_param_named(static_rate, ehca_static_rate, int, 0);
+
+MODULE_PARM_DESC(open_aqp1,
+ "AQP1 on startup (0: no (default), 1: yes)");
+MODULE_PARM_DESC(debug_level,
+ "debug level"
+ " (0: no debug traces (default), 1: with debug traces)");
+MODULE_PARM_DESC(hw_level,
+ "hardware level"
+ " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+MODULE_PARM_DESC(nr_ports,
+ "number of connected ports (default: 2)");
+MODULE_PARM_DESC(use_hp_mr,
+ "high performance MRs (0: no (default), 1: yes)");
+MODULE_PARM_DESC(port_act_time,
+ "time to wait for port activation (default: 30 sec)");
+MODULE_PARM_DESC(poll_all_eqs,
+ "polls all event queues periodically"
+ " (0: no, 1: yes (default))");
+MODULE_PARM_DESC(static_rate,
+ "set permanent static rate (default: disabled)");
+
+spinlock_t ehca_qp_idr_lock;
+spinlock_t ehca_cq_idr_lock;
+DEFINE_IDR(ehca_qp_idr);
+DEFINE_IDR(ehca_cq_idr);
+
+static struct list_head shca_list; /* list of all registered ehcas */
+static spinlock_t shca_list_lock;
+
+static struct timer_list poll_eqs_timer;
+
+static int ehca_create_slab_caches(void)
+{
+ int ret;
+
+ ret = ehca_init_pd_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create PD SLAB cache.");
+ return ret;
+ }
+
+ ret = ehca_init_cq_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create CQ SLAB cache.");
+ goto create_slab_caches2;
+ }
+
+ ret = ehca_init_qp_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create QP SLAB cache.");
+ goto create_slab_caches3;
+ }
+
+ ret = ehca_init_av_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create AV SLAB cache.");
+ goto create_slab_caches4;
+ }
+
+ ret = ehca_init_mrmw_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create MR&MW SLAB cache.");
+ goto create_slab_caches5;
+ }
+
+ return 0;
+
+create_slab_caches5:
+ ehca_cleanup_av_cache();
+
+create_slab_caches4:
+ ehca_cleanup_qp_cache();
+
+create_slab_caches3:
+ ehca_cleanup_cq_cache();
+
+create_slab_caches2:
+ ehca_cleanup_pd_cache();
+
+ return ret;
+}
+
+static void ehca_destroy_slab_caches(void)
+{
+ ehca_cleanup_mrmw_cache();
+ ehca_cleanup_av_cache();
+ ehca_cleanup_qp_cache();
+ ehca_cleanup_cq_cache();
+ ehca_cleanup_pd_cache();
+}
+
+#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
+#define EHCA_REVID EHCA_BMASK_IBM(40,63)
+
+int ehca_sense_attributes(struct ehca_shca *shca)
+{
+ int ret = 0;
+ u64 h_ret;
+ struct hipz_query_hca *rblock;
+
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!rblock) {
+ ehca_gen_err("Cannot allocate rblock memory.");
+ return -ENOMEM;
+ }
+
+ h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
+ if (h_ret != H_SUCCESS) {
+ ehca_gen_err("Cannot query device properties. h_ret=%lx",
+ h_ret);
+ ret = -EPERM;
+ goto num_ports1;
+ }
+
+ if (ehca_nr_ports == 1)
+ shca->num_ports = 1;
+ else
+ shca->num_ports = (u8)rblock->num_ports;
+
+ ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
+
+ if (ehca_hw_level == 0) {
+ u32 hcaaver;
+ u32 revid;
+
+ hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
+ revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
+
+ ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
+
+ if ((hcaaver == 1) && (revid == 0))
+ shca->hw_level = 0;
+ else if ((hcaaver == 1) && (revid == 1))
+ shca->hw_level = 1;
+ else if ((hcaaver == 1) && (revid == 2))
+ shca->hw_level = 2;
+ }
+ ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
+
+ shca->sport[0].rate = IB_RATE_30_GBPS;
+ shca->sport[1].rate = IB_RATE_30_GBPS;
+
+num_ports1:
+ kfree(rblock);
+ return ret;
+}
+
+static int init_node_guid(struct ehca_shca *shca)
+{
+ int ret = 0;
+ struct hipz_query_hca *rblock;
+
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!rblock) {
+ ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+ return -ENOMEM;
+ }
+
+ if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "Can't query device properties");
+ ret = -EINVAL;
+ goto init_node_guid1;
+ }
+
+ memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
+
+init_node_guid1:
+ kfree(rblock);
+ return ret;
+}
+
+int ehca_register_device(struct ehca_shca *shca)
+{
+ int ret;
+
+ ret = init_node_guid(shca);
+ if (ret)
+ return ret;
+
+ strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
+ shca->ib_device.owner = THIS_MODULE;
+
+ shca->ib_device.uverbs_abi_ver = 5;
+ shca->ib_device.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
+
+ shca->ib_device.node_type = RDMA_NODE_IB_CA;
+ shca->ib_device.phys_port_cnt = shca->num_ports;
+ shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
+ shca->ib_device.query_device = ehca_query_device;
+ shca->ib_device.query_port = ehca_query_port;
+ shca->ib_device.query_gid = ehca_query_gid;
+ shca->ib_device.query_pkey = ehca_query_pkey;
+ /* shca->in_device.modify_device = ehca_modify_device */
+ shca->ib_device.modify_port = ehca_modify_port;
+ shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
+ shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
+ shca->ib_device.alloc_pd = ehca_alloc_pd;
+ shca->ib_device.dealloc_pd = ehca_dealloc_pd;
+ shca->ib_device.create_ah = ehca_create_ah;
+ /* shca->ib_device.modify_ah = ehca_modify_ah; */
+ shca->ib_device.query_ah = ehca_query_ah;
+ shca->ib_device.destroy_ah = ehca_destroy_ah;
+ shca->ib_device.create_qp = ehca_create_qp;
+ shca->ib_device.modify_qp = ehca_modify_qp;
+ shca->ib_device.query_qp = ehca_query_qp;
+ shca->ib_device.destroy_qp = ehca_destroy_qp;
+ shca->ib_device.post_send = ehca_post_send;
+ shca->ib_device.post_recv = ehca_post_recv;
+ shca->ib_device.create_cq = ehca_create_cq;
+ shca->ib_device.destroy_cq = ehca_destroy_cq;
+ shca->ib_device.resize_cq = ehca_resize_cq;
+ shca->ib_device.poll_cq = ehca_poll_cq;
+ /* shca->ib_device.peek_cq = ehca_peek_cq; */
+ shca->ib_device.req_notify_cq = ehca_req_notify_cq;
+ /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
+ shca->ib_device.get_dma_mr = ehca_get_dma_mr;
+ shca->ib_device.reg_phys_mr = ehca_reg_phys_mr;
+ shca->ib_device.reg_user_mr = ehca_reg_user_mr;
+ shca->ib_device.query_mr = ehca_query_mr;
+ shca->ib_device.dereg_mr = ehca_dereg_mr;
+ shca->ib_device.rereg_phys_mr = ehca_rereg_phys_mr;
+ shca->ib_device.alloc_mw = ehca_alloc_mw;
+ shca->ib_device.bind_mw = ehca_bind_mw;
+ shca->ib_device.dealloc_mw = ehca_dealloc_mw;
+ shca->ib_device.alloc_fmr = ehca_alloc_fmr;
+ shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
+ shca->ib_device.unmap_fmr = ehca_unmap_fmr;
+ shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
+ shca->ib_device.attach_mcast = ehca_attach_mcast;
+ shca->ib_device.detach_mcast = ehca_detach_mcast;
+ /* shca->ib_device.process_mad = ehca_process_mad; */
+ shca->ib_device.mmap = ehca_mmap;
+
+ ret = ib_register_device(&shca->ib_device);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "ib_register_device() failed ret=%x", ret);
+
+ return ret;
+}
+
+static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
+{
+ struct ehca_sport *sport = &shca->sport[port - 1];
+ struct ib_cq *ibcq;
+ struct ib_qp *ibqp;
+ struct ib_qp_init_attr qp_init_attr;
+ int ret;
+
+ if (sport->ibcq_aqp1) {
+ ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
+ return -EPERM;
+ }
+
+ ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
+ if (IS_ERR(ibcq)) {
+ ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
+ return PTR_ERR(ibcq);
+ }
+ sport->ibcq_aqp1 = ibcq;
+
+ if (sport->ibqp_aqp1) {
+ ehca_err(&shca->ib_device, "AQP1 QP is already created.");
+ ret = -EPERM;
+ goto create_aqp1;
+ }
+
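+ /* the GSI QP uses fixed, driver-chosen queue sizes */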
+ memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
+ qp_init_attr.send_cq = ibcq;
+ qp_init_attr.recv_cq = ibcq;
+ qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ qp_init_attr.cap.max_send_wr = 100;
+ qp_init_attr.cap.max_recv_wr = 100;
+ qp_init_attr.cap.max_send_sge = 2;
+ qp_init_attr.cap.max_recv_sge = 1;
+ qp_init_attr.qp_type = IB_QPT_GSI;
+ qp_init_attr.port_num = port;
+ qp_init_attr.qp_context = NULL;
+ qp_init_attr.event_handler = NULL;
+ qp_init_attr.srq = NULL;
+
+ ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
+ if (IS_ERR(ibqp)) {
+ ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
+ ret = PTR_ERR(ibqp);
+ goto create_aqp1;
+ }
+ sport->ibqp_aqp1 = ibqp;
+
+ return 0;
+
+create_aqp1:
+ ib_destroy_cq(sport->ibcq_aqp1);
+ return ret;
+}
+
+static int ehca_destroy_aqp1(struct ehca_sport *sport)
+{
+ int ret;
+
+ ret = ib_destroy_qp(sport->ibqp_aqp1);
+ if (ret) {
+ ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret);
+ return ret;
+ }
+
+ ret = ib_destroy_cq(sport->ibcq_aqp1);
+ if (ret)
+ ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret);
+
+ return ret;
+}
+
+static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ehca_debug_level);
+}
+
+static ssize_t ehca_store_debug_level(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ int value = (*buf) - '0';
+ if (value >= 0 && value <= 9)
+ ehca_debug_level = value;
+ return 1;
+}
+
+DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
+ ehca_show_debug_level, ehca_store_debug_level);
+
+void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
+{
+ driver_create_file(&drv->driver, &driver_attr_debug_level);
+}
+
+void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
+{
+ driver_remove_file(&drv->driver, &driver_attr_debug_level);
+}
+
+#define EHCA_RESOURCE_ATTR(name) \
+static ssize_t ehca_show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ehca_shca *shca; \
+ struct hipz_query_hca *rblock; \
+ int data; \
+ \
+ shca = dev->driver_data; \
+ \
+ rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \
+ if (!rblock) { \
+ dev_err(dev, "Can't allocate rblock memory."); \
+ return 0; \
+ } \
+ \
+ if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
+ dev_err(dev, "Can't query device properties"); \
+ kfree(rblock); \
+ return 0; \
+ } \
+ \
+ data = rblock->name; \
+ kfree(rblock); \
+ \
+ if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
+ return snprintf(buf, 256, "1\n"); \
+ else \
+ return snprintf(buf, 256, "%d\n", data); \
+ \
+} \
+static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
+
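+/* generate one read-only sysfs attribute per HCA resource counter */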
+EHCA_RESOURCE_ATTR(num_ports);
+EHCA_RESOURCE_ATTR(hw_ver);
+EHCA_RESOURCE_ATTR(max_eq);
+EHCA_RESOURCE_ATTR(cur_eq);
+EHCA_RESOURCE_ATTR(max_cq);
+EHCA_RESOURCE_ATTR(cur_cq);
+EHCA_RESOURCE_ATTR(max_qp);
+EHCA_RESOURCE_ATTR(cur_qp);
+EHCA_RESOURCE_ATTR(max_mr);
+EHCA_RESOURCE_ATTR(cur_mr);
+EHCA_RESOURCE_ATTR(max_mw);
+EHCA_RESOURCE_ATTR(cur_mw);
+EHCA_RESOURCE_ATTR(max_pd);
+EHCA_RESOURCE_ATTR(max_ah);
+
+static ssize_t ehca_show_adapter_handle(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ehca_shca *shca = dev->driver_data;
+
+ return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
+
+}
+static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
+
+void ehca_create_device_sysfs(struct ibmebus_dev *dev)
+{
+ device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
+ device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
+ device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
+ device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
+ device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
+ device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
+ device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
+ device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
+ device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
+ device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
+ device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
+ device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
+ device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
+ device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
+ device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
+}
+
+void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
+{
+ device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
+ device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
+}
+
+static int __devinit ehca_probe(struct ibmebus_dev *dev,
+ const struct of_device_id *id)
+{
+ struct ehca_shca *shca;
+ u64 *handle;
+ struct ib_pd *ibpd;
+ int ret;
+
+ handle = (u64 *)get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
+ if (!handle) {
+ ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
+ dev->ofdev.node->full_name);
+ return -ENODEV;
+ }
+
+ if (!(*handle)) {
+ ehca_gen_err("Wrong eHCA handle for adapter: %s.",
+ dev->ofdev.node->full_name);
+ return -ENODEV;
+ }
+
+ shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
+ if (!shca) {
+ ehca_gen_err("Cannot allocate shca memory.");
+ return -ENOMEM;
+ }
+
+ shca->ibmebus_dev = dev;
+ shca->ipz_hca_handle.handle = *handle;
+ dev->ofdev.dev.driver_data = shca;
+
+ ret = ehca_sense_attributes(shca);
+ if (ret < 0) {
+ ehca_gen_err("Cannot sense eHCA attributes.");
+ goto probe1;
+ }
+
+ ret = ehca_register_device(shca);
+ if (ret) {
+ ehca_gen_err("Cannot register Infiniband device");
+ goto probe1;
+ }
+
+ /* create event queues */
+ ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
+ if (ret) {
+ ehca_err(&shca->ib_device, "Cannot create EQ.");
+ goto probe2;
+ }
+
+ ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
+ if (ret) {
+ ehca_err(&shca->ib_device, "Cannot create NEQ.");
+ goto probe3;
+ }
+
+ /* create internal protection domain */
+ ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
+ if (IS_ERR(ibpd)) {
+ ehca_err(&shca->ib_device, "Cannot create internal PD.");
+ ret = PTR_ERR(ibpd);
+ goto probe4;
+ }
+
+ shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
+ shca->pd->ib_pd.device = &shca->ib_device;
+
+ /* create internal max MR */
+ ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
+
+ if (ret) {
+ ehca_err(&shca->ib_device, "Cannot create internal MR ret=%x",
+ ret);
+ goto probe5;
+ }
+
+ /* create AQP1 for port 1 */
+ if (ehca_open_aqp1 == 1) {
+ shca->sport[0].port_state = IB_PORT_DOWN;
+ ret = ehca_create_aqp1(shca, 1);
+ if (ret) {
+ ehca_err(&shca->ib_device,
+ "Cannot create AQP1 for port 1.");
+ goto probe6;
+ }
+ }
+
+ /* create AQP1 for port 2 */
+ if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
+ shca->sport[1].port_state = IB_PORT_DOWN;
+ ret = ehca_create_aqp1(shca, 2);
+ if (ret) {
+ ehca_err(&shca->ib_device,
+ "Cannot create AQP1 for port 2.");
+ goto probe7;
+ }
+ }
+
+ ehca_create_device_sysfs(dev);
+
+ spin_lock(&shca_list_lock);
+ list_add(&shca->shca_list, &shca_list);
+ spin_unlock(&shca_list_lock);
+
+ return 0;
+
+probe7:
+ ret = ehca_destroy_aqp1(&shca->sport[0]);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy AQP1 for port 1. ret=%x", ret);
+
+probe6:
+ ret = ehca_dereg_internal_maxmr(shca);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal MR. ret=%x", ret);
+
+probe5:
+ ret = ehca_dealloc_pd(&shca->pd->ib_pd);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal PD. ret=%x", ret);
+
+probe4:
+ ret = ehca_destroy_eq(shca, &shca->neq);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy NEQ. ret=%x", ret);
+
+probe3:
+ ret = ehca_destroy_eq(shca, &shca->eq);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy EQ. ret=%x", ret);
+
+probe2:
+ ib_unregister_device(&shca->ib_device);
+
+probe1:
+ ib_dealloc_device(&shca->ib_device);
+
+ return -EINVAL;
+}
+
+static int __devexit ehca_remove(struct ibmebus_dev *dev)
+{
+ struct ehca_shca *shca = dev->ofdev.dev.driver_data;
+ int ret;
+
+ ehca_remove_device_sysfs(dev);
+
+ if (ehca_open_aqp1 == 1) {
+ int i;
+ for (i = 0; i < shca->num_ports; i++) {
+ ret = ehca_destroy_aqp1(&shca->sport[i]);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy AQP1 for port %x "
+ "ret=%x", ret, i);
+ }
+ }
+
+ ib_unregister_device(&shca->ib_device);
+
+ ret = ehca_dereg_internal_maxmr(shca);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal MR. ret=%x", ret);
+
+ ret = ehca_dealloc_pd(&shca->pd->ib_pd);
+ if (ret)
+ ehca_err(&shca->ib_device,
+ "Cannot destroy internal PD. ret=%x", ret);
+
+ ret = ehca_destroy_eq(shca, &shca->eq);
+ if (ret)
+ ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x", ret);
+
+ ret = ehca_destroy_eq(shca, &shca->neq);
+ if (ret)
+ ehca_err(&shca->ib_device, "Canot destroy NEQ. ret=%x", ret);
+
+ ib_dealloc_device(&shca->ib_device);
+
+ spin_lock(&shca_list_lock);
+ list_del(&shca->shca_list);
+ spin_unlock(&shca_list_lock);
+
+ return ret;
+}
+
+static struct of_device_id ehca_device_table[] =
+{
+ {
+ .name = "lhca",
+ .compatible = "IBM,lhca",
+ },
+ {},
+};
+
+static struct ibmebus_driver ehca_driver = {
+ .name = "ehca",
+ .id_table = ehca_device_table,
+ .probe = ehca_probe,
+ .remove = ehca_remove,
+};
+
+void ehca_poll_eqs(unsigned long data)
+{
+ struct ehca_shca *shca;
+
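+ /* timer fallback: with poll_all_eqs set, every initialized EQ
+ * is polled once per second */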
+ spin_lock(&shca_list_lock);
+ list_for_each_entry(shca, &shca_list, shca_list) {
+ if (shca->eq.is_initialized)
+ ehca_tasklet_eq((unsigned long)(void*)shca);
+ }
+ mod_timer(&poll_eqs_timer, jiffies + HZ);
+ spin_unlock(&shca_list_lock);
+}
+
+int __init ehca_module_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "eHCA Infiniband Device Driver "
+ "(Rel.: SVNEHCA_0016)\n");
+ idr_init(&ehca_qp_idr);
+ idr_init(&ehca_cq_idr);
+ spin_lock_init(&ehca_qp_idr_lock);
+ spin_lock_init(&ehca_cq_idr_lock);
+
+ INIT_LIST_HEAD(&shca_list);
+ spin_lock_init(&shca_list_lock);
+
+ if ((ret = ehca_create_comp_pool())) {
+ ehca_gen_err("Cannot create comp pool.");
+ return ret;
+ }
+
+ if ((ret = ehca_create_slab_caches())) {
+ ehca_gen_err("Cannot create SLAB caches");
+ ret = -ENOMEM;
+ goto module_init1;
+ }
+
+ if ((ret = ibmebus_register_driver(&ehca_driver))) {
+ ehca_gen_err("Cannot register eHCA device driver");
+ ret = -EINVAL;
+ goto module_init2;
+ }
+
+ ehca_create_driver_sysfs(&ehca_driver);
+
+ if (ehca_poll_all_eqs != 1) {
+ ehca_gen_err("WARNING: EQ polling is disabled, "
+ "it is possible to lose interrupts.");
+ } else {
+ init_timer(&poll_eqs_timer);
+ poll_eqs_timer.function = ehca_poll_eqs;
+ poll_eqs_timer.expires = jiffies + HZ;
+ add_timer(&poll_eqs_timer);
+ }
+
+ return 0;
+
+module_init2:
+ ehca_destroy_slab_caches();
+
+module_init1:
+ ehca_destroy_comp_pool();
+ return ret;
+}
+
+void __exit ehca_module_exit(void)
+{
+ if (ehca_poll_all_eqs == 1)
+ del_timer_sync(&poll_eqs_timer);
+
+ ehca_remove_driver_sysfs(&ehca_driver);
+ ibmebus_unregister_driver(&ehca_driver);
+
+ ehca_destroy_slab_caches();
+
+ ehca_destroy_comp_pool();
+
+ idr_destroy(&ehca_cq_idr);
+ idr_destroy(&ehca_qp_idr);
+}
+
+module_init(ehca_module_init);
+module_exit(ehca_module_exit);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
new file mode 100644
index 00000000000..32a870660bf
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -0,0 +1,131 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * mcast functions
+ *
+ * Authors: Khadija Souissi <souissik@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+
+#define MAX_MC_LID 0xFFFE
+#define MIN_MC_LID 0xC000 /* Multicast limits */
+#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
+#define EHCA_VALID_MULTICAST_LID(lid) \
+ (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
+
+int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+ ib_device);
+ union ib_gid my_gid;
+ u64 subnet_prefix, interface_id, h_ret;
+
+ if (ibqp->qp_type != IB_QPT_UD) {
+ ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
+ return -EINVAL;
+ }
+
+ if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
+ ehca_err(ibqp->device, "invalid mulitcast gid");
+ return -EINVAL;
+ } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
+ ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
+ return -EINVAL;
+ }
+
+ memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+
+ subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
+ interface_id = be64_to_cpu(my_gid.global.interface_id);
+ h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ my_qp->galpas.kernel,
+ lid, subnet_prefix, interface_id);
+ if (h_ret != H_SUCCESS)
+ ehca_err(ibqp->device,
+ "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
+ "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
+
+ return ehca2ib_return_code(h_ret);
+}
+
+int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca = container_of(ibqp->pd->device,
+ struct ehca_shca, ib_device);
+ union ib_gid my_gid;
+ u64 subnet_prefix, interface_id, h_ret;
+
+ if (ibqp->qp_type != IB_QPT_UD) {
+ ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
+ return -EINVAL;
+ }
+
+ if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
+ ehca_err(ibqp->device, "invalid mulitcast gid");
+ return -EINVAL;
+ } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
+ ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid);
+ return -EINVAL;
+ }
+
+ memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+
+ subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
+ interface_id = be64_to_cpu(my_gid.global.interface_id);
+ h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ my_qp->galpas.kernel,
+ lid, subnet_prefix, interface_id);
+ if (h_ret != H_SUCCESS)
+ ehca_err(ibqp->device,
+ "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
+ "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
+
+ return ehca2ib_return_code(h_ret);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
new file mode 100644
index 00000000000..5ca65441e1d
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -0,0 +1,2261 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * MR/MW functions
+ *
+ * Authors: Dietmar Decker <ddecker@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_iverbs.h"
+#include "ehca_mrmw.h"
+#include "hcp_if.h"
+#include "hipz_hw.h"
+
+static struct kmem_cache *mr_cache;
+static struct kmem_cache *mw_cache;
+
+static struct ehca_mr *ehca_mr_new(void)
+{
+ struct ehca_mr *me;
+
+ me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+ if (me) {
+ memset(me, 0, sizeof(struct ehca_mr));
+ spin_lock_init(&me->mrlock);
+ } else
+ ehca_gen_err("alloc failed");
+
+ return me;
+}
+
+static void ehca_mr_delete(struct ehca_mr *me)
+{
+ kmem_cache_free(mr_cache, me);
+}
+
+static struct ehca_mw *ehca_mw_new(void)
+{
+ struct ehca_mw *me;
+
+ me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+ if (me) {
+ memset(me, 0, sizeof(struct ehca_mw));
+ spin_lock_init(&me->mwlock);
+ } else
+ ehca_gen_err("alloc failed");
+
+ return me;
+}
+
+static void ehca_mw_delete(struct ehca_mw *me)
+{
+ kmem_cache_free(mw_cache, me);
+}
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
+{
+ struct ib_mr *ib_mr;
+ int ret;
+ struct ehca_mr *e_maxmr;
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
+
+ if (shca->maxmr) {
+ e_maxmr = ehca_mr_new();
+ if (!e_maxmr) {
+ ehca_err(&shca->ib_device, "out of memory");
+ ib_mr = ERR_PTR(-ENOMEM);
+ goto get_dma_mr_exit0;
+ }
+
+ ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
+ mr_access_flags, e_pd,
+ &e_maxmr->ib.ib_mr.lkey,
+ &e_maxmr->ib.ib_mr.rkey);
+ if (ret) {
+ ib_mr = ERR_PTR(ret);
+ goto get_dma_mr_exit0;
+ }
+ ib_mr = &e_maxmr->ib.ib_mr;
+ } else {
+ ehca_err(&shca->ib_device, "no internal max-MR exist!");
+ ib_mr = ERR_PTR(-EINVAL);
+ goto get_dma_mr_exit0;
+ }
+
+get_dma_mr_exit0:
+ if (IS_ERR(ib_mr))
+ ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
+ PTR_ERR(ib_mr), pd, mr_access_flags);
+ return ib_mr;
+} /* end ehca_get_dma_mr() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ int mr_access_flags,
+ u64 *iova_start)
+{
+ struct ib_mr *ib_mr;
+ int ret;
+ struct ehca_mr *e_mr;
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+ u64 size;
+ struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+ u32 num_pages_mr;
+ u32 num_pages_4k; /* 4k portion "pages" */
+
+ if ((num_phys_buf <= 0) || !phys_buf_array) {
+ ehca_err(pd->device, "bad input values: num_phys_buf=%x "
+ "phys_buf_array=%p", num_phys_buf, phys_buf_array);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_phys_mr_exit0;
+ }
+ if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+ ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
+ /*
+ * Remote Write Access requires Local Write Access
+ * Remote Atomic Access requires Local Write Access
+ */
+ ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+ mr_access_flags);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_phys_mr_exit0;
+ }
+
+ /* check physical buffer list and calculate size */
+ ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
+ iova_start, &size);
+ if (ret) {
+ ib_mr = ERR_PTR(ret);
+ goto reg_phys_mr_exit0;
+ }
+ if ((size == 0) ||
+ (((u64)iova_start + size) < (u64)iova_start)) {
+ ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
+ size, iova_start);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_phys_mr_exit0;
+ }
+
+ e_mr = ehca_mr_new();
+ if (!e_mr) {
+ ehca_err(pd->device, "out of memory");
+ ib_mr = ERR_PTR(-ENOMEM);
+ goto reg_phys_mr_exit0;
+ }
+
+ /* determine number of MR pages */
+ num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
+ PAGE_SIZE - 1) / PAGE_SIZE);
+ num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
+ EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+
+ /* register MR on HCA */
+ if (ehca_mr_is_maxmr(size, iova_start)) {
+ e_mr->flags |= EHCA_MR_FLAG_MAXMR;
+ ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
+ e_pd, &e_mr->ib.ib_mr.lkey,
+ &e_mr->ib.ib_mr.rkey);
+ if (ret) {
+ ib_mr = ERR_PTR(ret);
+ goto reg_phys_mr_exit1;
+ }
+ } else {
+ pginfo.type = EHCA_MR_PGI_PHYS;
+ pginfo.num_pages = num_pages_mr;
+ pginfo.num_4k = num_pages_4k;
+ pginfo.num_phys_buf = num_phys_buf;
+ pginfo.phys_buf_array = phys_buf_array;
+ pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
+ EHCA_PAGESIZE);
+
+ ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
+ e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
+ &e_mr->ib.ib_mr.rkey);
+ if (ret) {
+ ib_mr = ERR_PTR(ret);
+ goto reg_phys_mr_exit1;
+ }
+ }
+
+ /* successful registration of all pages */
+ return &e_mr->ib.ib_mr;
+
+reg_phys_mr_exit1:
+ ehca_mr_delete(e_mr);
+reg_phys_mr_exit0:
+ if (IS_ERR(ib_mr))
+ ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
+ "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
+ PTR_ERR(ib_mr), pd, phys_buf_array,
+ num_phys_buf, mr_access_flags, iova_start);
+ return ib_mr;
+} /* end ehca_reg_phys_mr() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
+ struct ib_umem *region,
+ int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct ib_mr *ib_mr;
+ struct ehca_mr *e_mr;
+ struct ehca_shca *shca;
+ struct ehca_pd *e_pd;
+ struct ehca_mr_pginfo pginfo = {0};
+ int ret;
+ u32 num_pages_mr;
+ u32 num_pages_4k; /* 4k portion "pages" */
+
+ /* check pd before pd->device is dereferenced below */
+ if (!pd) {
+ ehca_gen_err("bad pd=%p", pd);
+ return ERR_PTR(-EFAULT);
+ }
+ shca = container_of(pd->device, struct ehca_shca, ib_device);
+ e_pd = container_of(pd, struct ehca_pd, ib_pd);
+ if (!region) {
+ ehca_err(pd->device, "bad input values: region=%p", region);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_user_mr_exit0;
+ }
+ if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+ ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
+ /*
+ * Remote Write Access requires Local Write Access
+ * Remote Atomic Access requires Local Write Access
+ */
+ ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+ mr_access_flags);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_user_mr_exit0;
+ }
+ if (region->page_size != PAGE_SIZE) {
+ ehca_err(pd->device, "page size not supported, "
+ "region->page_size=%x", region->page_size);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_user_mr_exit0;
+ }
+
+ if ((region->length == 0) ||
+ ((region->virt_base + region->length) < region->virt_base)) {
+ ehca_err(pd->device, "bad input values: length=%lx "
+ "virt_base=%lx", region->length, region->virt_base);
+ ib_mr = ERR_PTR(-EINVAL);
+ goto reg_user_mr_exit0;
+ }
+
+ e_mr = ehca_mr_new();
+ if (!e_mr) {
+ ehca_err(pd->device, "out of memory");
+ ib_mr = ERR_PTR(-ENOMEM);
+ goto reg_user_mr_exit0;
+ }
+
+ /* determine number of MR pages */
+ num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length +
+ PAGE_SIZE - 1) / PAGE_SIZE);
+ num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length +
+ EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+
+ /* register MR on HCA */
+ pginfo.type = EHCA_MR_PGI_USER;
+ pginfo.num_pages = num_pages_mr;
+ pginfo.num_4k = num_pages_4k;
+ pginfo.region = region;
+ pginfo.next_4k = region->offset / EHCA_PAGESIZE;
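+ /*
+ * prime next_chunk so that list_for_each_entry_continue() in
+ * ehca_set_pagebuf() starts at the first chunk of the region
+ */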
+ pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
+ (&region->chunk_list),
+ list);
+
+ ret = ehca_reg_mr(shca, e_mr, (u64*)region->virt_base,
+ region->length, mr_access_flags, e_pd, &pginfo,
+ &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
+ if (ret) {
+ ib_mr = ERR_PTR(ret);
+ goto reg_user_mr_exit1;
+ }
+
+ /* successful registration of all pages */
+ return &e_mr->ib.ib_mr;
+
+reg_user_mr_exit1:
+ ehca_mr_delete(e_mr);
+reg_user_mr_exit0:
+ if (IS_ERR(ib_mr))
+ ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
+ " udata=%p",
+ PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
+ return ib_mr;
+} /* end ehca_reg_user_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_rereg_phys_mr(struct ib_mr *mr,
+ int mr_rereg_mask,
+ struct ib_pd *pd,
+ struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ int mr_access_flags,
+ u64 *iova_start)
+{
+ int ret;
+
+ struct ehca_shca *shca =
+ container_of(mr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+ struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
+ u64 new_size;
+ u64 *new_start;
+ u32 new_acl;
+ struct ehca_pd *new_pd;
+ u32 tmp_lkey, tmp_rkey;
+ unsigned long sl_flags;
+ u32 num_pages_mr = 0;
+ u32 num_pages_4k = 0; /* 4k portion "pages" */
+ struct ehca_mr_pginfo pginfo = {0};
+ u32 cur_pid = current->tgid;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ (my_pd->ownpid != cur_pid)) {
+ ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+
+ if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
+ /* TODO not supported, because PHYP rereg hCall needs pages */
+ ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
+ "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+
+ if (mr_rereg_mask & IB_MR_REREG_PD) {
+ if (!pd) {
+ ehca_err(mr->device, "rereg with bad pd, pd=%p "
+ "mr_rereg_mask=%x", pd, mr_rereg_mask);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+ }
+
+ if ((mr_rereg_mask &
+ ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
+ (mr_rereg_mask == 0)) {
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+
+ /* check other parameters */
+ if (e_mr == shca->maxmr) {
+ /* should be impossible, however reject to be sure */
+ ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
+ "shca->maxmr=%p mr->lkey=%x",
+ mr, shca->maxmr, mr->lkey);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
+ if (e_mr->flags & EHCA_MR_FLAG_FMR) {
+ ehca_err(mr->device, "not supported for FMR, mr=%p "
+ "flags=%x", mr, e_mr->flags);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+ if (!phys_buf_array || num_phys_buf <= 0) {
+ ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
+ " phys_buf_array=%p num_phys_buf=%x",
+ mr_rereg_mask, phys_buf_array, num_phys_buf);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+ }
+ if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
+ (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+ ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
+ /*
+ * Remote Write Access requires Local Write Access
+ * Remote Atomic Access requires Local Write Access
+ */
+ ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
+ "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit0;
+ }
+
+ /* set requested values dependent on rereg request */
+ spin_lock_irqsave(&e_mr->mrlock, sl_flags);
+ new_start = e_mr->start; /* new == old address */
+ new_size = e_mr->size; /* new == old length */
+ new_acl = e_mr->acl; /* new == old access control */
+ new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); /* new == old PD */
+
+ if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ new_start = iova_start; /* change address */
+ /* check physical buffer list and calculate size */
+ ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
+ num_phys_buf, iova_start,
+ &new_size);
+ if (ret)
+ goto rereg_phys_mr_exit1;
+ if ((new_size == 0) ||
+ (((u64)iova_start + new_size) < (u64)iova_start)) {
+ ehca_err(mr->device, "bad input values: new_size=%lx "
+ "iova_start=%p", new_size, iova_start);
+ ret = -EINVAL;
+ goto rereg_phys_mr_exit1;
+ }
+ num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
+ PAGE_SIZE - 1) / PAGE_SIZE);
+ num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
+ EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+ pginfo.type = EHCA_MR_PGI_PHYS;
+ pginfo.num_pages = num_pages_mr;
+ pginfo.num_4k = num_pages_4k;
+ pginfo.num_phys_buf = num_phys_buf;
+ pginfo.phys_buf_array = phys_buf_array;
+ pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
+ EHCA_PAGESIZE);
+ }
+ if (mr_rereg_mask & IB_MR_REREG_ACCESS)
+ new_acl = mr_access_flags;
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ new_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+ ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
+ new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
+ if (ret)
+ goto rereg_phys_mr_exit1;
+
+ /* successful reregistration */
+ if (mr_rereg_mask & IB_MR_REREG_PD)
+ mr->pd = pd;
+ mr->lkey = tmp_lkey;
+ mr->rkey = tmp_rkey;
+
+rereg_phys_mr_exit1:
+ spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
+rereg_phys_mr_exit0:
+ if (ret)
+ ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
+ "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
+ "iova_start=%p",
+ ret, mr, mr_rereg_mask, pd, phys_buf_array,
+ num_phys_buf, mr_access_flags, iova_start);
+ return ret;
+} /* end ehca_rereg_phys_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
+{
+ int ret = 0;
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(mr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+ struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
+ u32 cur_pid = current->tgid;
+ unsigned long sl_flags;
+ struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ (my_pd->ownpid != cur_pid)) {
+ ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ ret = -EINVAL;
+ goto query_mr_exit0;
+ }
+
+ if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
+ ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
+ "e_mr->flags=%x", mr, e_mr, e_mr->flags);
+ ret = -EINVAL;
+ goto query_mr_exit0;
+ }
+
+ memset(mr_attr, 0, sizeof(struct ib_mr_attr));
+ spin_lock_irqsave(&e_mr->mrlock, sl_flags);
+
+ h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
+ "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ h_ret, mr, shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle, mr->lkey);
+ ret = ehca_mrmw_map_hrc_query_mr(h_ret);
+ goto query_mr_exit1;
+ }
+ mr_attr->pd = mr->pd;
+ mr_attr->device_virt_addr = hipzout.vaddr;
+ mr_attr->size = hipzout.len;
+ mr_attr->lkey = hipzout.lkey;
+ mr_attr->rkey = hipzout.rkey;
+ ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
+
+query_mr_exit1:
+ spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
+query_mr_exit0:
+ if (ret)
+ ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
+ ret, mr, mr_attr);
+ return ret;
+} /* end ehca_query_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dereg_mr(struct ib_mr *mr)
+{
+ int ret = 0;
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(mr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
+ struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
+ u32 cur_pid = current->tgid;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ (my_pd->ownpid != cur_pid)) {
+ ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ ret = -EINVAL;
+ goto dereg_mr_exit0;
+ }
+
+ if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
+ ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
+ "e_mr->flags=%x", mr, e_mr, e_mr->flags);
+ ret = -EINVAL;
+ goto dereg_mr_exit0;
+ } else if (e_mr == shca->maxmr) {
+ /* should be impossible, however reject to be sure */
+ ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
+ "shca->maxmr=%p mr->lkey=%x",
+ mr, shca->maxmr, mr->lkey);
+ ret = -EINVAL;
+ goto dereg_mr_exit0;
+ }
+
+ /* TODO: BUSY: MR still has bound window(s) */
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
+ "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+ h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle, mr->lkey);
+ ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+ goto dereg_mr_exit0;
+ }
+
+ /* successful deregistration */
+ ehca_mr_delete(e_mr);
+
+dereg_mr_exit0:
+ if (ret)
+ ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
+ return ret;
+} /* end ehca_dereg_mr() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
+{
+ struct ib_mw *ib_mw;
+ u64 h_ret;
+ struct ehca_mw *e_mw;
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
+ struct ehca_mw_hipzout_parms hipzout = {{0},0};
+
+ e_mw = ehca_mw_new();
+ if (!e_mw) {
+ ib_mw = ERR_PTR(-ENOMEM);
+ goto alloc_mw_exit0;
+ }
+
+ h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
+ e_pd->fw_pd, &hipzout);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
+ "shca=%p hca_hndl=%lx mw=%p",
+ h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
+ ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
+ goto alloc_mw_exit1;
+ }
+ /* successful MW allocation */
+ e_mw->ipz_mw_handle = hipzout.handle;
+ e_mw->ib_mw.rkey = hipzout.rkey;
+ return &e_mw->ib_mw;
+
+alloc_mw_exit1:
+ ehca_mw_delete(e_mw);
+alloc_mw_exit0:
+ if (IS_ERR(ib_mw))
+ ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
+ return ib_mw;
+} /* end ehca_alloc_mw() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_bind_mw(struct ib_qp *qp,
+ struct ib_mw *mw,
+ struct ib_mw_bind *mw_bind)
+{
+ /* TODO: not supported up to now */
+ ehca_gen_err("bind MW currently not supported by HCAD");
+
+ return -EPERM;
+} /* end ehca_bind_mw() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dealloc_mw(struct ib_mw *mw)
+{
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(mw->device, struct ehca_shca, ib_device);
+ struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
+
+ h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
+ "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
+ h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
+ e_mw->ipz_mw_handle.handle);
+ return ehca_mrmw_map_hrc_free_mw(h_ret);
+ }
+ /* successful deallocation */
+ ehca_mw_delete(e_mw);
+ return 0;
+} /* end ehca_dealloc_mw() */
+
+/*----------------------------------------------------------------------*/
+
+struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
+ int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct ib_fmr *ib_fmr;
+ struct ehca_shca *shca =
+ container_of(pd->device, struct ehca_shca, ib_device);
+ struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_mr *e_fmr;
+ int ret;
+ u32 tmp_lkey, tmp_rkey;
+ struct ehca_mr_pginfo pginfo = {0};
+
+ /* check other parameters */
+ if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
+ ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
+ /*
+ * Remote Write Access requires Local Write Access
+ * Remote Atomic Access requires Local Write Access
+ */
+ ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+ mr_access_flags);
+ ib_fmr = ERR_PTR(-EINVAL);
+ goto alloc_fmr_exit0;
+ }
+ if (mr_access_flags & IB_ACCESS_MW_BIND) {
+ ehca_err(pd->device, "bad input values: mr_access_flags=%x",
+ mr_access_flags);
+ ib_fmr = ERR_PTR(-EINVAL);
+ goto alloc_fmr_exit0;
+ }
+ if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
+ ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
+ "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
+ fmr_attr->max_pages, fmr_attr->max_maps,
+ fmr_attr->page_shift);
+ ib_fmr = ERR_PTR(-EINVAL);
+ goto alloc_fmr_exit0;
+ }
+ if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
+ ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
+ ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
+ fmr_attr->page_shift);
+ ib_fmr = ERR_PTR(-EINVAL);
+ goto alloc_fmr_exit0;
+ }
+
+ e_fmr = ehca_mr_new();
+ if (!e_fmr) {
+ ib_fmr = ERR_PTR(-ENOMEM);
+ goto alloc_fmr_exit0;
+ }
+ e_fmr->flags |= EHCA_MR_FLAG_FMR;
+
+ /* register MR on HCA */
+ ret = ehca_reg_mr(shca, e_fmr, NULL,
+ fmr_attr->max_pages * (1 << fmr_attr->page_shift),
+ mr_access_flags, e_pd, &pginfo,
+ &tmp_lkey, &tmp_rkey);
+ if (ret) {
+ ib_fmr = ERR_PTR(ret);
+ goto alloc_fmr_exit1;
+ }
+
+ /* successful */
+ e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
+ e_fmr->fmr_max_pages = fmr_attr->max_pages;
+ e_fmr->fmr_max_maps = fmr_attr->max_maps;
+ e_fmr->fmr_map_cnt = 0;
+ return &e_fmr->ib.ib_fmr;
+
+alloc_fmr_exit1:
+ ehca_mr_delete(e_fmr);
+alloc_fmr_exit0:
+ if (IS_ERR(ib_fmr))
+ ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
+ "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
+ mr_access_flags, fmr_attr);
+ return ib_fmr;
+} /* end ehca_alloc_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_map_phys_fmr(struct ib_fmr *fmr,
+ u64 *page_list,
+ int list_len,
+ u64 iova)
+{
+ int ret;
+ struct ehca_shca *shca =
+ container_of(fmr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
+ struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
+ struct ehca_mr_pginfo pginfo = {0};
+ u32 tmp_lkey, tmp_rkey;
+
+ if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
+ ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+ e_fmr, e_fmr->flags);
+ ret = -EINVAL;
+ goto map_phys_fmr_exit0;
+ }
+ ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
+ if (ret)
+ goto map_phys_fmr_exit0;
+ if (iova % e_fmr->fmr_page_size) {
+ /* iova must be aligned to the FMR page size */
+ ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
+ iova, e_fmr->fmr_page_size);
+ ret = -EINVAL;
+ goto map_phys_fmr_exit0;
+ }
+ if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
+ /* HCAD does not limit the maps, however trace this anyway */
+ ehca_info(fmr->device, "map limit exceeded, fmr=%p "
+ "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
+ fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
+ }
+
+ pginfo.type = EHCA_MR_PGI_FMR;
+ pginfo.num_pages = list_len;
+ pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
+ pginfo.page_list = page_list;
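+ /* iova is fmr_page_size aligned (checked above), so this is 0 */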
+ pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
+ EHCA_PAGESIZE);
+
+ ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
+ list_len * e_fmr->fmr_page_size,
+ e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
+ if (ret)
+ goto map_phys_fmr_exit0;
+
+ /* successful reregistration */
+ e_fmr->fmr_map_cnt++;
+ e_fmr->ib.ib_fmr.lkey = tmp_lkey;
+ e_fmr->ib.ib_fmr.rkey = tmp_rkey;
+ return 0;
+
+map_phys_fmr_exit0:
+ if (ret)
+ ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
+ "iova=%lx",
+ ret, fmr, page_list, list_len, iova);
+ return ret;
+} /* end ehca_map_phys_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_unmap_fmr(struct list_head *fmr_list)
+{
+ int ret = 0;
+ struct ib_fmr *ib_fmr;
+ struct ehca_shca *shca = NULL;
+ struct ehca_shca *prev_shca;
+ struct ehca_mr *e_fmr;
+ u32 num_fmr = 0;
+ u32 unmap_fmr_cnt = 0;
+
+ /* check that all FMRs belong to the same SHCA, and check the internal flag */
+ list_for_each_entry(ib_fmr, fmr_list, list) {
+ prev_shca = shca;
+ if (!ib_fmr) {
+ ehca_gen_err("bad fmr=%p in list", ib_fmr);
+ ret = -EINVAL;
+ goto unmap_fmr_exit0;
+ }
+ shca = container_of(ib_fmr->device, struct ehca_shca,
+ ib_device);
+ e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
+ if ((shca != prev_shca) && prev_shca) {
+ ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
+ "prev_shca=%p e_fmr=%p",
+ shca, prev_shca, e_fmr);
+ ret = -EINVAL;
+ goto unmap_fmr_exit0;
+ }
+ if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
+ ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
+ "e_fmr->flags=%x", e_fmr, e_fmr->flags);
+ ret = -EINVAL;
+ goto unmap_fmr_exit0;
+ }
+ num_fmr++;
+ }
+
+ /* loop over all FMRs to unmap */
+ list_for_each_entry(ib_fmr, fmr_list, list) {
+ unmap_fmr_cnt++;
+ e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
+ shca = container_of(ib_fmr->device, struct ehca_shca,
+ ib_device);
+ ret = ehca_unmap_one_fmr(shca, e_fmr);
+ if (ret) {
+ /* unmap failed, stop unmapping of rest of FMRs */
+ ehca_err(&shca->ib_device, "unmap of one FMR failed, "
+ "stop rest, e_fmr=%p num_fmr=%x "
+ "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
+ unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
+ goto unmap_fmr_exit0;
+ }
+ }
+
+unmap_fmr_exit0:
+ if (ret)
+ ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
+ ret, fmr_list, num_fmr, unmap_fmr_cnt);
+ return ret;
+} /* end ehca_unmap_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dealloc_fmr(struct ib_fmr *fmr)
+{
+ int ret;
+ u64 h_ret;
+ struct ehca_shca *shca =
+ container_of(fmr->device, struct ehca_shca, ib_device);
+ struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
+
+ if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
+ ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
+ e_fmr, e_fmr->flags);
+ ret = -EINVAL;
+ goto free_fmr_exit0;
+ }
+
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
+ "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle, fmr->lkey);
+ ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+ goto free_fmr_exit0;
+ }
+ /* successful deregistration */
+ ehca_mr_delete(e_fmr);
+ return 0;
+
+free_fmr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
+ return ret;
+} /* end ehca_dealloc_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_mr(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ u64 *iova_start,
+ u64 size,
+ int acl,
+ struct ehca_pd *e_pd,
+ struct ehca_mr_pginfo *pginfo,
+ u32 *lkey, /*OUT*/
+ u32 *rkey) /*OUT*/
+{
+ int ret;
+ u64 h_ret;
+ u32 hipz_acl;
+ struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+ ehca_mrmw_map_acl(acl, &hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
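+ /* optionally request a high-performance MR (bit 0), selected via ehca_use_hp_mr */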
+ if (ehca_use_hp_mr == 1)
+ hipz_acl |= 0x00000001;
+
+ h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
+ (u64)iova_start, size, hipz_acl,
+ e_pd->fw_pd, &hipzout);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
+ "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
+ ret = ehca_mrmw_map_hrc_alloc(h_ret);
+ goto ehca_reg_mr_exit0;
+ }
+
+ e_mr->ipz_mr_handle = hipzout.handle;
+
+ ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+ if (ret)
+ goto ehca_reg_mr_exit1;
+
+ /* successful registration */
+ e_mr->num_pages = pginfo->num_pages;
+ e_mr->num_4k = pginfo->num_4k;
+ e_mr->start = iova_start;
+ e_mr->size = size;
+ e_mr->acl = acl;
+ *lkey = hipzout.lkey;
+ *rkey = hipzout.rkey;
+ return 0;
+
+ehca_reg_mr_exit1:
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
+ "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
+ "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
+ h_ret, shca, e_mr, iova_start, size, acl, e_pd,
+ hipzout.lkey, pginfo, pginfo->num_pages,
+ pginfo->num_4k, ret);
+ ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
+ "not recoverable");
+ }
+ehca_reg_mr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+ "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
+ "num_pages=%lx num_4k=%lx",
+ ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
+ pginfo->num_pages, pginfo->num_4k);
+ return ret;
+} /* end ehca_reg_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_mr_rpages(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ int ret = 0;
+ u64 h_ret;
+ u32 rnum;
+ u64 rpage;
+ u32 i;
+ u64 *kpage;
+
+ kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!kpage) {
+ ehca_err(&shca->ib_device, "kpage alloc failed");
+ ret = -ENOMEM;
+ goto ehca_reg_mr_rpages_exit0;
+ }
+
+ /*
+ * register at most 512 pages per shot: kpage is one hCall block
+ * (H_CB_ALIGNMENT bytes) holding up to 512 u64 page addresses
+ */
+ for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {
+
+ if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
+ rnum = pginfo->num_4k % 512; /* last shot */
+ if (rnum == 0)
+ rnum = 512; /* last shot is full */
+ } else
+ rnum = 512;
+
+ if (rnum > 1) {
+ ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
+ if (ret) {
+ ehca_err(&shca->ib_device, "ehca_set_pagebuf "
+ "bad rc, ret=%x rnum=%x kpage=%p",
+ ret, rnum, kpage);
+ ret = -EFAULT;
+ goto ehca_reg_mr_rpages_exit1;
+ }
+ rpage = virt_to_abs(kpage);
+ if (!rpage) {
+ ehca_err(&shca->ib_device, "kpage=%p i=%x",
+ kpage, i);
+ ret = -EFAULT;
+ goto ehca_reg_mr_rpages_exit1;
+ }
+ } else { /* rnum==1 */
+ ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
+ if (ret) {
+ ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
+ "bad rc, ret=%x i=%x", ret, i);
+ ret = -EFAULT;
+ goto ehca_reg_mr_rpages_exit1;
+ }
+ }
+
+ h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
+ 0, /* pagesize 4k */
+ 0, rpage, rnum);
+
+ if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
+ /*
+ * check for 'registration complete'==H_SUCCESS
+ * and for 'page registered'==H_PAGE_REGISTERED
+ */
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "last "
+ "hipz_reg_rpage_mr failed, h_ret=%lx "
+ "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
+ " lkey=%x", h_ret, e_mr, i,
+ shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle,
+ e_mr->ib.ib_mr.lkey);
+ ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
+ break;
+ } else
+ ret = 0;
+ } else if (h_ret != H_PAGE_REGISTERED) {
+ ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
+ "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
+ "mr_hndl=%lx", h_ret, e_mr, i,
+ e_mr->ib.ib_mr.lkey,
+ shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle);
+ ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
+ break;
+ } else
+ ret = 0;
+ } /* end for(i) */
+
+ehca_reg_mr_rpages_exit1:
+ kfree(kpage);
+ehca_reg_mr_rpages_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
+ "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
+ pginfo->num_pages, pginfo->num_4k);
+ return ret;
+} /* end ehca_reg_mr_rpages() */
+
+/*----------------------------------------------------------------------*/
+
+inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ u64 *iova_start,
+ u64 size,
+ u32 acl,
+ struct ehca_pd *e_pd,
+ struct ehca_mr_pginfo *pginfo,
+ u32 *lkey, /*OUT*/
+ u32 *rkey) /*OUT*/
+{
+ int ret;
+ u64 h_ret;
+ u32 hipz_acl;
+ u64 *kpage;
+ u64 rpage;
+ struct ehca_mr_pginfo pginfo_save;
+ struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+ ehca_mrmw_map_acl(acl, &hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+
+ kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!kpage) {
+ ehca_err(&shca->ib_device, "kpage alloc failed");
+ ret = -ENOMEM;
+ goto ehca_rereg_mr_rereg1_exit0;
+ }
+
+ pginfo_save = *pginfo;
+ ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
+ if (ret) {
+ ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
+ "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
+ e_mr, pginfo, pginfo->type, pginfo->num_pages,
+ pginfo->num_4k, kpage);
+ goto ehca_rereg_mr_rereg1_exit1;
+ }
+ rpage = virt_to_abs(kpage);
+ if (!rpage) {
+ ehca_err(&shca->ib_device, "kpage=%p", kpage);
+ ret = -EFAULT;
+ goto ehca_rereg_mr_rereg1_exit1;
+ }
+ h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
+ (u64)iova_start, size, hipz_acl,
+ e_pd->fw_pd, rpage, &hipzout);
+ if (h_ret != H_SUCCESS) {
+ /*
+ * reregistration unsuccessful, try it again with the 3 hCalls,
+ * e.g. this is required in case H_MR_CONDITION
+ * (MW bound or MR is shared)
+ */
+ ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
+ "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
+ *pginfo = pginfo_save;
+ ret = -EAGAIN;
+ } else if ((u64*)hipzout.vaddr != iova_start) {
+ ehca_err(&shca->ib_device, "PHYP changed iova_start in "
+ "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
+ "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
+ hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
+ e_mr->ib.ib_mr.lkey, hipzout.lkey);
+ ret = -EFAULT;
+ } else {
+ /*
+ * successful reregistration
+ * note: start and start_out are identical for eServer HCAs
+ */
+ e_mr->num_pages = pginfo->num_pages;
+ e_mr->num_4k = pginfo->num_4k;
+ e_mr->start = iova_start;
+ e_mr->size = size;
+ e_mr->acl = acl;
+ *lkey = hipzout.lkey;
+ *rkey = hipzout.rkey;
+ }
+
+ehca_rereg_mr_rereg1_exit1:
+ kfree(kpage);
+ehca_rereg_mr_rereg1_exit0:
+ if ( ret && (ret != -EAGAIN) )
+ ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
+ "pginfo=%p num_pages=%lx num_4k=%lx",
+ ret, *lkey, *rkey, pginfo, pginfo->num_pages,
+ pginfo->num_4k);
+ return ret;
+} /* end ehca_rereg_mr_rereg1() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_rereg_mr(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ u64 *iova_start,
+ u64 size,
+ int acl,
+ struct ehca_pd *e_pd,
+ struct ehca_mr_pginfo *pginfo,
+ u32 *lkey,
+ u32 *rkey)
+{
+ int ret = 0;
+ u64 h_ret;
+ int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
+ int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
+
+ /* first determine reregistration hCall(s) */
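+ /*
+ * a single rereg hCall handles at most 512 4K pages and cannot
+ * increase the page count, so larger requests fall back to the
+ * 3-hCall sequence: free old MR, alloc new MR, register pages
+ */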
+ if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
+ (pginfo->num_4k > e_mr->num_4k)) {
+ ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
+ "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
+ rereg_1_hcall = 0;
+ rereg_3_hcall = 1;
+ }
+
+ if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
+ rereg_1_hcall = 0;
+ rereg_3_hcall = 1;
+ e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
+ ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
+ e_mr);
+ }
+
+ if (rereg_1_hcall) {
+ ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
+ acl, e_pd, pginfo, lkey, rkey);
+ if (ret) {
+ if (ret == -EAGAIN)
+ rereg_3_hcall = 1;
+ else
+ goto ehca_rereg_mr_exit0;
+ }
+ }
+
+ if (rereg_3_hcall) {
+ struct ehca_mr save_mr;
+
+ /* first deregister old MR */
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+ "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
+ "mr->lkey=%x",
+ h_ret, e_mr, shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle,
+ e_mr->ib.ib_mr.lkey);
+ ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+ goto ehca_rereg_mr_exit0;
+ }
+ /* clean ehca_mr_t, without changing struct ib_mr and lock */
+ save_mr = *e_mr;
+ ehca_mr_deletenew(e_mr);
+
+ /* set some MR values */
+ e_mr->flags = save_mr.flags;
+ e_mr->fmr_page_size = save_mr.fmr_page_size;
+ e_mr->fmr_max_pages = save_mr.fmr_max_pages;
+ e_mr->fmr_max_maps = save_mr.fmr_max_maps;
+ e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
+
+ ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
+ e_pd, pginfo, lkey, rkey);
+ if (ret) {
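+ /* restore e_mr fields from flags onward (ib_mr and lock are kept) */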
+ u32 offset = offsetof(struct ehca_mr, flags);
+ memcpy(&e_mr->flags, &(save_mr.flags),
+ sizeof(struct ehca_mr) - offset);
+ goto ehca_rereg_mr_exit0;
+ }
+ }
+
+ehca_rereg_mr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
+ "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
+ "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+ "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
+ acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
+ rereg_1_hcall, rereg_3_hcall);
+ return ret;
+} /* end ehca_rereg_mr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_unmap_one_fmr(struct ehca_shca *shca,
+ struct ehca_mr *e_fmr)
+{
+ int ret = 0;
+ u64 h_ret;
+ int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
+ int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
+ struct ehca_pd *e_pd =
+ container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
+ struct ehca_mr save_fmr;
+ u32 tmp_lkey, tmp_rkey;
+ struct ehca_mr_pginfo pginfo = {0};
+ struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+ /* first check if reregistration hCall can be used for unmap */
+ if (e_fmr->fmr_max_pages > 512) {
+ rereg_1_hcall = 0;
+ rereg_3_hcall = 1;
+ }
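+ /* (a single rereg hCall passes at most 512 page entries in one block) */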
+
+ if (rereg_1_hcall) {
+ /*
+ * note: after using rereg hcall with len=0,
+ * rereg hcall must be used again for registering pages
+ */
+ h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
+ 0, 0, e_pd->fw_pd, 0, &hipzout);
+ if (h_ret != H_SUCCESS) {
+ /*
+ * should not happen, because length checked above,
+ * FMRs are not shared and no MW bound to FMRs
+ */
+ ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
+ "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
+ "mr_hndl=%lx lkey=%x lkey_out=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle,
+ e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
+ rereg_3_hcall = 1;
+ } else {
+ /* successful reregistration */
+ e_fmr->start = NULL;
+ e_fmr->size = 0;
+ tmp_lkey = hipzout.lkey;
+ tmp_rkey = hipzout.rkey;
+ }
+ }
+
+ if (rereg_3_hcall) {
+ /* first free old FMR */
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+ "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+ "lkey=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle,
+ e_fmr->ib.ib_fmr.lkey);
+ ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+ goto ehca_unmap_one_fmr_exit0;
+ }
+ /* clean ehca_mr_t, without changing lock */
+ save_fmr = *e_fmr;
+ ehca_mr_deletenew(e_fmr);
+
+ /* set some MR values */
+ e_fmr->flags = save_fmr.flags;
+ e_fmr->fmr_page_size = save_fmr.fmr_page_size;
+ e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
+ e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
+ e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
+ e_fmr->acl = save_fmr.acl;
+
+ pginfo.type = EHCA_MR_PGI_FMR;
+ pginfo.num_pages = 0;
+ pginfo.num_4k = 0;
+ ret = ehca_reg_mr(shca, e_fmr, NULL,
+ (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
+ e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
+ &tmp_rkey);
+ if (ret) {
+ u32 offset = offsetof(struct ehca_mr, flags);
+ /* restore the saved FMR fields on failure */
+ memcpy(&e_fmr->flags, &(save_fmr.flags),
+ sizeof(struct ehca_mr) - offset);
+ goto ehca_unmap_one_fmr_exit0;
+ }
+ }
+
+ehca_unmap_one_fmr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
+ "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
+ ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
+ rereg_1_hcall, rereg_3_hcall);
+ return ret;
+} /* end ehca_unmap_one_fmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_smr(struct ehca_shca *shca,
+ struct ehca_mr *e_origmr,
+ struct ehca_mr *e_newmr,
+ u64 *iova_start,
+ int acl,
+ struct ehca_pd *e_pd,
+ u32 *lkey, /*OUT*/
+ u32 *rkey) /*OUT*/
+{
+ int ret = 0;
+ u64 h_ret;
+ u32 hipz_acl;
+ struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+ ehca_mrmw_map_acl(acl, &hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+
+ h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
+ (u64)iova_start, hipz_acl, e_pd->fw_pd,
+ &hipzout);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
+ "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
+ "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
+ shca->ipz_hca_handle.handle,
+ e_origmr->ipz_mr_handle.handle,
+ e_origmr->ib.ib_mr.lkey);
+ ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
+ goto ehca_reg_smr_exit0;
+ }
+ /* successful registration */
+ e_newmr->num_pages = e_origmr->num_pages;
+ e_newmr->num_4k = e_origmr->num_4k;
+ e_newmr->start = iova_start;
+ e_newmr->size = e_origmr->size;
+ e_newmr->acl = acl;
+ e_newmr->ipz_mr_handle = hipzout.handle;
+ *lkey = hipzout.lkey;
+ *rkey = hipzout.rkey;
+ return 0;
+
+ehca_reg_smr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
+ "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
+ ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
+ return ret;
+} /* end ehca_reg_smr() */
+
+/*----------------------------------------------------------------------*/
+
+/* register internal max-MR to internal SHCA */
+int ehca_reg_internal_maxmr(
+ struct ehca_shca *shca,
+ struct ehca_pd *e_pd,
+ struct ehca_mr **e_maxmr) /*OUT*/
+{
+ int ret;
+ struct ehca_mr *e_mr;
+ u64 *iova_start;
+ u64 size_maxmr;
+ struct ehca_mr_pginfo pginfo = {0};
+ struct ib_phys_buf ib_pbuf;
+ u32 num_pages_mr;
+ u32 num_pages_4k; /* 4k portion "pages" */
+
+ e_mr = ehca_mr_new();
+ if (!e_mr) {
+ ehca_err(&shca->ib_device, "out of memory");
+ ret = -ENOMEM;
+ goto ehca_reg_internal_maxmr_exit0;
+ }
+ e_mr->flags |= EHCA_MR_FLAG_MAXMR;
+
+ /* register internal max-MR on HCA */
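+ /*
+ * the max-MR covers the kernel linear mapping: virtual addresses
+ * from KERNELBASE up, physical addresses 0..(high_memory - PAGE_OFFSET)
+ */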
+ size_maxmr = (u64)high_memory - PAGE_OFFSET;
+ iova_start = (u64*)KERNELBASE;
+ ib_pbuf.addr = 0;
+ ib_pbuf.size = size_maxmr;
+ num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
+ PAGE_SIZE - 1) / PAGE_SIZE);
+ num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
+ EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
+
+ pginfo.type = EHCA_MR_PGI_PHYS;
+ pginfo.num_pages = num_pages_mr;
+ pginfo.num_4k = num_pages_4k;
+ pginfo.num_phys_buf = 1;
+ pginfo.phys_buf_array = &ib_pbuf;
+
+ ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
+ &pginfo, &e_mr->ib.ib_mr.lkey,
+ &e_mr->ib.ib_mr.rkey);
+ if (ret) {
+ ehca_err(&shca->ib_device, "reg of internal max MR failed, "
+ "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
+ "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
+ num_pages_mr, num_pages_4k);
+ goto ehca_reg_internal_maxmr_exit1;
+ }
+
+ /* successful registration of all pages */
+ e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
+ e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
+ e_mr->ib.ib_mr.uobject = NULL;
+ atomic_inc(&(e_pd->ib_pd.usecnt));
+ atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
+ *e_maxmr = e_mr;
+ return 0;
+
+ehca_reg_internal_maxmr_exit1:
+ ehca_mr_delete(e_mr);
+ehca_reg_internal_maxmr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
+ ret, shca, e_pd, e_maxmr);
+ return ret;
+} /* end ehca_reg_internal_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_reg_maxmr(struct ehca_shca *shca,
+ struct ehca_mr *e_newmr,
+ u64 *iova_start,
+ int acl,
+ struct ehca_pd *e_pd,
+ u32 *lkey,
+ u32 *rkey)
+{
+ u64 h_ret;
+ struct ehca_mr *e_origmr = shca->maxmr;
+ u32 hipz_acl;
+ struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+
+ ehca_mrmw_map_acl(acl, &hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+
+ h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
+ (u64)iova_start, hipz_acl, e_pd->fw_pd,
+ &hipzout);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
+ "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+ h_ret, e_origmr, shca->ipz_hca_handle.handle,
+ e_origmr->ipz_mr_handle.handle,
+ e_origmr->ib.ib_mr.lkey);
+ return ehca_mrmw_map_hrc_reg_smr(h_ret);
+ }
+ /* successful registration */
+ e_newmr->num_pages = e_origmr->num_pages;
+ e_newmr->num_4k = e_origmr->num_4k;
+ e_newmr->start = iova_start;
+ e_newmr->size = e_origmr->size;
+ e_newmr->acl = acl;
+ e_newmr->ipz_mr_handle = hipzout.handle;
+ *lkey = hipzout.lkey;
+ *rkey = hipzout.rkey;
+ return 0;
+} /* end ehca_reg_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
+{
+ int ret;
+ struct ehca_mr *e_maxmr;
+ struct ib_pd *ib_pd;
+
+ if (!shca->maxmr) {
+ ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
+ ret = -EINVAL;
+ goto ehca_dereg_internal_maxmr_exit0;
+ }
+
+ e_maxmr = shca->maxmr;
+ ib_pd = e_maxmr->ib.ib_mr.pd;
+ shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
+
+ ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
+ if (ret) {
+ ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
+ "ret=%x e_maxmr=%p shca=%p lkey=%x",
+ ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
+ shca->maxmr = e_maxmr;
+ goto ehca_dereg_internal_maxmr_exit0;
+ }
+
+ atomic_dec(&ib_pd->usecnt);
+
+ehca_dereg_internal_maxmr_exit0:
+ if (ret)
+ ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
+ ret, shca, shca->maxmr);
+ return ret;
+} /* end ehca_dereg_internal_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * check the physical buffer array of the MR verbs for validity
+ * and calculate the MR size
+ */
+int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ u64 *iova_start,
+ u64 *size)
+{
+ struct ib_phys_buf *pbuf = phys_buf_array;
+ u64 size_count = 0;
+ u32 i;
+
+ if (num_phys_buf == 0) {
+ ehca_gen_err("bad phys buf array len, num_phys_buf=0");
+ return -EINVAL;
+ }
+ /* check first buffer */
+ if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
+ ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
+ "pbuf->addr=%lx pbuf->size=%lx",
+ iova_start, pbuf->addr, pbuf->size);
+ return -EINVAL;
+ }
+ if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
+ (num_phys_buf > 1)) {
+ ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
+ "pbuf->size=%lx", pbuf->addr, pbuf->size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_phys_buf; i++) {
+ if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
+ ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
+ "pbuf->size=%lx",
+ i, pbuf->addr, pbuf->size);
+ return -EINVAL;
+ }
+ if (((i > 0) && /* not 1st */
+ (i < (num_phys_buf - 1)) && /* not last */
+ (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
+ ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+ i, pbuf->size);
+ return -EINVAL;
+ }
+ size_count += pbuf->size;
+ pbuf++;
+ }
+
+ *size = size_count;
+ return 0;
+} /* end ehca_mr_chk_buf_and_calc_size() */
+
+/*----------------------------------------------------------------------*/
+
+/* check the page list of the map FMR verb for validity */
+int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
+ u64 *page_list,
+ int list_len)
+{
+ u32 i;
+ u64 *page;
+
+ if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
+ ehca_gen_err("bad list_len, list_len=%x "
+ "e_fmr->fmr_max_pages=%x fmr=%p",
+ list_len, e_fmr->fmr_max_pages, e_fmr);
+ return -EINVAL;
+ }
+
+ /* each page must be aligned */
+ page = page_list;
+ for (i = 0; i < list_len; i++) {
+ if (*page % e_fmr->fmr_page_size) {
+ ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
+ "fmr_page_size=%x", i, *page, page, e_fmr,
+ e_fmr->fmr_page_size);
+ return -EINVAL;
+ }
+ page++;
+ }
+
+ return 0;
+} /* end ehca_fmr_check_page_list() */
+
+/*----------------------------------------------------------------------*/
+
+/* setup page buffer from page info */
+int ehca_set_pagebuf(struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo,
+ u32 number,
+ u64 *kpage)
+{
+ int ret = 0;
+ struct ib_umem_chunk *prev_chunk;
+ struct ib_umem_chunk *chunk;
+ struct ib_phys_buf *pbuf;
+ u64 *fmrlist;
+ u64 num4k, pgaddr, offs4k;
+ u32 i = 0;
+ u32 j = 0;
+
+ if (pginfo->type == EHCA_MR_PGI_PHYS) {
+ /* loop over desired phys_buf_array entries */
+ while (i < number) {
+ pbuf = pginfo->phys_buf_array + pginfo->next_buf;
+ num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
+ EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
+ offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
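+ /* walk this buffer's 4K sub-pages, offs4k..offs4k+num4k-1 */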
+ while (pginfo->next_4k < offs4k + num4k) {
+ /* sanity check */
+ if ((pginfo->page_cnt >= pginfo->num_pages) ||
+ (pginfo->page_4k_cnt >= pginfo->num_4k)) {
+ ehca_gen_err("page_cnt >= num_pages, "
+ "page_cnt=%lx "
+ "num_pages=%lx "
+ "page_4k_cnt=%lx "
+ "num_4k=%lx i=%x",
+ pginfo->page_cnt,
+ pginfo->num_pages,
+ pginfo->page_4k_cnt,
+ pginfo->num_4k, i);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_exit0;
+ }
+ *kpage = phys_to_abs(
+ (pbuf->addr & EHCA_PAGEMASK)
+ + (pginfo->next_4k * EHCA_PAGESIZE));
+ if ( !(*kpage) && pbuf->addr ) {
+ ehca_gen_err("pbuf->addr=%lx "
+ "pbuf->size=%lx "
+ "next_4k=%lx", pbuf->addr,
+ pbuf->size,
+ pginfo->next_4k);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_exit0;
+ }
+ (pginfo->page_4k_cnt)++;
+ (pginfo->next_4k)++;
+ if (pginfo->next_4k %
+ (PAGE_SIZE / EHCA_PAGESIZE) == 0)
+ (pginfo->page_cnt)++;
+ kpage++;
+ i++;
+ if (i >= number) break;
+ }
+ if (pginfo->next_4k >= offs4k + num4k) {
+ (pginfo->next_buf)++;
+ pginfo->next_4k = 0;
+ }
+ }
+ } else if (pginfo->type == EHCA_MR_PGI_USER) {
+ /* loop over desired chunk entries */
+ chunk = pginfo->next_chunk;
+ prev_chunk = pginfo->next_chunk;
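+ /*
+ * i walks the OS pages (nmap entries) of a chunk, j counts the
+ * 4K pages written to kpage; i advances only after all 4K
+ * sub-pages of the current OS page have been emitted
+ */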
+ list_for_each_entry_continue(chunk,
+ (&(pginfo->region->chunk_list)),
+ list) {
+ for (i = pginfo->next_nmap; i < chunk->nmap; ) {
+ pgaddr = ( page_to_pfn(chunk->page_list[i].page)
+ << PAGE_SHIFT );
+ *kpage = phys_to_abs(pgaddr +
+ (pginfo->next_4k *
+ EHCA_PAGESIZE));
+ if ( !(*kpage) ) {
+ ehca_gen_err("pgaddr=%lx "
+ "chunk->page_list[i]=%lx "
+ "i=%x next_4k=%lx mr=%p",
+ pgaddr,
+ (u64)sg_dma_address(
+ &chunk->
+ page_list[i]),
+ i, pginfo->next_4k, e_mr);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_exit0;
+ }
+ (pginfo->page_4k_cnt)++;
+ (pginfo->next_4k)++;
+ kpage++;
+ if (pginfo->next_4k %
+ (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
+ (pginfo->page_cnt)++;
+ (pginfo->next_nmap)++;
+ pginfo->next_4k = 0;
+ i++;
+ }
+ j++;
+ if (j >= number) break;
+ }
+ if ((pginfo->next_nmap >= chunk->nmap) &&
+ (j >= number)) {
+ pginfo->next_nmap = 0;
+ prev_chunk = chunk;
+ break;
+ } else if (pginfo->next_nmap >= chunk->nmap) {
+ pginfo->next_nmap = 0;
+ prev_chunk = chunk;
+ } else if (j >= number)
+ break;
+ else
+ prev_chunk = chunk;
+ }
+ pginfo->next_chunk =
+ list_prepare_entry(prev_chunk,
+ (&(pginfo->region->chunk_list)),
+ list);
+ } else if (pginfo->type == EHCA_MR_PGI_FMR) {
+ /* loop over desired page_list entries */
+ fmrlist = pginfo->page_list + pginfo->next_listelem;
+ for (i = 0; i < number; i++) {
+ *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
+ pginfo->next_4k * EHCA_PAGESIZE);
+ if ( !(*kpage) ) {
+ ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+ "next_listelem=%lx next_4k=%lx",
+ *fmrlist, fmrlist,
+ pginfo->next_listelem,
+ pginfo->next_4k);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_exit0;
+ }
+ (pginfo->page_4k_cnt)++;
+ (pginfo->next_4k)++;
+ kpage++;
+ if (pginfo->next_4k %
+ (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
+ (pginfo->page_cnt)++;
+ (pginfo->next_listelem)++;
+ fmrlist++;
+ pginfo->next_4k = 0;
+ }
+ }
+ } else {
+ ehca_gen_err("bad pginfo->type=%x", pginfo->type);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_exit0;
+ }
+
+ehca_set_pagebuf_exit0:
+ if (ret)
+ ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
+ "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
+ "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
+ "next_listelem=%lx region=%p next_chunk=%p "
+ "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
+ pginfo->num_pages, pginfo->num_4k,
+ pginfo->next_buf, pginfo->next_4k, number, kpage,
+ pginfo->page_cnt, pginfo->page_4k_cnt, i,
+ pginfo->next_listelem, pginfo->region,
+ pginfo->next_chunk, pginfo->next_nmap);
+ return ret;
+} /* end ehca_set_pagebuf() */
+
+/*----------------------------------------------------------------------*/
+
+/* setup 1 page from page info page buffer */
+int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo,
+ u64 *rpage)
+{
+ int ret = 0;
+ struct ib_phys_buf *tmp_pbuf;
+ u64 *fmrlist;
+ struct ib_umem_chunk *chunk;
+ struct ib_umem_chunk *prev_chunk;
+ u64 pgaddr, num4k, offs4k;
+
+ if (pginfo->type == EHCA_MR_PGI_PHYS) {
+ /* sanity check */
+ if ((pginfo->page_cnt >= pginfo->num_pages) ||
+ (pginfo->page_4k_cnt >= pginfo->num_4k)) {
+ ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
+ "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
+ pginfo->page_cnt, pginfo->num_pages,
+ pginfo->page_4k_cnt, pginfo->num_4k);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_1_exit0;
+ }
+ tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
+ num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
+ EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
+ offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
+ *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
+ (pginfo->next_4k * EHCA_PAGESIZE));
+ if ( !(*rpage) && tmp_pbuf->addr ) {
+ ehca_gen_err("tmp_pbuf->addr=%lx"
+ " tmp_pbuf->size=%lx next_4k=%lx",
+ tmp_pbuf->addr, tmp_pbuf->size,
+ pginfo->next_4k);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_1_exit0;
+ }
+ (pginfo->page_4k_cnt)++;
+ (pginfo->next_4k)++;
+ if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
+ (pginfo->page_cnt)++;
+ if (pginfo->next_4k >= offs4k + num4k) {
+ (pginfo->next_buf)++;
+ pginfo->next_4k = 0;
+ }
+ } else if (pginfo->type == EHCA_MR_PGI_USER) {
+ chunk = pginfo->next_chunk;
+ prev_chunk = pginfo->next_chunk;
+ list_for_each_entry_continue(chunk,
+ (&(pginfo->region->chunk_list)),
+ list) {
+ pgaddr = ( page_to_pfn(chunk->page_list[
+ pginfo->next_nmap].page)
+ << PAGE_SHIFT);
+ *rpage = phys_to_abs(pgaddr +
+ (pginfo->next_4k * EHCA_PAGESIZE));
+ if ( !(*rpage) ) {
+ ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
+ " next_nmap=%lx next_4k=%lx mr=%p",
+ pgaddr, (u64)sg_dma_address(
+ &chunk->page_list[
+ pginfo->
+ next_nmap]),
+ pginfo->next_nmap, pginfo->next_4k,
+ e_mr);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_1_exit0;
+ }
+ (pginfo->page_4k_cnt)++;
+ (pginfo->next_4k)++;
+ if (pginfo->next_4k %
+ (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
+ (pginfo->page_cnt)++;
+ (pginfo->next_nmap)++;
+ pginfo->next_4k = 0;
+ }
+ if (pginfo->next_nmap >= chunk->nmap) {
+ pginfo->next_nmap = 0;
+ prev_chunk = chunk;
+ }
+ break;
+ }
+ pginfo->next_chunk =
+ list_prepare_entry(prev_chunk,
+ (&(pginfo->region->chunk_list)),
+ list);
+ } else if (pginfo->type == EHCA_MR_PGI_FMR) {
+ fmrlist = pginfo->page_list + pginfo->next_listelem;
+ *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
+ pginfo->next_4k * EHCA_PAGESIZE);
+ if ( !(*rpage) ) {
+ ehca_gen_err("*fmrlist=%lx fmrlist=%p "
+ "next_listelem=%lx next_4k=%lx",
+ *fmrlist, fmrlist, pginfo->next_listelem,
+ pginfo->next_4k);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_1_exit0;
+ }
+ (pginfo->page_4k_cnt)++;
+ (pginfo->next_4k)++;
+ if (pginfo->next_4k %
+ (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
+ (pginfo->page_cnt)++;
+ (pginfo->next_listelem)++;
+ pginfo->next_4k = 0;
+ }
+ } else {
+ ehca_gen_err("bad pginfo->type=%x", pginfo->type);
+ ret = -EFAULT;
+ goto ehca_set_pagebuf_1_exit0;
+ }
+
+ehca_set_pagebuf_1_exit0:
+ if (ret)
+ ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
+ "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
+ "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
+ "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
+ pginfo, pginfo->type, pginfo->num_pages,
+ pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
+ rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
+ pginfo->next_listelem, pginfo->region,
+ pginfo->next_chunk, pginfo->next_nmap);
+ return ret;
+} /* end ehca_set_pagebuf_1() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * check whether an MR covers the whole of memory (max-MR);
+ * returns 1 if it is a max-MR, else 0
+ */
+int ehca_mr_is_maxmr(u64 size,
+ u64 *iova_start)
+{
+ /* a MR is treated as max-MR only if it fits following: */
+ if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
+ (iova_start == (void*)KERNELBASE)) {
+ ehca_gen_dbg("this is a max-MR");
+ return 1;
+ } else
+ return 0;
+} /* end ehca_mr_is_maxmr() */
+
+/*----------------------------------------------------------------------*/
+
+/* map access control for MR/MW. This routine is used for MR and MW. */
+void ehca_mrmw_map_acl(int ib_acl,
+ u32 *hipz_acl)
+{
+ *hipz_acl = 0;
+ if (ib_acl & IB_ACCESS_REMOTE_READ)
+ *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
+ if (ib_acl & IB_ACCESS_REMOTE_WRITE)
+ *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
+ if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
+ *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
+ if (ib_acl & IB_ACCESS_LOCAL_WRITE)
+ *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
+ if (ib_acl & IB_ACCESS_MW_BIND)
+ *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
+} /* end ehca_mrmw_map_acl() */
+
+/*----------------------------------------------------------------------*/
+
+/* sets page size in hipz access control for MR/MW. */
+void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
+{
+ /* nothing to do: the HCA supports only 4k pages */
+} /* end ehca_mrmw_set_pgsize_hipz_acl() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * reverse map access control for MR/MW.
+ * This routine is used for MR and MW.
+ */
+void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
+ int *ib_acl) /*OUT*/
+{
+ *ib_acl = 0;
+ if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
+ *ib_acl |= IB_ACCESS_REMOTE_READ;
+ if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
+ *ib_acl |= IB_ACCESS_REMOTE_WRITE;
+ if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
+ *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
+ if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
+ *ib_acl |= IB_ACCESS_LOCAL_WRITE;
+ if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
+ *ib_acl |= IB_ACCESS_MW_BIND;
+} /* end ehca_mrmw_reverse_map_acl() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for MR/MW allocations
+ * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
+ */
+int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
+{
+ switch (hipz_rc) {
+ case H_SUCCESS: /* successful completion */
+ return 0;
+ case H_ADAPTER_PARM: /* invalid adapter handle */
+ case H_RT_PARM: /* invalid resource type */
+ case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
+ case H_MLENGTH_PARM: /* invalid memory length */
+ case H_MEM_ACCESS_PARM: /* invalid access controls */
+ case H_CONSTRAINED: /* resource constraint */
+ return -EINVAL;
+ case H_BUSY: /* long busy */
+ return -EBUSY;
+ default:
+ return -EINVAL;
+ }
+} /* end ehca_mrmw_map_hrc_alloc() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for MR register rpage
+ * Used for hipz_h_register_rpage_mr at registering last page
+ */
+int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
+{
+ switch (hipz_rc) {
+ case H_SUCCESS: /* registration complete */
+ return 0;
+ case H_PAGE_REGISTERED: /* page registered */
+ case H_ADAPTER_PARM: /* invalid adapter handle */
+ case H_RH_PARM: /* invalid resource handle */
+/* case H_QT_PARM: invalid queue type */
+ case H_PARAMETER: /*
+ * invalid logical address,
+ * or count zero or greater than 512
+ */
+ case H_TABLE_FULL: /* page table full */
+ case H_HARDWARE: /* HCA not operational */
+ return -EINVAL;
+ case H_BUSY: /* long busy */
+ return -EBUSY;
+ default:
+ return -EINVAL;
+ }
+} /* end ehca_mrmw_map_hrc_rrpg_last() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for MR register rpage
+ * Used for hipz_h_register_rpage_mr at registering one page, but not last page
+ */
+int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
+{
+ switch (hipz_rc) {
+ case H_PAGE_REGISTERED: /* page registered */
+ return 0;
+ case H_SUCCESS: /* registration complete */
+ case H_ADAPTER_PARM: /* invalid adapter handle */
+ case H_RH_PARM: /* invalid resource handle */
+/* case H_QT_PARM: invalid queue type */
+ case H_PARAMETER: /*
+ * invalid logical address,
+ * or count zero or greater than 512
+ */
+ case H_TABLE_FULL: /* page table full */
+ case H_HARDWARE: /* HCA not operational */
+ return -EINVAL;
+ case H_BUSY: /* long busy */
+ return -EBUSY;
+ default:
+ return -EINVAL;
+ }
+} /* end ehca_mrmw_map_hrc_rrpg_notlast() */
+
+/*----------------------------------------------------------------------*/
+
+/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
+int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
+{
+ switch (hipz_rc) {
+ case H_SUCCESS: /* successful completion */
+ return 0;
+ case H_ADAPTER_PARM: /* invalid adapter handle */
+ case H_RH_PARM: /* invalid resource handle */
+ return -EINVAL;
+ case H_BUSY: /* long busy */
+ return -EBUSY;
+ default:
+ return -EINVAL;
+ }
+} /* end ehca_mrmw_map_hrc_query_mr() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for freeing MR resource
+ * Used for hipz_h_free_resource_mr
+ */
+int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
+{
+ switch (hipz_rc) {
+ case H_SUCCESS: /* resource freed */
+ return 0;
+ case H_ADAPTER_PARM: /* invalid adapter handle */
+ case H_RH_PARM: /* invalid resource handle */
+ case H_R_STATE: /* invalid resource state */
+ case H_HARDWARE: /* HCA not operational */
+ return -EINVAL;
+ case H_RESOURCE: /* Resource in use */
+ case H_BUSY: /* long busy */
+ return -EBUSY;
+ default:
+ return -EINVAL;
+ }
+} /* end ehca_mrmw_map_hrc_free_mr() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for freeing MW resource
+ * Used for hipz_h_free_resource_mw
+ */
+int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
+{
+ switch (hipz_rc) {
+ case H_SUCCESS: /* resource freed */
+ return 0;
+ case H_ADAPTER_PARM: /* invalid adapter handle */
+ case H_RH_PARM: /* invalid resource handle */
+ case H_R_STATE: /* invalid resource state */
+ case H_HARDWARE: /* HCA not operational */
+ return -EINVAL;
+ case H_RESOURCE: /* Resource in use */
+ case H_BUSY: /* long busy */
+ return -EBUSY;
+ default:
+ return -EINVAL;
+ }
+} /* end ehca_mrmw_map_hrc_free_mw() */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * map HIPZ rc to IB retcodes for SMR registrations
+ * Used for hipz_h_register_smr.
+ */
+int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
+{
+ switch (hipz_rc) {
+ case H_SUCCESS: /* successful completion */
+ return 0;
+ case H_ADAPTER_PARM: /* invalid adapter handle */
+ case H_RH_PARM: /* invalid resource handle */
+ case H_MEM_PARM: /* invalid MR virtual address */
+ case H_MEM_ACCESS_PARM: /* invalid access controls */
+ case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
+ return -EINVAL;
+ case H_BUSY: /* long busy */
+ return -EBUSY;
+ default:
+ return -EINVAL;
+ }
+} /* end ehca_mrmw_map_hrc_reg_smr() */
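A minimal sketch of how these mappers are meant to be used, assuming the
hcall wrappers named in the comments above (e.g. hipz_h_register_smr())
return the raw hypervisor code; H_BUSY here is just an example input:

	u64 h_ret = H_BUSY;	/* pretend the hcall returned "long busy" */
	int ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
	/* ret is now -EBUSY; a caller hands this back to the IB core */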
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * MR destructor and constructor
+ * used in the Reregister MR verb; sets all fields of struct ehca_mr to 0,
+ * except struct ib_mr and the spinlock
+ */
+void ehca_mr_deletenew(struct ehca_mr *mr)
+{
+ mr->flags = 0;
+ mr->num_pages = 0;
+ mr->num_4k = 0;
+ mr->acl = 0;
+ mr->start = NULL;
+ mr->fmr_page_size = 0;
+ mr->fmr_max_pages = 0;
+ mr->fmr_max_maps = 0;
+ mr->fmr_map_cnt = 0;
+ memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
+ memset(&mr->galpas, 0, sizeof(mr->galpas));
+ mr->nr_of_pages = 0;
+ mr->pagearray = NULL;
+} /* end ehca_mr_deletenew() */
+
+int ehca_init_mrmw_cache(void)
+{
+ mr_cache = kmem_cache_create("ehca_cache_mr",
+ sizeof(struct ehca_mr), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!mr_cache)
+ return -ENOMEM;
+ mw_cache = kmem_cache_create("ehca_cache_mw",
+ sizeof(struct ehca_mw), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!mw_cache) {
+ kmem_cache_destroy(mr_cache);
+ mr_cache = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void ehca_cleanup_mrmw_cache(void)
+{
+ if (mr_cache)
+ kmem_cache_destroy(mr_cache);
+ if (mw_cache)
+ kmem_cache_destroy(mw_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
new file mode 100644
index 00000000000..d936e40a574
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -0,0 +1,140 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * MR/MW declarations and inline functions
+ *
+ * Authors: Dietmar Decker <ddecker@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _EHCA_MRMW_H_
+#define _EHCA_MRMW_H_
+
+int ehca_reg_mr(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ u64 *iova_start,
+ u64 size,
+ int acl,
+ struct ehca_pd *e_pd,
+ struct ehca_mr_pginfo *pginfo,
+ u32 *lkey,
+ u32 *rkey);
+
+int ehca_reg_mr_rpages(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo);
+
+int ehca_rereg_mr(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ u64 *iova_start,
+ u64 size,
+ int mr_access_flags,
+ struct ehca_pd *e_pd,
+ struct ehca_mr_pginfo *pginfo,
+ u32 *lkey,
+ u32 *rkey);
+
+int ehca_unmap_one_fmr(struct ehca_shca *shca,
+ struct ehca_mr *e_fmr);
+
+int ehca_reg_smr(struct ehca_shca *shca,
+ struct ehca_mr *e_origmr,
+ struct ehca_mr *e_newmr,
+ u64 *iova_start,
+ int acl,
+ struct ehca_pd *e_pd,
+ u32 *lkey,
+ u32 *rkey);
+
+int ehca_reg_internal_maxmr(struct ehca_shca *shca,
+ struct ehca_pd *e_pd,
+ struct ehca_mr **maxmr);
+
+int ehca_reg_maxmr(struct ehca_shca *shca,
+ struct ehca_mr *e_newmr,
+ u64 *iova_start,
+ int acl,
+ struct ehca_pd *e_pd,
+ u32 *lkey,
+ u32 *rkey);
+
+int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
+
+int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
+ int num_phys_buf,
+ u64 *iova_start,
+ u64 *size);
+
+int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
+ u64 *page_list,
+ int list_len);
+
+int ehca_set_pagebuf(struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo,
+ u32 number,
+ u64 *kpage);
+
+int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo,
+ u64 *rpage);
+
+int ehca_mr_is_maxmr(u64 size,
+ u64 *iova_start);
+
+void ehca_mrmw_map_acl(int ib_acl,
+ u32 *hipz_acl);
+
+void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl);
+
+void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
+ int *ib_acl);
+
+int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);
+
+int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);
+
+void ehca_mr_deletenew(struct ehca_mr *mr);
+
+#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
new file mode 100644
index 00000000000..2c3cdc6f7b3
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -0,0 +1,114 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * PD functions
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_tools.h"
+#include "ehca_iverbs.h"
+
+static struct kmem_cache *pd_cache;
+
+struct ib_pd *ehca_alloc_pd(struct ib_device *device,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ struct ehca_pd *pd;
+
+ pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
+ if (!pd) {
+ ehca_err(device, "device=%p context=%p out of memory",
+ device, context);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(pd, 0, sizeof(struct ehca_pd));
+ pd->ownpid = current->tgid;
+
+ /*
+ * Kernel PD: context == NULL
+ * User PD:   context != NULL
+ */
+ if (!context) {
+ /*
+ * After init, kernel PDs always reuse
+ * the one created in ehca_shca_reopen()
+ */
+ struct ehca_shca *shca = container_of(device, struct ehca_shca,
+ ib_device);
+ pd->fw_pd.value = shca->pd->fw_pd.value;
+ } else
+ pd->fw_pd.value = (u64)pd;
+
+ return &pd->ib_pd;
+}
+
+int ehca_dealloc_pd(struct ib_pd *pd)
+{
+ u32 cur_pid = current->tgid;
+ struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ kmem_cache_free(pd_cache,
+ container_of(pd, struct ehca_pd, ib_pd));
+
+ return 0;
+}
+
+int ehca_init_pd_cache(void)
+{
+ pd_cache = kmem_cache_create("ehca_cache_pd",
+ sizeof(struct ehca_pd), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!pd_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_pd_cache(void)
+{
+ if (pd_cache)
+ kmem_cache_destroy(pd_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
new file mode 100644
index 00000000000..8707d297ce4
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -0,0 +1,259 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Hardware request structures
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _EHCA_QES_H_
+#define _EHCA_QES_H_
+
+#include "ehca_tools.h"
+
+/* virtual scatter-gather entry specifying a remote address and length */
+struct ehca_vsgentry {
+ u64 vaddr;
+ u32 lkey;
+ u32 length;
+};
+
+#define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7)
+#define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3)
+#define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12)
+#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31)
+#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47)
+#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55)
+#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63)
+
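The EHCA_BMASK_IBM(from, to) helper comes from ehca_tools.h, which is not
part of this hunk; the ranges above use IBM (MSB-0) bit numbering on a
64-bit word, i.e. bit 0 is the most significant bit. A standalone
extractor for such a range might look like the sketch below (illustrative
only, not the driver's actual macro):

	#include <stdint.h>

	/* extract IBM bits from..to (MSB-0 numbering) of a 64-bit word */
	static inline uint64_t ibm_bits_get(uint64_t word, unsigned from,
					    unsigned to)
	{
		unsigned width = to - from + 1;	/* field width in bits */
		unsigned shift = 63 - to;	/* LSB distance from bit 63 */
		uint64_t mask = (width >= 64) ?
			~(uint64_t)0 : (((uint64_t)1 << width) - 1);

		return (word >> shift) & mask;
	}

	/* e.g. the TCLASS range above: ibm_bits_get(grh_word0, 4, 12) */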
+/*
+ * Unreliable Datagram Address Vector Format
+ * see IBTA Vol1 chapter 8.3 Global Routing Header
+ */
+struct ehca_ud_av {
+ u8 sl;
+ u8 lnh;
+ u16 dlid;
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 slid_path_bits;
+ u8 reserved4;
+ u8 ipd;
+ u8 reserved5;
+ u8 pmtu;
+ u32 reserved6;
+ u64 reserved7;
+ union {
+ struct {
+ u64 word_0; /* always set to 6 */
+ /* should be 0x1B for IB transport */
+ u64 word_1;
+ u64 word_2;
+ u64 word_3;
+ u64 word_4;
+ } grh;
+ struct {
+ u32 wd_0;
+ u32 wd_1;
+ /* DWord_1 --> SGID */
+
+ u32 sgid_wd3;
+ u32 sgid_wd2;
+
+ u32 sgid_wd1;
+ u32 sgid_wd0;
+ /* DWord_3 --> DGID */
+
+ u32 dgid_wd3;
+ u32 dgid_wd2;
+
+ u32 dgid_wd1;
+ u32 dgid_wd0;
+ } grh_l;
+ };
+};
+
+/* maximum number of sg entries allowed in a WQE */
+#define MAX_WQE_SG_ENTRIES 252
+
+#define WQE_OPTYPE_SEND 0x80
+#define WQE_OPTYPE_RDMAREAD 0x40
+#define WQE_OPTYPE_RDMAWRITE 0x20
+#define WQE_OPTYPE_CMPSWAP 0x10
+#define WQE_OPTYPE_FETCHADD 0x08
+#define WQE_OPTYPE_BIND 0x04
+
+#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80
+#define WQE_WRFLAG_FENCE 0x40
+#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
+#define WQE_WRFLAG_SOLIC_EVENT 0x10
+
+#define WQEF_CACHE_HINT 0x80
+#define WQEF_CACHE_HINT_RD_WR 0x40
+#define WQEF_TIMED_WQE 0x20
+#define WQEF_PURGE 0x08
+#define WQEF_HIGH_NIBBLE 0xF0
+
+#define MW_BIND_ACCESSCTRL_R_WRITE 0x40
+#define MW_BIND_ACCESSCTRL_R_READ 0x20
+#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10
+
+struct ehca_wqe {
+ u64 work_request_id;
+ u8 optype;
+ u8 wr_flag;
+ u16 pkeyi;
+ u8 wqef;
+ u8 nr_of_data_seg;
+ u16 wqe_provided_slid;
+ u32 destination_qp_number;
+ u32 resync_psn_sqp;
+ u32 local_ee_context_qkey;
+ u32 immediate_data;
+ union {
+ struct {
+ u64 remote_virtual_adress;
+ u32 rkey;
+ u32 reserved;
+ u64 atomic_1st_op_dma_len;
+ u64 atomic_2nd_op;
+ struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
+
+ } nud;
+ struct {
+ u64 ehca_ud_av_ptr;
+ u64 reserved1;
+ u64 reserved2;
+ u64 reserved3;
+ struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
+ } ud_avp;
+ struct {
+ struct ehca_ud_av ud_av;
+ struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES - 2];
+ } ud_av;
+ struct {
+ u64 reserved0;
+ u64 reserved1;
+ u64 reserved2;
+ u64 reserved3;
+ struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
+ } all_rcv;
+
+ struct {
+ u64 reserved;
+ u32 rkey;
+ u32 old_rkey;
+ u64 reserved1;
+ u64 reserved2;
+ u64 virtual_address;
+ u32 reserved3;
+ u32 length;
+ u32 reserved4;
+ u16 reserved5;
+ u8 reserved6;
+ u8 lr_ctl;
+ u32 lkey;
+ u32 reserved7;
+ u64 reserved8;
+ u64 reserved9;
+ u64 reserved10;
+ u64 reserved11;
+ } bind;
+ struct {
+ u64 reserved12;
+ u64 reserved13;
+ u32 size;
+ u32 start;
+ } inline_data;
+ } u;
+
+};
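A sketch of how a send WQE might be composed from the WQE_OPTYPE_* and
WQE_WRFLAG_* bits above (queue handling, address translation and the
scatter/gather list are elided; the helper name is hypothetical):

	/* build a signalled RDMA write WQE; sg_list deliberately left empty */
	static void example_build_rdma_write(struct ehca_wqe *wqe, u64 wr_id,
					     u64 raddr, u32 rkey)
	{
		wqe->work_request_id = wr_id;
		wqe->optype = WQE_OPTYPE_RDMAWRITE;
		wqe->wr_flag = WQE_WRFLAG_REQ_SIGNAL_COM; /* request a CQE */
		/* field name spelled as in the struct above */
		wqe->u.nud.remote_virtual_adress = raddr;
		wqe->u.nud.rkey = rkey;
		wqe->nr_of_data_seg = 0;
	}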
+
+#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0)
+#define WC_IMM_DATA EHCA_BMASK_IBM(1,1)
+#define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2)
+#define WC_SE_BIT EHCA_BMASK_IBM(3,3)
+#define WC_STATUS_ERROR_BIT 0x80000000
+#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
+#define WC_STATUS_PURGE_BIT 0x10
+
+struct ehca_cqe {
+ u64 work_request_id;
+ u8 optype;
+ u8 w_completion_flags;
+ u16 reserved1;
+ u32 nr_bytes_transferred;
+ u32 immediate_data;
+ u32 local_qp_number;
+ u8 freed_resource_count;
+ u8 service_level;
+ u16 wqe_count;
+ u32 qp_token;
+ u32 qkey_ee_token;
+ u32 remote_qp_number;
+ u16 dlid;
+ u16 rlid;
+ u16 reserved2;
+ u16 pkey_index;
+ u32 cqe_timestamp;
+ u32 wqe_timestamp;
+ u8 wqe_timestamp_valid;
+ u8 reserved3;
+ u8 reserved4;
+ u8 cqe_flags;
+ u32 status;
+};
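The WC_STATUS_* bits above are presumably tested against the CQE status
word when completions are polled (the poll loop itself lives in
ehca_reqs.c, added later in this patch). A sketch of the flag tests,
with a hypothetical helper name:

	/* return nonzero if the CQE completed cleanly */
	static int example_cqe_ok(const struct ehca_cqe *cqe)
	{
		if (cqe->status & WC_STATUS_ERROR_BIT)
			return 0;	/* completed with error */
		if (cqe->status & WC_STATUS_PURGE_BIT)
			return 0;	/* purged wqe, to be ignored */
		return 1;
	}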
+
+struct ehca_eqe {
+ u64 entry;
+};
+
+struct ehca_mrte {
+ u64 starting_va;
+ u64 length; /* length of memory region in bytes */
+ u32 pd;
+ u8 key_instance;
+ u8 pagesize;
+ u8 mr_control;
+ u8 local_remote_access_ctrl;
+ u8 reserved[0x20 - 0x18];
+ u64 at_pointer[4];
+};
+#endif /*_EHCA_QES_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
new file mode 100644
index 00000000000..4394123cdbd
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -0,0 +1,1507 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * QP functions
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/current.h>
+
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+#include "hipz_fns.h"
+
+static struct kmem_cache *qp_cache;
+
+/*
+ * attributes not supported by query qp
+ */
+#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
+ IB_QP_MAX_QP_RD_ATOMIC | \
+ IB_QP_ACCESS_FLAGS | \
+ IB_QP_EN_SQD_ASYNC_NOTIFY)
+
+/*
+ * ehca (internal) qp state values
+ */
+enum ehca_qp_state {
+ EHCA_QPS_RESET = 1,
+ EHCA_QPS_INIT = 2,
+ EHCA_QPS_RTR = 3,
+ EHCA_QPS_RTS = 5,
+ EHCA_QPS_SQD = 6,
+ EHCA_QPS_SQE = 8,
+ EHCA_QPS_ERR = 128
+};
+
+/*
+ * qp state transitions as defined by IB Arch Rel 1.1 page 431
+ */
+enum ib_qp_statetrans {
+ IB_QPST_ANY2RESET,
+ IB_QPST_ANY2ERR,
+ IB_QPST_RESET2INIT,
+ IB_QPST_INIT2RTR,
+ IB_QPST_INIT2INIT,
+ IB_QPST_RTR2RTS,
+ IB_QPST_RTS2SQD,
+ IB_QPST_RTS2RTS,
+ IB_QPST_SQD2RTS,
+ IB_QPST_SQE2RTS,
+ IB_QPST_SQD2SQD,
+ IB_QPST_MAX /* nr of transitions, this must be last!!! */
+};
+
+/*
+ * ib2ehca_qp_state maps IB to ehca qp_state
+ * returns ehca qp state corresponding to given ib qp state
+ */
+static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
+{
+ switch (ib_qp_state) {
+ case IB_QPS_RESET:
+ return EHCA_QPS_RESET;
+ case IB_QPS_INIT:
+ return EHCA_QPS_INIT;
+ case IB_QPS_RTR:
+ return EHCA_QPS_RTR;
+ case IB_QPS_RTS:
+ return EHCA_QPS_RTS;
+ case IB_QPS_SQD:
+ return EHCA_QPS_SQD;
+ case IB_QPS_SQE:
+ return EHCA_QPS_SQE;
+ case IB_QPS_ERR:
+ return EHCA_QPS_ERR;
+ default:
+ ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
+ return -EINVAL;
+ }
+}
+
+/*
+ * ehca2ib_qp_state maps ehca to IB qp_state
+ * returns ib qp state corresponding to given ehca qp state
+ */
+static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
+ ehca_qp_state)
+{
+ switch (ehca_qp_state) {
+ case EHCA_QPS_RESET:
+ return IB_QPS_RESET;
+ case EHCA_QPS_INIT:
+ return IB_QPS_INIT;
+ case EHCA_QPS_RTR:
+ return IB_QPS_RTR;
+ case EHCA_QPS_RTS:
+ return IB_QPS_RTS;
+ case EHCA_QPS_SQD:
+ return IB_QPS_SQD;
+ case EHCA_QPS_SQE:
+ return IB_QPS_SQE;
+ case EHCA_QPS_ERR:
+ return IB_QPS_ERR;
+ default:
+ ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
+ return -EINVAL;
+ }
+}
+
+/*
+ * ehca_qp_type used as index for req_attr and opt_attr of
+ * struct ehca_modqp_statetrans
+ */
+enum ehca_qp_type {
+ QPT_RC = 0,
+ QPT_UC = 1,
+ QPT_UD = 2,
+ QPT_SQP = 3,
+ QPT_MAX
+};
+
+/*
+ * ib2ehcaqptype maps Ib to ehca qp_type
+ * returns ehca qp type corresponding to ib qp type
+ */
+static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
+{
+ switch (ibqptype) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ return QPT_SQP;
+ case IB_QPT_RC:
+ return QPT_RC;
+ case IB_QPT_UC:
+ return QPT_UC;
+ case IB_QPT_UD:
+ return QPT_UD;
+ default:
+ ehca_gen_err("Invalid ibqptype=%x", ibqptype);
+ return -EINVAL;
+ }
+}
+
+static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
+ int ib_tostate)
+{
+ int index = -EINVAL;
+ switch (ib_tostate) {
+ case IB_QPS_RESET:
+ index = IB_QPST_ANY2RESET;
+ break;
+ case IB_QPS_INIT:
+ switch (ib_fromstate) {
+ case IB_QPS_RESET:
+ index = IB_QPST_RESET2INIT;
+ break;
+ case IB_QPS_INIT:
+ index = IB_QPST_INIT2INIT;
+ break;
+ }
+ break;
+ case IB_QPS_RTR:
+ if (ib_fromstate == IB_QPS_INIT)
+ index = IB_QPST_INIT2RTR;
+ break;
+ case IB_QPS_RTS:
+ switch (ib_fromstate) {
+ case IB_QPS_RTR:
+ index = IB_QPST_RTR2RTS;
+ break;
+ case IB_QPS_RTS:
+ index = IB_QPST_RTS2RTS;
+ break;
+ case IB_QPS_SQD:
+ index = IB_QPST_SQD2RTS;
+ break;
+ case IB_QPS_SQE:
+ index = IB_QPST_SQE2RTS;
+ break;
+ }
+ break;
+ case IB_QPS_SQD:
+ if (ib_fromstate == IB_QPS_RTS)
+ index = IB_QPST_RTS2SQD;
+ break;
+ case IB_QPS_SQE:
+ break;
+ case IB_QPS_ERR:
+ index = IB_QPST_ANY2ERR;
+ break;
+ default:
+ break;
+ }
+ return index;
+}
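A short usage sketch: for a modify_qp call taking the queue pair from
RESET to INIT, the lookup resolves to the RESET2INIT entry, while pairs
with no legal transition fall through to -EINVAL:

	enum ib_qp_statetrans t =
		get_modqp_statetrans(IB_QPS_RESET, IB_QPS_INIT);
	/* t == IB_QPST_RESET2INIT; e.g. (RTR, INIT) would yield -EINVAL */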
+
+enum ehca_service_type {
+ ST_RC = 0,
+ ST_UC = 1,
+ ST_RD = 2,
+ ST_UD = 3
+};
+
+/*
+ * ibqptype2servicetype returns hcp service type corresponding to given
+ * ib qp type used by create_qp()
+ */
+static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
+{
+ switch (ibqptype) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ return ST_UD;
+ case IB_QPT_RC:
+ return ST_RC;
+ case IB_QPT_UC:
+ return ST_UC;
+ case IB_QPT_UD:
+ return ST_UD;
+ case IB_QPT_RAW_IPV6:
+ return -EINVAL;
+ case IB_QPT_RAW_ETY:
+ return -EINVAL;
+ default:
+ ehca_gen_err("Invalid ibqptype=%x", ibqptype);
+ return -EINVAL;
+ }
+}
+
+/*
+ * init_qp_queues initializes/constructs r/squeue and registers queue pages.
+ */
+static inline int init_qp_queues(struct ehca_shca *shca,
+ struct ehca_qp *my_qp,
+ int nr_sq_pages,
+ int nr_rq_pages,
+ int swqe_size,
+ int rwqe_size,
+ int nr_send_sges, int nr_receive_sges)
+{
+ int ret, cnt, ipz_rc;
+ void *vpage;
+ u64 rpage, h_ret;
+ struct ib_device *ib_dev = &shca->ib_device;
+ struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
+
+ ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
+ nr_sq_pages,
+ EHCA_PAGESIZE, swqe_size, nr_send_sges);
+ if (!ipz_rc) {
+ ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x",
+ ipz_rc);
+ return -EBUSY;
+ }
+
+ ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
+ nr_rq_pages,
+ EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
+ if (!ipz_rc) {
+ ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
+ ipz_rc);
+ ret = -EBUSY;
+ goto init_qp_queues0;
+ }
+ /* register SQ pages */
+ for (cnt = 0; cnt < nr_sq_pages; cnt++) {
+ vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
+ if (!vpage) {
+ ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
+ "failed p_vpage= %p", vpage);
+ ret = -EINVAL;
+ goto init_qp_queues1;
+ }
+ rpage = virt_to_abs(vpage);
+
+ h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf, 0, 0,
+ rpage, 1,
+ my_qp->galpas.kernel);
+ if (h_ret < H_SUCCESS) {
+ ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
+ " failed rc=%lx", h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto init_qp_queues1;
+ }
+ }
+
+ ipz_qeit_reset(&my_qp->ipz_squeue);
+
+ /* register RQ pages */
+ for (cnt = 0; cnt < nr_rq_pages; cnt++) {
+ vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
+ if (!vpage) {
+ ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
+ "failed p_vpage = %p", vpage);
+ ret = -EINVAL;
+ goto init_qp_queues1;
+ }
+
+ rpage = virt_to_abs(vpage);
+
+ h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf, 0, 1,
+ rpage, 1, my_qp->galpas.kernel);
+ if (h_ret < H_SUCCESS) {
+ ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
+ "rc=%lx", h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto init_qp_queues1;
+ }
+ if (cnt == (nr_rq_pages - 1)) { /* last page! */
+ if (h_ret != H_SUCCESS) {
+ ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
+ "h_ret= %lx ", h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto init_qp_queues1;
+ }
+ vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
+ if (vpage) {
+ ehca_err(ib_dev, "ipz_qpageit_get_inc() "
+ "should not succeed vpage=%p", vpage);
+ ret = -EINVAL;
+ goto init_qp_queues1;
+ }
+ } else {
+ if (h_ret != H_PAGE_REGISTERED) {
+ ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
+ "h_ret= %lx ", h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto init_qp_queues1;
+ }
+ }
+ }
+
+ ipz_qeit_reset(&my_qp->ipz_rqueue);
+
+ return 0;
+
+init_qp_queues1:
+ ipz_queue_dtor(&my_qp->ipz_rqueue);
+init_qp_queues0:
+ ipz_queue_dtor(&my_qp->ipz_squeue);
+ return ret;
+}
+
+struct ib_qp *ehca_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ static int da_rc_msg_size[] = { 128, 256, 512, 1024, 2048, 4096 };
+ static int da_ud_sq_msg_size[] = { 128, 384, 896, 1920, 3968 };
+ struct ehca_qp *my_qp;
+ struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+ ib_device);
+ struct ib_ucontext *context = NULL;
+ u64 h_ret;
+ int max_send_sge, max_recv_sge, ret;
+
+ /* h_call's out parameters */
+ struct ehca_alloc_qp_parms parms;
+ u32 swqe_size = 0, rwqe_size = 0;
+ u8 daqp_completion, isdaqp;
+ unsigned long flags;
+
+ if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
+ init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
+ ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
+ init_attr->sq_sig_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* save daqp completion bits */
+ daqp_completion = init_attr->qp_type & 0x60;
+ /* save daqp bit */
+ isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
+ init_attr->qp_type = init_attr->qp_type & 0x1F;
+
+ if (init_attr->qp_type != IB_QPT_UD &&
+ init_attr->qp_type != IB_QPT_SMI &&
+ init_attr->qp_type != IB_QPT_GSI &&
+ init_attr->qp_type != IB_QPT_UC &&
+ init_attr->qp_type != IB_QPT_RC) {
+ ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+ if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
+ && isdaqp) {
+ ehca_err(pd->device, "unsupported LL QP Type=%x",
+ init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ } else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
+ (init_attr->cap.max_send_wr > 255 ||
+ init_attr->cap.max_recv_wr > 255)) {
+ ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
+ "or max_rq_wr=%x for QP Type=%x",
+ init_attr->cap.max_send_wr,
+ init_attr->cap.max_recv_wr,init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ } else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
+ init_attr->cap.max_send_wr > 255) {
+ ehca_err(pd->device,
+ "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
+ init_attr->cap.max_send_wr, init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (pd->uobject && udata)
+ context = pd->uobject->context;
+
+ my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
+ if (!my_qp) {
+ ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(my_qp, 0, sizeof(struct ehca_qp));
+ memset(&parms, 0, sizeof(struct ehca_alloc_qp_parms));
+ spin_lock_init(&my_qp->spinlock_s);
+ spin_lock_init(&my_qp->spinlock_r);
+
+ my_qp->recv_cq =
+ container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
+ my_qp->send_cq =
+ container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
+
+ my_qp->init_attr = *init_attr;
+
+ do {
+ if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ ehca_err(pd->device, "Can't reserve idr resources.");
+ goto create_qp_exit0;
+ }
+
+ spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
+ spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+ } while (ret == -EAGAIN);
+
+ if (ret) {
+ ret = -ENOMEM;
+ ehca_err(pd->device, "Can't allocate new idr entry.");
+ goto create_qp_exit0;
+ }
+
+ parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
+ if (parms.servicetype < 0) {
+ ret = -EINVAL;
+ ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
+ goto create_qp_exit0;
+ }
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ parms.sigtype = HCALL_SIGT_EVERY;
+ else
+ parms.sigtype = HCALL_SIGT_BY_WQE;
+
+ /* UD_AV CIRCUMVENTION */
+ max_send_sge = init_attr->cap.max_send_sge;
+ max_recv_sge = init_attr->cap.max_recv_sge;
+ if (IB_QPT_UD == init_attr->qp_type ||
+ IB_QPT_GSI == init_attr->qp_type ||
+ IB_QPT_SMI == init_attr->qp_type) {
+ max_send_sge += 2;
+ max_recv_sge += 2;
+ }
+
+ parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
+ parms.daqp_ctrl = isdaqp | daqp_completion;
+ parms.pd = my_pd->fw_pd;
+ parms.max_recv_sge = max_recv_sge;
+ parms.max_send_sge = max_send_sge;
+
+ h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);
+
+ if (h_ret != H_SUCCESS) {
+ ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
+ h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto create_qp_exit1;
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ if (isdaqp == 0) {
+ swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
+ (parms.act_nr_send_sges)]);
+ rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
+ (parms.act_nr_recv_sges)]);
+ } else { /* for daqp we need to use msg size, not wqe size */
+ swqe_size = da_rc_msg_size[max_send_sge];
+ rwqe_size = da_rc_msg_size[max_recv_sge];
+ parms.act_nr_send_sges = 1;
+ parms.act_nr_recv_sges = 1;
+ }
+ break;
+ case IB_QPT_UC:
+ swqe_size = offsetof(struct ehca_wqe,
+ u.nud.sg_list[parms.act_nr_send_sges]);
+ rwqe_size = offsetof(struct ehca_wqe,
+ u.nud.sg_list[parms.act_nr_recv_sges]);
+ break;
+
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ case IB_QPT_SMI:
+ /* UD circumvention */
+ parms.act_nr_recv_sges -= 2;
+ parms.act_nr_send_sges -= 2;
+ if (isdaqp) {
+ swqe_size = da_ud_sq_msg_size[max_send_sge];
+ rwqe_size = da_rc_msg_size[max_recv_sge];
+ parms.act_nr_send_sges = 1;
+ parms.act_nr_recv_sges = 1;
+ } else {
+ swqe_size = offsetof(struct ehca_wqe,
+ u.ud_av.sg_list[parms.act_nr_send_sges]);
+ rwqe_size = offsetof(struct ehca_wqe,
+ u.ud_av.sg_list[parms.act_nr_recv_sges]);
+ }
+
+ if (IB_QPT_GSI == init_attr->qp_type ||
+ IB_QPT_SMI == init_attr->qp_type) {
+ parms.act_nr_send_wqes = init_attr->cap.max_send_wr;
+ parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
+ parms.act_nr_send_sges = init_attr->cap.max_send_sge;
+ parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
+ my_qp->real_qp_num =
+ (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* initializes r/squeue and registers queue pages */
+ ret = init_qp_queues(shca, my_qp,
+ parms.nr_sq_pages, parms.nr_rq_pages,
+ swqe_size, rwqe_size,
+ parms.act_nr_send_sges, parms.act_nr_recv_sges);
+ if (ret) {
+ ehca_err(pd->device,
+ "Couldn't initialize r/squeue and pages ret=%x", ret);
+ goto create_qp_exit2;
+ }
+
+ my_qp->ib_qp.pd = &my_pd->ib_pd;
+ my_qp->ib_qp.device = my_pd->ib_pd.device;
+
+ my_qp->ib_qp.recv_cq = init_attr->recv_cq;
+ my_qp->ib_qp.send_cq = init_attr->send_cq;
+
+ my_qp->ib_qp.qp_num = my_qp->real_qp_num;
+ my_qp->ib_qp.qp_type = init_attr->qp_type;
+
+ my_qp->qp_type = init_attr->qp_type;
+ my_qp->ib_qp.srq = init_attr->srq;
+
+ my_qp->ib_qp.qp_context = init_attr->qp_context;
+ my_qp->ib_qp.event_handler = init_attr->event_handler;
+
+ init_attr->cap.max_inline_data = 0; /* not supported yet */
+ init_attr->cap.max_recv_sge = parms.act_nr_recv_sges;
+ init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes;
+ init_attr->cap.max_send_sge = parms.act_nr_send_sges;
+ init_attr->cap.max_send_wr = parms.act_nr_send_wqes;
+
+ /* NOTE: define_aqp0() not supported yet */
+ if (init_attr->qp_type == IB_QPT_GSI) {
+ h_ret = ehca_define_sqp(shca, my_qp, init_attr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
+ h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto create_qp_exit3;
+ }
+ }
+ if (init_attr->send_cq) {
+ struct ehca_cq *cq = container_of(init_attr->send_cq,
+ struct ehca_cq, ib_cq);
+ ret = ehca_cq_assign_qp(cq, my_qp);
+ if (ret) {
+ ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
+ ret);
+ goto create_qp_exit3;
+ }
+ my_qp->send_cq = cq;
+ }
+ /* copy queues, galpa data to user space */
+ if (context && udata) {
+ struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
+ struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
+ struct ehca_create_qp_resp resp;
+ struct vm_area_struct * vma;
+ memset(&resp, 0, sizeof(resp));
+
+ resp.qp_num = my_qp->real_qp_num;
+ resp.token = my_qp->token;
+ resp.qp_type = my_qp->qp_type;
+ resp.qkey = my_qp->qkey;
+ resp.real_qp_num = my_qp->real_qp_num;
+ /* rqueue properties */
+ resp.ipz_rqueue.qe_size = ipz_rqueue->qe_size;
+ resp.ipz_rqueue.act_nr_of_sg = ipz_rqueue->act_nr_of_sg;
+ resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
+ resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
+ resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
+ ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
+ ipz_rqueue->queue_length,
+ (void**)&resp.ipz_rqueue.queue,
+ &vma);
+ if (ret) {
+ ehca_err(pd->device, "Could not mmap rqueue pages");
+ goto create_qp_exit3;
+ }
+ my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
+ /* squeue properties */
+ resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
+ resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
+ resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
+ resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
+ resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
+ ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
+ ipz_squeue->queue_length,
+ (void**)&resp.ipz_squeue.queue,
+ &vma);
+ if (ret) {
+ ehca_err(pd->device, "Could not mmap squeue pages");
+ goto create_qp_exit4;
+ }
+ my_qp->uspace_squeue = resp.ipz_squeue.queue;
+ /* fw_handle */
+ resp.galpas = my_qp->galpas;
+ ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
+ (void**)&resp.galpas.kernel.fw_handle,
+ &vma);
+ if (ret) {
+ ehca_err(pd->device, "Could not mmap fw_handle");
+ goto create_qp_exit5;
+ }
+ my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
+
+ if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+ ehca_err(pd->device, "Copy to udata failed");
+ ret = -EINVAL;
+ goto create_qp_exit6;
+ }
+ }
+
+ return &my_qp->ib_qp;
+
+create_qp_exit6:
+ ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
+
+create_qp_exit5:
+ ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
+
+create_qp_exit4:
+ ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
+
+create_qp_exit3:
+ ipz_queue_dtor(&my_qp->ipz_rqueue);
+ ipz_queue_dtor(&my_qp->ipz_squeue);
+
+create_qp_exit2:
+ hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
+
+create_qp_exit1:
+ spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ idr_remove(&ehca_qp_idr, my_qp->token);
+ spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+create_qp_exit0:
+ kmem_cache_free(qp_cache, my_qp);
+ return ERR_PTR(ret);
+}
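A note on the WQE sizing in the switch above: swqe_size/rwqe_size are
taken as the offset of the first unused scatter/gather slot, so each
queue entry holds exactly act_nr_*_sges descriptors with no trailing
array space. A worked example:

	/* with 4 send SGEs the WQE spans the header plus 4 vsg entries */
	size_t sz = offsetof(struct ehca_wqe, u.nud.sg_list[4]);
	/* == offsetof(struct ehca_wqe, u.nud.sg_list)
	 *    + 4 * sizeof(struct ehca_vsgentry) */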
+
+/*
+ * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
+ * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
+ * returns total number of bad wqes in bad_wqe_cnt
+ */
+static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
+ int *bad_wqe_cnt)
+{
+ u64 h_ret;
+ struct ipz_queue *squeue;
+ void *bad_send_wqe_p, *bad_send_wqe_v;
+ void *squeue_start_p, *squeue_end_p;
+ void *squeue_start_v, *squeue_end_v;
+ struct ehca_wqe *wqe;
+ int qp_num = my_qp->ib_qp.qp_num;
+
+ /* get send wqe pointer */
+ h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle, &my_qp->pf,
+ &bad_send_wqe_p, NULL, 2);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
+ " ehca_qp=%p qp_num=%x h_ret=%lx",
+ my_qp, qp_num, h_ret);
+ return ehca2ib_return_code(h_ret);
+ }
+ bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
+ ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
+ qp_num, bad_send_wqe_p);
+ /* convert wqe pointer to vadr */
+ bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
+ if (ehca_debug_level)
+ ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
+ squeue = &my_qp->ipz_squeue;
+ squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
+ squeue_end_p = squeue_start_p+squeue->queue_length;
+ squeue_start_v = abs_to_virt((u64)squeue_start_p);
+ squeue_end_v = abs_to_virt((u64)squeue_end_p);
+ ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
+ qp_num, squeue_start_v, squeue_end_v);
+
+ /* loop sets wqe's purge bit */
+ wqe = (struct ehca_wqe*)bad_send_wqe_v;
+ *bad_wqe_cnt = 0;
+ while (wqe->optype != 0xff && wqe->wqef != 0xff) {
+ if (ehca_debug_level)
+ ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
+ wqe->nr_of_data_seg = 0; /* suppress data access */
+ wqe->wqef = WQEF_PURGE; /* WQE to be purged */
+ wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
+ *bad_wqe_cnt = (*bad_wqe_cnt)+1;
+ if ((void*)wqe >= squeue_end_v) {
+ wqe = squeue_start_v;
+ }
+ }
+ /*
+ * bad wqe will be reprocessed and ignored when poll_cq() is called,
+ * i.e. the nr of wqes with flush error status is one less
+ */
+ ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
+ qp_num, (*bad_wqe_cnt)-1);
+ wqe->wqef = 0;
+
+ return 0;
+}
+
+/*
+ * internal_modify_qp with circumvention to handle aqp0 properly
+ * smi_reset2init indicates if this is an internal reset-to-init-call for
+ * smi. This flag must always be zero if called from ehca_modify_qp()!
+ * This internal function was introduced to avoid recursion in ehca_modify_qp()!
+ */
+static int internal_modify_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask, int smi_reset2init)
+{
+ enum ib_qp_state qp_cur_state, qp_new_state;
+ int cnt, qp_attr_idx, ret = 0;
+ enum ib_qp_statetrans statetrans;
+ struct hcp_modify_qp_control_block *mqpcb;
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca =
+ container_of(ibqp->pd->device, struct ehca_shca, ib_device);
+ u64 update_mask;
+ u64 h_ret;
+ int bad_wqe_cnt = 0;
+ int squeue_locked = 0;
+ unsigned long spl_flags = 0;
+
+ /* do query_qp to obtain current attr values */
+ mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (mqpcb == NULL) {
+ ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
+ "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
+ return -ENOMEM;
+ }
+
+ h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf,
+ mqpcb, my_qp->galpas.kernel);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(ibqp->device, "hipz_h_query_qp() failed "
+ "ehca_qp=%p qp_num=%x h_ret=%lx",
+ my_qp, ibqp->qp_num, h_ret);
+ ret = ehca2ib_return_code(h_ret);
+ goto modify_qp_exit1;
+ }
+
+ qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
+
+ if (qp_cur_state == -EINVAL) { /* invalid qp state */
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
+ "ehca_qp=%p qp_num=%x",
+ mqpcb->qp_state, my_qp, ibqp->qp_num);
+ goto modify_qp_exit1;
+ }
+ /*
+ * circumvention to set aqp0 initial state to init
+ * as expected by IB spec
+ */
+ if (smi_reset2init == 0 &&
+ ibqp->qp_type == IB_QPT_SMI &&
+ qp_cur_state == IB_QPS_RESET &&
+ (attr_mask & IB_QP_STATE) &&
+ attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
+ struct ib_qp_attr smiqp_attr = {
+ .qp_state = IB_QPS_INIT,
+ .port_num = my_qp->init_attr.port_num,
+ .pkey_index = 0,
+ .qkey = 0
+ };
+ int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
+ IB_QP_PKEY_INDEX | IB_QP_QKEY;
+ int smirc = internal_modify_qp(
+ ibqp, &smiqp_attr, smiqp_attr_mask, 1);
+ if (smirc) {
+ ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
+ "ehca_modify_qp() rc=%x", smirc);
+ ret = H_PARAMETER;
+ goto modify_qp_exit1;
+ }
+ qp_cur_state = IB_QPS_INIT;
+ ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
+ }
+ /* does the transmitted current state match the actual current state? */
+ if ((attr_mask & IB_QP_CUR_STATE) &&
+ qp_cur_state != attr->cur_qp_state) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device,
+ "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
+ " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
+ attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
+ goto modify_qp_exit1;
+ }
+
+ ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
+ "new qp_state=%x attribute_mask=%x",
+ my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
+
+ qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
+ if (!smi_reset2init &&
+ !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
+ attr_mask)) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device,
+ "Invalid qp transition new_state=%x cur_state=%x "
+ "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
+ qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
+ goto modify_qp_exit1;
+ }
+
+ if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state)))
+ update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
+ else {
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "Invalid new qp state=%x "
+ "ehca_qp=%p qp_num=%x",
+ qp_new_state, my_qp, ibqp->qp_num);
+ goto modify_qp_exit1;
+ }
+
+ /* retrieve state transition struct to get req and opt attrs */
+ statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
+ if (statetrans < 0) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
+ "new_qp_state=%x State_xsition=%x ehca_qp=%p "
+ "qp_num=%x", qp_cur_state, qp_new_state,
+ statetrans, my_qp, ibqp->qp_num);
+ goto modify_qp_exit1;
+ }
+
+ qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
+
+ if (qp_attr_idx < 0) {
+ ret = qp_attr_idx;
+ ehca_err(ibqp->device,
+ "Invalid QP type=%x ehca_qp=%p qp_num=%x",
+ ibqp->qp_type, my_qp, ibqp->qp_num);
+ goto modify_qp_exit1;
+ }
+
+ ehca_dbg(ibqp->device,
+ "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
+ my_qp, ibqp->qp_num, statetrans);
+
+ /* sqe -> rts: set purge bit of bad wqe before actual trans */
+ if ((my_qp->qp_type == IB_QPT_UD ||
+ my_qp->qp_type == IB_QPT_GSI ||
+ my_qp->qp_type == IB_QPT_SMI) &&
+ statetrans == IB_QPST_SQE2RTS) {
+ /* mark next free wqe if kernel */
+ if (my_qp->uspace_squeue == 0) {
+ struct ehca_wqe *wqe;
+ /* lock send queue */
+ spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+ squeue_locked = 1;
+ /* mark next free wqe */
+ wqe = (struct ehca_wqe*)
+ ipz_qeit_get(&my_qp->ipz_squeue);
+ wqe->optype = wqe->wqef = 0xff;
+ ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
+ ibqp->qp_num, wqe);
+ }
+ ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
+ if (ret) {
+ ehca_err(ibqp->device, "prepare_sqe_rts() failed "
+ "ehca_qp=%p qp_num=%x ret=%x",
+ my_qp, ibqp->qp_num, ret);
+ goto modify_qp_exit2;
+ }
+ }
+
+ /*
+ * enable RDMA_Atomic_Control if reset->init and reliable connection;
+ * this is necessary since gen2 does not provide that flag,
+ * but pHyp requires it
+ */
+ if (statetrans == IB_QPST_RESET2INIT &&
+ (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
+ mqpcb->rdma_atomic_ctrl = 3;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
+ }
+ /* circumvention: pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
+ if (statetrans == IB_QPST_INIT2RTR &&
+ (ibqp->qp_type == IB_QPT_UC) &&
+ !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
+ mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ mqpcb->prim_p_key_idx = attr->pkey_index;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
+ }
+ if (attr_mask & IB_QP_PORT) {
+ if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
+ ret = -EINVAL;
+ ehca_err(ibqp->device, "Invalid port=%x. "
+ "ehca_qp=%p qp_num=%x num_ports=%x",
+ attr->port_num, my_qp, ibqp->qp_num,
+ shca->num_ports);
+ goto modify_qp_exit2;
+ }
+ mqpcb->prim_phys_port = attr->port_num;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
+ }
+ if (attr_mask & IB_QP_QKEY) {
+ mqpcb->qkey = attr->qkey;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
+ }
+ if (attr_mask & IB_QP_AV) {
+ int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate);
+ int ehca_mult = ib_rate_to_mult(shca->sport[my_qp->
+ init_attr.port_num].rate);
+
+ mqpcb->dlid = attr->ah_attr.dlid;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
+ mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
+ mqpcb->service_level = attr->ah_attr.sl;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
+
+ if (ah_mult < ehca_mult)
+ mqpcb->max_static_rate = (ah_mult > 0) ?
+ ((ehca_mult - 1) / ah_mult) : 0;
+ else
+ mqpcb->max_static_rate = 0;
+
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
+
+ /*
+ * only if GRH is TRUE may we set SOURCE_GID_IDX
+ * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!
+ */
+ if (attr->ah_attr.ah_flags == IB_AH_GRH) {
+ mqpcb->send_grh_flag = 1 << 31;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+ mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
+
+ for (cnt = 0; cnt < 16; cnt++)
+ mqpcb->dest_gid.byte[cnt] =
+ attr->ah_attr.grh.dgid.raw[cnt];
+
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
+ mqpcb->flow_label = attr->ah_attr.grh.flow_label;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
+ mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
+ mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ mqpcb->path_mtu = attr->path_mtu;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
+ }
+ if (attr_mask & IB_QP_TIMEOUT) {
+ mqpcb->timeout = attr->timeout;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
+ }
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ mqpcb->retry_count = attr->retry_cnt;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
+ }
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ mqpcb->rnr_retry_count = attr->rnr_retry;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
+ }
+ if (attr_mask & IB_QP_RQ_PSN) {
+ mqpcb->receive_psn = attr->rq_psn;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
+ }
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
+ attr->max_dest_rd_atomic : 2;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
+ }
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
+ attr->max_rd_atomic : 2;
+ update_mask |=
+ EHCA_BMASK_SET
+ (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
+ }
+ if (attr_mask & IB_QP_ALT_PATH) {
+ int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate);
+ int ehca_mult = ib_rate_to_mult(
+ shca->sport[my_qp->init_attr.port_num].rate);
+
+ mqpcb->dlid_al = attr->alt_ah_attr.dlid;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1);
+ mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1);
+ mqpcb->service_level_al = attr->alt_ah_attr.sl;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1);
+
+ if (ah_mult < ehca_mult)
+ mqpcb->max_static_rate_al = (ah_mult > 0) ?
+ ((ehca_mult - 1) / ah_mult) : 0;
+ else
+ mqpcb->max_static_rate_al = 0;
+
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1);
+
+ /*
+ * only if GRH is TRUE may we set SOURCE_GID_IDX
+ * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!
+ */
+ if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
+ mqpcb->send_grh_flag_al = 1 << 31;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
+ mqpcb->source_gid_idx_al =
+ attr->alt_ah_attr.grh.sgid_index;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1);
+
+ for (cnt = 0; cnt < 16; cnt++)
+ mqpcb->dest_gid_al.byte[cnt] =
+ attr->alt_ah_attr.grh.dgid.raw[cnt];
+
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1);
+ mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1);
+ mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1);
+ mqpcb->traffic_class_al =
+ attr->alt_ah_attr.grh.traffic_class;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
+ }
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ mqpcb->send_psn = attr->sq_psn;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ mqpcb->dest_qp_nr = attr->dest_qp_num;
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ mqpcb->path_migration_state = attr->path_mig_state;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
+ }
+
+ if (attr_mask & IB_QP_CAP) {
+ mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
+ mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
+ /* no support for max_send/recv_sge yet */
+ }
+
+ if (ehca_debug_level)
+ ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
+
+ h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf,
+ update_mask,
+ mqpcb, my_qp->galpas.kernel);
+
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
+ "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num);
+ goto modify_qp_exit2;
+ }
+
+ if ((my_qp->qp_type == IB_QPT_UD ||
+ my_qp->qp_type == IB_QPT_GSI ||
+ my_qp->qp_type == IB_QPT_SMI) &&
+ statetrans == IB_QPST_SQE2RTS) {
+ /* doorbell to reprocessing wqes */
+ iosync(); /* serialize GAL register access */
+ hipz_update_sqa(my_qp, bad_wqe_cnt-1);
+ ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
+ }
+
+ if (statetrans == IB_QPST_RESET2INIT ||
+ statetrans == IB_QPST_INIT2INIT) {
+ mqpcb->qp_enable = 1;
+ mqpcb->qp_state = EHCA_QPS_INIT;
+ update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
+
+ h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf,
+ update_mask,
+ mqpcb,
+ my_qp->galpas.kernel);
+
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(ibqp->device, "ENABLE in context of "
+ "RESET_2_INIT failed! Maybe you didn't get "
+ "a LID h_ret=%lx ehca_qp=%p qp_num=%x",
+ h_ret, my_qp, ibqp->qp_num);
+ goto modify_qp_exit2;
+ }
+ }
+
+ if (statetrans == IB_QPST_ANY2RESET) {
+ ipz_qeit_reset(&my_qp->ipz_rqueue);
+ ipz_qeit_reset(&my_qp->ipz_squeue);
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ my_qp->qkey = attr->qkey;
+
+modify_qp_exit2:
+ if (squeue_locked) { /* this means: sqe -> rts */
+ spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
+ my_qp->sqerr_purgeflag = 1;
+ }
+
+modify_qp_exit1:
+ kfree(mqpcb);
+
+ return ret;
+}
+
+int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+ ib_pd);
+ u32 cur_pid = current->tgid;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ return internal_modify_qp(ibqp, attr, attr_mask, 0);
+}
+
+int ehca_query_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+ ib_pd);
+ struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
+ ib_device);
+ struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+ struct hcp_modify_qp_control_block *qpcb;
+ u32 cur_pid = current->tgid;
+ int cnt, ret = 0;
+ u64 h_ret;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
+ ehca_err(qp->device,"Invalid attribute mask "
+ "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
+ my_qp, qp->qp_num, qp_attr_mask);
+ return -EINVAL;
+ }
+
+ qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ if (!qpcb) {
+ ehca_err(qp->device,"Out of memory for qpcb "
+ "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
+ return -ENOMEM;
+ }
+
+ h_ret = hipz_h_query_qp(adapter_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf,
+ qpcb, my_qp->galpas.kernel);
+
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(qp->device,"hipz_h_query_qp() failed "
+ "ehca_qp=%p qp_num=%x h_ret=%lx",
+ my_qp, qp->qp_num, h_ret);
+ goto query_qp_exit1;
+ }
+
+ qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
+ qp_attr->qp_state = qp_attr->cur_qp_state;
+
+ if (qp_attr->cur_qp_state == -EINVAL) {
+ ret = -EINVAL;
+ ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
+ "ehca_qp=%p qp_num=%x",
+ qpcb->qp_state, my_qp, qp->qp_num);
+ goto query_qp_exit1;
+ }
+
+ if (qp_attr->qp_state == IB_QPS_SQD)
+ qp_attr->sq_draining = 1;
+
+ qp_attr->qkey = qpcb->qkey;
+ qp_attr->path_mtu = qpcb->path_mtu;
+ qp_attr->path_mig_state = qpcb->path_migration_state;
+ qp_attr->rq_psn = qpcb->receive_psn;
+ qp_attr->sq_psn = qpcb->send_psn;
+ qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
+ qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
+ qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
+ /* UD_AV CIRCUMVENTION */
+ if (my_qp->qp_type == IB_QPT_UD) {
+ qp_attr->cap.max_send_sge =
+ qpcb->actual_nr_sges_in_sq_wqe - 2;
+ qp_attr->cap.max_recv_sge =
+ qpcb->actual_nr_sges_in_rq_wqe - 2;
+ } else {
+ qp_attr->cap.max_send_sge =
+ qpcb->actual_nr_sges_in_sq_wqe;
+ qp_attr->cap.max_recv_sge =
+ qpcb->actual_nr_sges_in_rq_wqe;
+ }
+
+ qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
+ qp_attr->dest_qp_num = qpcb->dest_qp_nr;
+
+ qp_attr->pkey_index =
+ EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
+
+ qp_attr->port_num =
+ EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
+
+ qp_attr->timeout = qpcb->timeout;
+ qp_attr->retry_cnt = qpcb->retry_count;
+ qp_attr->rnr_retry = qpcb->rnr_retry_count;
+
+ qp_attr->alt_pkey_index =
+ EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
+
+ qp_attr->alt_port_num = qpcb->alt_phys_port;
+ qp_attr->alt_timeout = qpcb->timeout_al;
+
+ /* primary av */
+ qp_attr->ah_attr.sl = qpcb->service_level;
+
+ if (qpcb->send_grh_flag) {
+ qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+ }
+
+ qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
+ qp_attr->ah_attr.dlid = qpcb->dlid;
+ qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
+ qp_attr->ah_attr.port_num = qp_attr->port_num;
+
+ /* primary GRH */
+ qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
+ qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
+ qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
+ qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
+
+ for (cnt = 0; cnt < 16; cnt++)
+ qp_attr->ah_attr.grh.dgid.raw[cnt] =
+ qpcb->dest_gid.byte[cnt];
+
+ /* alternate AV */
+ qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
+ if (qpcb->send_grh_flag_al) {
+ qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
+ }
+
+ qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
+ qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
+ qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
+
+ /* alternate GRH */
+ qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
+ qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
+ qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
+ qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
+
+ for (cnt = 0; cnt < 16; cnt++)
+ qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
+ qpcb->dest_gid_al.byte[cnt];
+
+ /* return init attributes given in ehca_create_qp */
+ if (qp_init_attr)
+ *qp_init_attr = my_qp->init_attr;
+
+ if (ehca_debug_level)
+ ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
+
+query_qp_exit1:
+ kfree(qpcb);
+
+ return ret;
+}
+
+int ehca_destroy_qp(struct ib_qp *ibqp)
+{
+ struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+ struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+ ib_device);
+ struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
+ ib_pd);
+ u32 cur_pid = current->tgid;
+ u32 qp_num = ibqp->qp_num;
+ int ret;
+ u64 h_ret;
+ u8 port_num;
+ enum ib_qp_type qp_type;
+ unsigned long flags;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ if (my_qp->send_cq) {
+ ret = ehca_cq_unassign_qp(my_qp->send_cq,
+ my_qp->real_qp_num);
+ if (ret) {
+ ehca_err(ibqp->device, "Couldn't unassign qp from "
+ "send_cq ret=%x qp_num=%x cq_num=%x", ret,
+ my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ idr_remove(&ehca_qp_idr, my_qp->token);
+ spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+ /* un-mmap if vma alloc */
+ if (my_qp->uspace_rqueue) {
+ ret = ehca_munmap(my_qp->uspace_rqueue,
+ my_qp->ipz_rqueue.queue_length);
+ if (ret)
+ ehca_err(ibqp->device, "Could not munmap rqueue "
+ "qp_num=%x", qp_num);
+ ret = ehca_munmap(my_qp->uspace_squeue,
+ my_qp->ipz_squeue.queue_length);
+ if (ret)
+ ehca_err(ibqp->device, "Could not munmap squeue "
+ "qp_num=%x", qp_num);
+ ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
+ if (ret)
+ ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
+ qp_num);
+ }
+
+ h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
+ "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
+ return ehca2ib_return_code(h_ret);
+ }
+
+ port_num = my_qp->init_attr.port_num;
+ qp_type = my_qp->init_attr.qp_type;
+
+ /* no support for IB_QPT_SMI yet */
+ if (qp_type == IB_QPT_GSI) {
+ struct ib_event event;
+ ehca_info(ibqp->device, "device %s: port %x is inactive.",
+ shca->ib_device.name, port_num);
+ event.device = &shca->ib_device;
+ event.event = IB_EVENT_PORT_ERR;
+ event.element.port_num = port_num;
+ shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
+ ib_dispatch_event(&event);
+ }
+
+ ipz_queue_dtor(&my_qp->ipz_rqueue);
+ ipz_queue_dtor(&my_qp->ipz_squeue);
+ kmem_cache_free(qp_cache, my_qp);
+ return 0;
+}
+
+int ehca_init_qp_cache(void)
+{
+ qp_cache = kmem_cache_create("ehca_cache_qp",
+ sizeof(struct ehca_qp), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!qp_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ehca_cleanup_qp_cache(void)
+{
+ if (qp_cache)
+ kmem_cache_destroy(qp_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
new file mode 100644
index 00000000000..b46bda1bf85
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -0,0 +1,653 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * post_send/recv, poll_cq, req_notify
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm-powerpc/system.h>
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+#include "hipz_fns.h"
+
+static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
+ struct ehca_wqe *wqe_p,
+ struct ib_recv_wr *recv_wr)
+{
+ u8 cnt_ds;
+ if (unlikely((recv_wr->num_sge < 0) ||
+ (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
+ ehca_gen_err("Invalid number of WQE SGE. "
+			     "num_sge=%x max_nr_of_sg=%x",
+ recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
+ return -EINVAL; /* invalid SG list length */
+ }
+
+ /* clear wqe header until sglist */
+ memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
+
+ wqe_p->work_request_id = recv_wr->wr_id;
+ wqe_p->nr_of_data_seg = recv_wr->num_sge;
+
+ for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
+ wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
+ recv_wr->sg_list[cnt_ds].addr;
+ wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
+ recv_wr->sg_list[cnt_ds].lkey;
+ wqe_p->u.all_rcv.sg_list[cnt_ds].length =
+ recv_wr->sg_list[cnt_ds].length;
+ }
+
+ if (ehca_debug_level) {
+ ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
+		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
+ }
+
+ return 0;
+}
+
+#if defined(DEBUG_GSI_SEND_WR)
+
+/* need ib_mad struct */
+#include <rdma/ib_mad.h>
+
+static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
+{
+	int idx = 0;
+ int j;
+ while (send_wr) {
+ struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
+ struct ib_sge *sge = send_wr->sg_list;
+ ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
+			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
+ send_wr->num_sge, send_wr->send_flags,
+ send_wr->opcode);
+ if (mad_hdr) {
+ ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
+ "mgmt_class=%x class_version=%x method=%x "
+ "status=%x class_specific=%x tid=%lx "
+ "attr_id=%x resv=%x attr_mod=%x",
+ idx, mad_hdr->base_version,
+ mad_hdr->mgmt_class,
+ mad_hdr->class_version, mad_hdr->method,
+ mad_hdr->status, mad_hdr->class_specific,
+ mad_hdr->tid, mad_hdr->attr_id,
+ mad_hdr->resv,
+ mad_hdr->attr_mod);
+ }
+ for (j = 0; j < send_wr->num_sge; j++) {
+ u8 *data = (u8 *) abs_to_virt(sge->addr);
+ ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
+ "lkey=%x",
+ idx, j, data, sge->length, sge->lkey);
+ /* assume length is n*16 */
+ ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
+ idx, j);
+ sge++;
+ } /* eof for j */
+ idx++;
+ send_wr = send_wr->next;
+ } /* eof while send_wr */
+}
+
+#endif /* DEBUG_GSI_SEND_WR */
+
+static inline int ehca_write_swqe(struct ehca_qp *qp,
+ struct ehca_wqe *wqe_p,
+ const struct ib_send_wr *send_wr)
+{
+ u32 idx;
+ u64 dma_length;
+ struct ehca_av *my_av;
+ u32 remote_qkey = send_wr->wr.ud.remote_qkey;
+
+ if (unlikely((send_wr->num_sge < 0) ||
+ (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
+ ehca_gen_err("Invalid number of WQE SGE. "
+			     "num_sge=%x max_nr_of_sg=%x",
+ send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
+ return -EINVAL; /* invalid SG list length */
+ }
+
+ /* clear wqe header until sglist */
+ memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
+
+ wqe_p->work_request_id = send_wr->wr_id;
+
+ switch (send_wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ wqe_p->optype = WQE_OPTYPE_SEND;
+ break;
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
+ break;
+ case IB_WR_RDMA_READ:
+ wqe_p->optype = WQE_OPTYPE_RDMAREAD;
+ break;
+ default:
+ ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
+ return -EINVAL; /* invalid opcode */
+ }
+
+ wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
+
+ wqe_p->wr_flag = 0;
+
+ if (send_wr->send_flags & IB_SEND_SIGNALED)
+ wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
+
+ if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
+ send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
+ /* this might not work as long as HW does not support it */
+ wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
+ wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
+ }
+
+ wqe_p->nr_of_data_seg = send_wr->num_sge;
+
+ switch (qp->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+		/* no break is intentional here */
+ case IB_QPT_UD:
+ /* IB 1.2 spec C10-15 compliance */
+ if (send_wr->wr.ud.remote_qkey & 0x80000000)
+ remote_qkey = qp->qkey;
+
+ wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
+ wqe_p->local_ee_context_qkey = remote_qkey;
+ if (!send_wr->wr.ud.ah) {
+ ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
+ return -EINVAL;
+ }
+ my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
+ wqe_p->u.ud_av.ud_av = my_av->av;
+
+ /*
+ * omitted check of IB_SEND_INLINE
+ * since HW does not support it
+ */
+ for (idx = 0; idx < send_wr->num_sge; idx++) {
+ wqe_p->u.ud_av.sg_list[idx].vaddr =
+ send_wr->sg_list[idx].addr;
+ wqe_p->u.ud_av.sg_list[idx].lkey =
+ send_wr->sg_list[idx].lkey;
+ wqe_p->u.ud_av.sg_list[idx].length =
+ send_wr->sg_list[idx].length;
+ } /* eof for idx */
+ if (qp->qp_type == IB_QPT_SMI ||
+ qp->qp_type == IB_QPT_GSI)
+ wqe_p->u.ud_av.ud_av.pmtu = 1;
+ if (qp->qp_type == IB_QPT_GSI) {
+ wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
+#ifdef DEBUG_GSI_SEND_WR
+ trace_send_wr_ud(send_wr);
+#endif /* DEBUG_GSI_SEND_WR */
+ }
+ break;
+
+ case IB_QPT_UC:
+ if (send_wr->send_flags & IB_SEND_FENCE)
+ wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
+ /* no break is intentional here */
+ case IB_QPT_RC:
+ /* TODO: atomic not implemented */
+ wqe_p->u.nud.remote_virtual_adress =
+ send_wr->wr.rdma.remote_addr;
+ wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;
+
+ /*
+ * omitted checking of IB_SEND_INLINE
+ * since HW does not support it
+ */
+ dma_length = 0;
+ for (idx = 0; idx < send_wr->num_sge; idx++) {
+ wqe_p->u.nud.sg_list[idx].vaddr =
+ send_wr->sg_list[idx].addr;
+ wqe_p->u.nud.sg_list[idx].lkey =
+ send_wr->sg_list[idx].lkey;
+ wqe_p->u.nud.sg_list[idx].length =
+ send_wr->sg_list[idx].length;
+ dma_length += send_wr->sg_list[idx].length;
+ } /* eof idx */
+ wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
+
+ break;
+
+ default:
+ ehca_gen_err("Invalid qptype=%x", qp->qp_type);
+ return -EINVAL;
+ }
+
+ if (ehca_debug_level) {
+ ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
+		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
+ }
+ return 0;
+}
+
+/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
+static inline void map_ib_wc_status(u32 cqe_status,
+ enum ib_wc_status *wc_status)
+{
+ if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
+ switch (cqe_status & 0x3F) {
+ case 0x01:
+ case 0x21:
+ *wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case 0x02:
+ case 0x22:
+ *wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case 0x03:
+ case 0x23:
+ *wc_status = IB_WC_LOC_EEC_OP_ERR;
+ break;
+ case 0x04:
+ case 0x24:
+ *wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case 0x05:
+ case 0x25:
+ *wc_status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case 0x06:
+ *wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case 0x07: /* remote error - look into bits 20:24 */
+ switch ((cqe_status
+ & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
+ case 0x0:
+ /*
+				 * PSN sequence error;
+				 * there is no matching ib_wc_status
+ */
+ *wc_status = IB_WC_GENERAL_ERR;
+ break;
+ case 0x1:
+ *wc_status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case 0x2:
+ *wc_status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case 0x3:
+ *wc_status = IB_WC_REM_OP_ERR;
+ break;
+ case 0x4:
+ *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
+ break;
+ }
+ break;
+ case 0x08:
+ *wc_status = IB_WC_RETRY_EXC_ERR;
+ break;
+ case 0x09:
+ *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case 0x0A:
+ case 0x2D:
+ *wc_status = IB_WC_REM_ABORT_ERR;
+ break;
+ case 0x0B:
+ case 0x2E:
+ *wc_status = IB_WC_INV_EECN_ERR;
+ break;
+ case 0x0C:
+ case 0x2F:
+ *wc_status = IB_WC_INV_EEC_STATE_ERR;
+ break;
+ case 0x0D:
+ *wc_status = IB_WC_BAD_RESP_ERR;
+ break;
+ case 0x10:
+ /* WQE purged */
+ *wc_status = IB_WC_WR_FLUSH_ERR;
+ break;
+ default:
+ *wc_status = IB_WC_FATAL_ERR;
+ }
+ } else
+ *wc_status = IB_WC_SUCCESS;
+}
+
+int ehca_post_send(struct ib_qp *qp,
+ struct ib_send_wr *send_wr,
+ struct ib_send_wr **bad_send_wr)
+{
+ struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ struct ib_send_wr *cur_send_wr;
+ struct ehca_wqe *wqe_p;
+ int wqe_cnt = 0;
+ int ret = 0;
+ unsigned long spl_flags;
+
+ /* LOCK the QUEUE */
+ spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+
+ /* loop processes list of send reqs */
+ for (cur_send_wr = send_wr; cur_send_wr != NULL;
+ cur_send_wr = cur_send_wr->next) {
+ u64 start_offset = my_qp->ipz_squeue.current_q_offset;
+ /* get pointer next to free WQE */
+ wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
+ if (unlikely(!wqe_p)) {
+ /* too many posted work requests: queue overflow */
+ if (bad_send_wr)
+ *bad_send_wr = cur_send_wr;
+ if (wqe_cnt == 0) {
+ ret = -ENOMEM;
+ ehca_err(qp->device, "Too many posted WQEs "
+ "qp_num=%x", qp->qp_num);
+ }
+ goto post_send_exit0;
+ }
+ /* write a SEND WQE into the QUEUE */
+ ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
+ /*
+ * if something failed,
+ * reset the free entry pointer to the start value
+ */
+ if (unlikely(ret)) {
+ my_qp->ipz_squeue.current_q_offset = start_offset;
+			if (bad_send_wr)
+				*bad_send_wr = cur_send_wr;
+ if (wqe_cnt == 0) {
+ ret = -EINVAL;
+ ehca_err(qp->device, "Could not write WQE "
+ "qp_num=%x", qp->qp_num);
+ }
+ goto post_send_exit0;
+ }
+ wqe_cnt++;
+ ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ my_qp, qp->qp_num, wqe_cnt);
+ } /* eof for cur_send_wr */
+
+post_send_exit0:
+ /* UNLOCK the QUEUE */
+ spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
+ iosync(); /* serialize GAL register access */
+ hipz_update_sqa(my_qp, wqe_cnt);
+ return ret;
+}
+
+int ehca_post_recv(struct ib_qp *qp,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
+ struct ib_recv_wr *cur_recv_wr;
+ struct ehca_wqe *wqe_p;
+ int wqe_cnt = 0;
+ int ret = 0;
+ unsigned long spl_flags;
+
+ /* LOCK the QUEUE */
+ spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
+
+	/* loop processes list of recv reqs */
+ for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
+ cur_recv_wr = cur_recv_wr->next) {
+ u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
+ /* get pointer next to free WQE */
+ wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
+ if (unlikely(!wqe_p)) {
+ /* too many posted work requests: queue overflow */
+ if (bad_recv_wr)
+ *bad_recv_wr = cur_recv_wr;
+ if (wqe_cnt == 0) {
+ ret = -ENOMEM;
+ ehca_err(qp->device, "Too many posted WQEs "
+ "qp_num=%x", qp->qp_num);
+ }
+ goto post_recv_exit0;
+ }
+ /* write a RECV WQE into the QUEUE */
+ ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
+ /*
+ * if something failed,
+ * reset the free entry pointer to the start value
+ */
+ if (unlikely(ret)) {
+ my_qp->ipz_rqueue.current_q_offset = start_offset;
+			if (bad_recv_wr)
+				*bad_recv_wr = cur_recv_wr;
+ if (wqe_cnt == 0) {
+ ret = -EINVAL;
+ ehca_err(qp->device, "Could not write WQE "
+ "qp_num=%x", qp->qp_num);
+ }
+ goto post_recv_exit0;
+ }
+ wqe_cnt++;
+ ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ my_qp, qp->qp_num, wqe_cnt);
+ } /* eof for cur_recv_wr */
+
+post_recv_exit0:
+ spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
+ iosync(); /* serialize GAL register access */
+ hipz_update_rqa(my_qp, wqe_cnt);
+ return ret;
+}
+
+/*
+ * ib_wc_opcode table converts ehca wc opcode to ib
+ * Since we use zero to indicate an invalid opcode, the actual ib opcode must
+ * be decremented by one
+ */
+static const u8 ib_wc_opcode[255] = {
+ [0x01] = IB_WC_RECV+1,
+ [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
+ [0x04] = IB_WC_BIND_MW+1,
+ [0x08] = IB_WC_FETCH_ADD+1,
+ [0x10] = IB_WC_COMP_SWAP+1,
+ [0x20] = IB_WC_RDMA_WRITE+1,
+ [0x40] = IB_WC_RDMA_READ+1,
+ [0x80] = IB_WC_SEND+1
+};
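+
+/*
+ * Example: a CQE with optype 0x80 decodes via ib_wc_opcode[0x80] - 1 to
+ * IB_WC_SEND; an optype without a table entry reads as zero and therefore
+ * decodes to -1, which the consumer below rejects as invalid.
+ */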
+
+/* internal function to poll one entry of cq */
+static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
+{
+ int ret = 0;
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ struct ehca_cqe *cqe;
+ int cqe_count = 0;
+
+poll_cq_one_read_cqe:
+ cqe = (struct ehca_cqe *)
+ ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
+ if (!cqe) {
+ ret = -EAGAIN;
+ ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
+ "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
+ goto poll_cq_one_exit0;
+ }
+
+ /* prevents loads being reordered across this point */
+ rmb();
+
+ cqe_count++;
+ if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
+		struct ehca_qp *qp = ehca_cq_get_qp(my_cq,
+						    cqe->local_qp_number);
+ int purgeflag;
+ unsigned long spl_flags;
+ if (!qp) {
+ ehca_err(cq->device, "cq_num=%x qp_num=%x "
+ "could not find qp -> ignore cqe",
+ my_cq->cq_number, cqe->local_qp_number);
+ ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
+ my_cq->cq_number, cqe->local_qp_number);
+ /* ignore this purged cqe */
+ goto poll_cq_one_read_cqe;
+ }
+ spin_lock_irqsave(&qp->spinlock_s, spl_flags);
+ purgeflag = qp->sqerr_purgeflag;
+ spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
+
+ if (purgeflag) {
+ ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
+ "src_qp=%x",
+ cqe->local_qp_number, cqe->remote_qp_number);
+ if (ehca_debug_level)
+ ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
+ cqe->local_qp_number,
+ cqe->remote_qp_number);
+ /*
+			 * ignore this cqe to avoid duplicate cqes for the
+			 * bad wqe that caused the send queue error, and
+			 * turn off the purge flag
+ */
+ qp->sqerr_purgeflag = 0;
+ goto poll_cq_one_read_cqe;
+ }
+ }
+
+ /* tracing cqe */
+ if (ehca_debug_level) {
+ ehca_dbg(cq->device,
+ "Received COMPLETION ehca_cq=%p cq_num=%x -----",
+ my_cq, my_cq->cq_number);
+ ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
+ my_cq, my_cq->cq_number);
+ ehca_dbg(cq->device,
+ "ehca_cq=%p cq_num=%x -------------------------",
+ my_cq, my_cq->cq_number);
+ }
+
+ /* we got a completion! */
+ wc->wr_id = cqe->work_request_id;
+
+ /* eval ib_wc_opcode */
+ wc->opcode = ib_wc_opcode[cqe->optype]-1;
+ if (unlikely(wc->opcode == -1)) {
+ ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
+ "ehca_cq=%p cq_num=%x",
+ cqe->optype, cqe->status, my_cq, my_cq->cq_number);
+ /* dump cqe for other infos */
+ ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
+ my_cq, my_cq->cq_number);
+		/* also update the queue pointer to throw away this entry */
+ goto poll_cq_one_exit0;
+ }
+ /* eval ib_wc_status */
+ if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+ /* complete with errors */
+ map_ib_wc_status(cqe->status, &wc->status);
+ wc->vendor_err = wc->status;
+ } else
+ wc->status = IB_WC_SUCCESS;
+
+ wc->qp_num = cqe->local_qp_number;
+ wc->byte_len = cqe->nr_bytes_transferred;
+ wc->pkey_index = cqe->pkey_index;
+ wc->slid = cqe->rlid;
+ wc->dlid_path_bits = cqe->dlid;
+ wc->src_qp = cqe->remote_qp_number;
+ wc->wc_flags = cqe->w_completion_flags;
+ wc->imm_data = cpu_to_be32(cqe->immediate_data);
+ wc->sl = cqe->service_level;
+
+ if (wc->status != IB_WC_SUCCESS)
+ ehca_dbg(cq->device,
+ "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
+ "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
+ "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
+ cqe->status, cqe->local_qp_number,
+ cqe->remote_qp_number, cqe->work_request_id, cqe);
+
+poll_cq_one_exit0:
+ if (cqe_count > 0)
+ hipz_update_feca(my_cq, cqe_count);
+
+ return ret;
+}
+
+int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
+{
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+ int nr;
+ struct ib_wc *current_wc = wc;
+ int ret = 0;
+ unsigned long spl_flags;
+
+ if (num_entries < 1) {
+ ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
+ "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
+ ret = -EINVAL;
+ goto poll_cq_exit0;
+ }
+
+ spin_lock_irqsave(&my_cq->spinlock, spl_flags);
+ for (nr = 0; nr < num_entries; nr++) {
+ ret = ehca_poll_cq_one(cq, current_wc);
+ if (ret)
+ break;
+ current_wc++;
+ } /* eof for nr */
+ spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
+ if (ret == -EAGAIN || !ret)
+ ret = nr;
+
+poll_cq_exit0:
+ return ret;
+}
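+
+/*
+ * Note: as with any ib_poll_cq implementation, a non-negative return value
+ * is the number of completions reaped (-EAGAIN from ehca_poll_cq_one only
+ * flags an empty queue and is folded into that count above); a negative
+ * errno is returned solely for invalid input.
+ */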
+
+int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
+{
+ struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
+
+ switch (cq_notify) {
+ case IB_CQ_SOLICITED:
+ hipz_set_cqx_n0(my_cq, 1);
+ break;
+ case IB_CQ_NEXT_COMP:
+ hipz_set_cqx_n1(my_cq, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
new file mode 100644
index 00000000000..9f16e9c7939
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -0,0 +1,111 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * SQP functions
+ *
+ * Authors: Khadija Souissi <souissi@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+#include "ehca_iverbs.h"
+#include "hcp_if.h"
+
+
+/**
+ * ehca_define_sqp - defines special queue pair 1 (GSI QP). When the special
+ * queue pair is created successfully, the corresponding port becomes active.
+ *
+ * Defining special queue pair 0 (SMI QP) is not supported yet.
+ *
+ * @qp_init_attr: queue pair init attributes with port and queue pair type
+ */
+
+u64 ehca_define_sqp(struct ehca_shca *shca,
+ struct ehca_qp *ehca_qp,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ u32 pma_qp_nr, bma_qp_nr;
+ u64 ret;
+ u8 port = qp_init_attr->port_num;
+ int counter;
+
+ shca->sport[port - 1].port_state = IB_PORT_DOWN;
+
+ switch (qp_init_attr->qp_type) {
+ case IB_QPT_SMI:
+ /* function not supported yet */
+ break;
+ case IB_QPT_GSI:
+ ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
+ ehca_qp->ipz_qp_handle,
+ ehca_qp->galpas.kernel,
+ (u32) qp_init_attr->port_num,
+ &pma_qp_nr, &bma_qp_nr);
+
+ if (ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device,
+ "Can't define AQP1 for port %x. rc=%lx",
+ port, ret);
+ return ret;
+ }
+ break;
+ default:
+ ehca_err(&shca->ib_device, "invalid qp_type=%x",
+ qp_init_attr->qp_type);
+ return H_PARAMETER;
+ }
+
+ for (counter = 0;
+ shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
+ counter < ehca_port_act_time;
+ counter++) {
+ ehca_dbg(&shca->ib_device, "... wait until port %x is active",
+ port);
+ msleep_interruptible(1000);
+ }
+
+ if (counter == ehca_port_act_time) {
+ ehca_err(&shca->ib_device, "Port %x is not active.", port);
+ return H_HARDWARE;
+ }
+
+ return H_SUCCESS;
+}
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
new file mode 100644
index 00000000000..9f56bb846d9
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -0,0 +1,172 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * auxiliary functions
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Khadija Souissi <souissik@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef EHCA_TOOLS_H
+#define EHCA_TOOLS_H
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/abs_addr.h>
+#include <asm/ibmebus.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+extern int ehca_debug_level;
+
+#define ehca_dbg(ib_dev, format, arg...) \
+ do { \
+ if (unlikely(ehca_debug_level)) \
+ dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
+ "PU%04x EHCA_DBG:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, \
+ ## arg); \
+ } while (0)
+
+#define ehca_info(ib_dev, format, arg...) \
+ dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_warn(ib_dev, format, arg...) \
+ dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, ## arg)
+
+#define ehca_err(ib_dev, format, arg...) \
+ dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, ## arg)
+
+/* use this one only if no ib_dev available */
+#define ehca_gen_dbg(format, arg...) \
+ do { \
+ if (unlikely(ehca_debug_level)) \
+ printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
+ get_paca()->paca_index, __FUNCTION__, ## arg); \
+ } while (0)
+
+#define ehca_gen_warn(format, arg...) \
+ do { \
+ if (unlikely(ehca_debug_level)) \
+ printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
+ get_paca()->paca_index, __FUNCTION__, ## arg); \
+ } while (0)
+
+#define ehca_gen_err(format, arg...) \
+ printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
+ get_paca()->paca_index, __FUNCTION__, ## arg)
+
+/**
+ * ehca_dmp - printk a memory block, whose length is n*16 bytes.
+ * Each line has the following layout:
+ * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
+ */
+#define ehca_dmp(adr, len, format, args...) \
+ do { \
+ unsigned int x; \
+ unsigned int l = (unsigned int)(len); \
+ unsigned char *deb = (unsigned char*)(adr); \
+ for (x = 0; x < l; x += 16) { \
+ printk("EHCA_DMP:%s" format \
+ " adr=%p ofs=%04x %016lx %016lx\n", \
+ __FUNCTION__, ##args, deb, x, \
+ *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
+ deb += 16; \
+ } \
+ } while (0)
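+
+/*
+ * Typical call, as used throughout this driver:
+ *	ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
+ * dumps a 64-byte CQE as four lines of two 8-byte hex words each.
+ */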
+
+/* define a bitmask, little endian version */
+#define EHCA_BMASK(pos,length) (((pos)<<16)+(length))
+
+/* define a bitmask, the ibm way... */
+#define EHCA_BMASK_IBM(from,to) (((63-(to))<<16)+((to)-(from)+1))
+
+/* internal function, don't use */
+#define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff)
+
+/* internal function, don't use */
+#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
+
+/**
+ * EHCA_BMASK_SET - return value shifted and masked by mask
+ * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
+ * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
+ * in variable
+ */
+#define EHCA_BMASK_SET(mask,value) \
+ ((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask))
+
+/**
+ * EHCA_BMASK_GET - extract a parameter from value by mask
+ */
+#define EHCA_BMASK_GET(mask,value) \
+ (EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
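+
+/*
+ * Worked example (illustrative): EHCA_BMASK_IBM(40, 47) describes an 8-bit
+ * field whose last IBM bit is 47, i.e. shift position 63 - 47 = 16, so the
+ * mask encodes ((16 << 16) + 8).  With that mask,
+ *	EHCA_BMASK_SET(EHCA_BMASK_IBM(40, 47), 0xAB) == 0xAB0000
+ *	EHCA_BMASK_GET(EHCA_BMASK_IBM(40, 47), 0xAB0000) == 0xAB
+ */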
+
+
+/* Converts ehca to ib return code */
+static inline int ehca2ib_return_code(u64 ehca_rc)
+{
+ switch (ehca_rc) {
+ case H_SUCCESS:
+ return 0;
+ case H_BUSY:
+ return -EBUSY;
+ case H_NO_MEM:
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
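+
+/*
+ * Typical use after a hipz_h_* call, e.g. in ehca_destroy_qp():
+ *	return ehca2ib_return_code(h_ret);
+ * maps H_SUCCESS/H_BUSY/H_NO_MEM to 0/-EBUSY/-ENOMEM for ib_verbs callers.
+ */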
+
+
+#endif /* EHCA_TOOLS_H */
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
new file mode 100644
index 00000000000..e08764e4aef
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -0,0 +1,392 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * userspace support verbs
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/current.h>
+
+#include "ehca_classes.h"
+#include "ehca_iverbs.h"
+#include "ehca_mrmw.h"
+#include "ehca_tools.h"
+#include "hcp_if.h"
+
+struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
+ struct ib_udata *udata)
+{
+ struct ehca_ucontext *my_context;
+
+ my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
+ if (!my_context) {
+ ehca_err(device, "Out of memory device=%p", device);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return &my_context->ib_ucontext;
+}
+
+int ehca_dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
+ return 0;
+}
+
+struct page *ehca_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ struct page *mypage = NULL;
+ u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
+ u32 idr_handle = fileoffset >> 32;
+ u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
+ u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
+ u32 cur_pid = current->tgid;
+ unsigned long flags;
+ struct ehca_cq *cq;
+ struct ehca_qp *qp;
+ struct ehca_pd *pd;
+ u64 offset;
+ void *vaddr;
+
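+	/*
+	 * The decoding above follows the mmap offset layout used by this
+	 * driver: bits 63..32 carry the idr handle, bits 31..28 the queue
+	 * type (CQ, QP, ...) and bits 27..24 the resource type (fw handle,
+	 * rqueue, squeue).
+	 */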
+ switch (q_type) {
+ case 1: /* CQ */
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ cq = idr_find(&ehca_cq_idr, idr_handle);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ /* make sure this mmap really belongs to the authorized user */
+ if (!cq) {
+ ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
+ return NOPAGE_SIGBUS;
+ }
+
+ if (cq->ownpid != cur_pid) {
+ ehca_err(cq->ib_cq.device,
+ "Invalid caller pid=%x ownpid=%x",
+ cur_pid, cq->ownpid);
+ return NOPAGE_SIGBUS;
+ }
+
+ if (rsrc_type == 2) {
+ ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
+ offset = address - vma->vm_start;
+ vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
+ ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
+ offset, vaddr);
+ mypage = virt_to_page(vaddr);
+ }
+ break;
+
+ case 2: /* QP */
+ spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ qp = idr_find(&ehca_qp_idr, idr_handle);
+ spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+ /* make sure this mmap really belongs to the authorized user */
+ if (!qp) {
+ ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
+ return NOPAGE_SIGBUS;
+ }
+
+ pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
+ if (pd->ownpid != cur_pid) {
+ ehca_err(qp->ib_qp.device,
+ "Invalid caller pid=%x ownpid=%x",
+ cur_pid, pd->ownpid);
+ return NOPAGE_SIGBUS;
+ }
+
+ if (rsrc_type == 2) { /* rqueue */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
+ offset = address - vma->vm_start;
+ vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
+ ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+ offset, vaddr);
+ mypage = virt_to_page(vaddr);
+ } else if (rsrc_type == 3) { /* squeue */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
+ offset = address - vma->vm_start;
+ vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
+ ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
+ offset, vaddr);
+ mypage = virt_to_page(vaddr);
+ }
+ break;
+
+ default:
+ ehca_gen_err("bad queue type %x", q_type);
+ return NOPAGE_SIGBUS;
+ }
+
+ if (!mypage) {
+ ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
+ return NOPAGE_SIGBUS;
+ }
+ get_page(mypage);
+
+ return mypage;
+}
+
+static struct vm_operations_struct ehcau_vm_ops = {
+ .nopage = ehca_nopage,
+};
+
+int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
+ u32 idr_handle = fileoffset >> 32;
+ u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
+ u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
+ u32 cur_pid = current->tgid;
+ u32 ret;
+ u64 vsize, physical;
+ unsigned long flags;
+ struct ehca_cq *cq;
+ struct ehca_qp *qp;
+ struct ehca_pd *pd;
+
+ switch (q_type) {
+ case 1: /* CQ */
+ spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ cq = idr_find(&ehca_cq_idr, idr_handle);
+ spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ /* make sure this mmap really belongs to the authorized user */
+ if (!cq)
+ return -EINVAL;
+
+ if (cq->ownpid != cur_pid) {
+ ehca_err(cq->ib_cq.device,
+ "Invalid caller pid=%x ownpid=%x",
+ cur_pid, cq->ownpid);
+ return -ENOMEM;
+ }
+
+ if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
+ return -EINVAL;
+
+ switch (rsrc_type) {
+ case 1: /* galpa fw handle */
+ ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
+ vma->vm_flags |= VM_RESERVED;
+ vsize = vma->vm_end - vma->vm_start;
+ if (vsize != EHCA_PAGESIZE) {
+ ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
+ vma->vm_end - vma->vm_start);
+ return -EINVAL;
+ }
+
+ physical = cq->galpas.user.fw_handle;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+
+ ehca_dbg(cq->ib_cq.device,
+ "vsize=%lx physical=%lx", vsize, physical);
+ ret = remap_pfn_range(vma, vma->vm_start,
+ physical >> PAGE_SHIFT, vsize,
+ vma->vm_page_prot);
+ if (ret) {
+ ehca_err(cq->ib_cq.device,
+ "remap_pfn_range() failed ret=%x",
+ ret);
+ return -ENOMEM;
+ }
+ break;
+
+ case 2: /* cq queue_addr */
+ ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &ehcau_vm_ops;
+ break;
+
+ default:
+ ehca_err(cq->ib_cq.device, "bad resource type %x",
+ rsrc_type);
+ return -EINVAL;
+ }
+ break;
+
+ case 2: /* QP */
+ spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ qp = idr_find(&ehca_qp_idr, idr_handle);
+ spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+
+ /* make sure this mmap really belongs to the authorized user */
+ if (!qp)
+ return -EINVAL;
+
+ pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
+ if (pd->ownpid != cur_pid) {
+ ehca_err(qp->ib_qp.device,
+ "Invalid caller pid=%x ownpid=%x",
+ cur_pid, pd->ownpid);
+ return -ENOMEM;
+ }
+
+ if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
+ return -EINVAL;
+
+ switch (rsrc_type) {
+ case 1: /* galpa fw handle */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
+ vma->vm_flags |= VM_RESERVED;
+ vsize = vma->vm_end - vma->vm_start;
+ if (vsize != EHCA_PAGESIZE) {
+ ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
+ vma->vm_end - vma->vm_start);
+ return -EINVAL;
+ }
+
+ physical = qp->galpas.user.fw_handle;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+
+ ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
+ vsize, physical);
+ ret = remap_pfn_range(vma, vma->vm_start,
+ physical >> PAGE_SHIFT, vsize,
+ vma->vm_page_prot);
+ if (ret) {
+ ehca_err(qp->ib_qp.device,
+ "remap_pfn_range() failed ret=%x",
+ ret);
+ return -ENOMEM;
+ }
+ break;
+
+ case 2: /* qp rqueue_addr */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &ehcau_vm_ops;
+ break;
+
+ case 3: /* qp squeue_addr */
+ ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &ehcau_vm_ops;
+ break;
+
+ default:
+ ehca_err(qp->ib_qp.device, "bad resource type %x",
+ rsrc_type);
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ ehca_gen_err("bad queue type %x", q_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
+ struct vm_area_struct **vma)
+{
+ down_write(&current->mm->mmap_sem);
+	*mapped = (void *)do_mmap(NULL, 0, length, PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS,
+ foffset);
+ up_write(&current->mm->mmap_sem);
+ if (!(*mapped)) {
+ ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
+ foffset, length);
+ return -EINVAL;
+ }
+
+ *vma = find_vma(current->mm, (u64)*mapped);
+ if (!(*vma)) {
+ down_write(&current->mm->mmap_sem);
+		do_munmap(current->mm, (unsigned long)*mapped, length);
+ up_write(&current->mm->mmap_sem);
+ ehca_gen_err("couldn't find vma queue=%p", *mapped);
+ return -EINVAL;
+ }
+ (*vma)->vm_flags |= VM_RESERVED;
+ (*vma)->vm_ops = &ehcau_vm_ops;
+
+ return 0;
+}
+
+int ehca_mmap_register(u64 physical, void **mapped,
+ struct vm_area_struct **vma)
+{
+ int ret;
+ unsigned long vsize;
+ /* ehca hw supports only 4k page */
+ ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
+ if (ret) {
+		ehca_gen_err("couldn't mmap physical=%lx", physical);
+ return ret;
+ }
+
+ (*vma)->vm_flags |= VM_RESERVED;
+ vsize = (*vma)->vm_end - (*vma)->vm_start;
+ if (vsize != EHCA_PAGESIZE) {
+ ehca_gen_err("invalid vsize=%lx",
+ (*vma)->vm_end - (*vma)->vm_start);
+ return -EINVAL;
+ }
+
+ (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
+ (*vma)->vm_flags |= VM_IO | VM_RESERVED;
+
+ ret = remap_pfn_range((*vma), (*vma)->vm_start,
+ physical >> PAGE_SHIFT, vsize,
+ (*vma)->vm_page_prot);
+ if (ret) {
+ ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int ehca_munmap(unsigned long addr, size_t len)
+{
+ int ret = 0;
+ struct mm_struct *mm = current->mm;
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+ }
+ return ret;
+}
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
new file mode 100644
index 00000000000..3fb46e67df8
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -0,0 +1,874 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Firmware Infiniband Interface code for POWER
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Gerd Bayer <gerd.bayer@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/hvcall.h>
+#include "ehca_tools.h"
+#include "hcp_if.h"
+#include "hcp_phyp.h"
+#include "hipz_fns.h"
+#include "ipz_pt_fn.h"
+
+#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
+#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
+#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
+#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
+
+#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
+#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
+
+#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
+
+#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
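+
+/*
+ * Example: H_ALL_RES_QP_MAX_SEND_SGE spans IBM bits 32..39 of r10, so
+ * EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE, parms->max_send_sge) places
+ * the 8-bit SGE count at shift position 63 - 39 = 24.
+ */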
+
+/* direct access qp controls */
+#define DAQP_CTRL_ENABLE 0x01
+#define DAQP_CTRL_SEND_COMP 0x20
+#define DAQP_CTRL_RECV_COMP 0x40
+
+static u32 get_longbusy_msecs(int longbusy_rc)
+{
+ switch (longbusy_rc) {
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ return 1;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ return 10;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ return 100;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ return 1000;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ return 10000;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ return 100000;
+ default:
+ return 1;
+ }
+}
+
+static long ehca_plpar_hcall_norets(unsigned long opcode,
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7)
+{
+ long ret;
+ int i, sleep_msecs;
+
+ ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
+ "arg5=%lx arg6=%lx arg7=%lx",
+ opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+
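+	/* retry up to 5 times if the hypervisor signals long busy */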
+ for (i = 0; i < 5; i++) {
+ ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
+ arg5, arg6, arg7);
+
+ if (H_IS_LONG_BUSY(ret)) {
+ sleep_msecs = get_longbusy_msecs(ret);
+ msleep_interruptible(sleep_msecs);
+ continue;
+ }
+
+ if (ret < H_SUCCESS)
+ ehca_gen_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx ",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7);
+
+ ehca_gen_dbg("opcode=%lx ret=%lx", opcode, ret);
+ return ret;
+ }
+
+ return H_BUSY;
+}
+
+static long ehca_plpar_hcall9(unsigned long opcode,
+ unsigned long *outs, /* array of 9 outputs */
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5,
+ unsigned long arg6,
+ unsigned long arg7,
+ unsigned long arg8,
+ unsigned long arg9)
+{
+ long ret;
+ int i, sleep_msecs;
+
+ ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
+ "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
+ opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
+ arg8, arg9);
+
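+	/* retry up to 5 times if the hypervisor signals long busy */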
+ for (i = 0; i < 5; i++) {
+ ret = plpar_hcall9(opcode, outs,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9);
+
+ if (H_IS_LONG_BUSY(ret)) {
+ sleep_msecs = get_longbusy_msecs(ret);
+ msleep_interruptible(sleep_msecs);
+ continue;
+ }
+
+ if (ret < H_SUCCESS)
+ ehca_gen_err("opcode=%lx ret=%lx"
+ " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+ " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+ " arg9=%lx"
+ " out1=%lx out2=%lx out3=%lx out4=%lx"
+ " out5=%lx out6=%lx out7=%lx out8=%lx"
+ " out9=%lx",
+ opcode, ret,
+ arg1, arg2, arg3, arg4, arg5,
+ arg6, arg7, arg8, arg9,
+ outs[0], outs[1], outs[2], outs[3],
+ outs[4], outs[5], outs[6], outs[7],
+ outs[8]);
+
+ ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
+ "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
+ "out9=%lx",
+ opcode, ret, outs[0], outs[1], outs[2], outs[3],
+ outs[4], outs[5], outs[6], outs[7], outs[8]);
+ return ret;
+ }
+
+ return H_BUSY;
+}
+
+u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_pfeq *pfeq,
+ const u32 neq_control,
+ const u32 number_of_entries,
+ struct ipz_eq_handle *eq_handle,
+ u32 *act_nr_of_entries,
+ u32 *act_pages,
+ u32 *eq_ist)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+ u64 allocate_controls;
+
+ /* resource type */
+ allocate_controls = 3ULL;
+
+ /* ISN is associated */
+ if (neq_control != 1)
+ allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
+ else /* notification event queue */
+ allocate_controls = (1ULL << 63) | allocate_controls;
+
+ ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+ adapter_handle.handle, /* r4 */
+ allocate_controls, /* r5 */
+ number_of_entries, /* r6 */
+ 0, 0, 0, 0, 0, 0);
+ eq_handle->handle = outs[0];
+ *act_nr_of_entries = (u32)outs[3];
+ *act_pages = (u32)outs[4];
+ *eq_ist = (u32)outs[5];
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+ ehca_gen_err("Not enough resource - ret=%lx ", ret);
+
+ return ret;
+}
+
+u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
+ struct ipz_eq_handle eq_handle,
+ const u64 event_mask)
+{
+ return ehca_plpar_hcall_norets(H_RESET_EVENTS,
+ adapter_handle.handle, /* r4 */
+ eq_handle.handle, /* r5 */
+ event_mask, /* r6 */
+ 0, 0, 0, 0);
+}
+
+u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_cq *cq,
+ struct ehca_alloc_cq_parms *param)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+ adapter_handle.handle, /* r4 */
+ 2, /* r5 */
+ param->eq_handle.handle, /* r6 */
+ cq->token, /* r7 */
+ param->nr_cqe, /* r8 */
+ 0, 0, 0, 0);
+ cq->ipz_cq_handle.handle = outs[0];
+ param->act_nr_of_entries = (u32)outs[3];
+ param->act_pages = (u32)outs[4];
+
+ if (ret == H_SUCCESS)
+ hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+ ehca_gen_err("Not enough resources. ret=%lx", ret);
+
+ return ret;
+}
+
+u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_qp *qp,
+ struct ehca_alloc_qp_parms *parms)
+{
+ u64 ret;
+ u64 allocate_controls;
+ u64 max_r10_reg;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+ u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
+ u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
+ int daqp_ctrl = parms->daqp_ctrl;
+
+ allocate_controls =
+ EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
+ (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
+ (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
+ (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
+ parms->ud_av_l_key_ctl)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
+
+ max_r10_reg =
+ EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
+ max_nr_send_wqes)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
+ max_nr_receive_wqes)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
+ parms->max_send_sge)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
+ parms->max_recv_sge);
+
+ ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+ adapter_handle.handle, /* r4 */
+ allocate_controls, /* r5 */
+ qp->send_cq->ipz_cq_handle.handle,
+ qp->recv_cq->ipz_cq_handle.handle,
+ parms->ipz_eq_handle.handle,
+ ((u64)qp->token << 32) | parms->pd.value,
+ max_r10_reg, /* r10 */
+ parms->ud_av_l_key_ctl, /* r11 */
+ 0);
+ qp->ipz_qp_handle.handle = outs[0];
+ qp->real_qp_num = (u32)outs[1];
+	parms->act_nr_send_wqes =
+ (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
+ parms->act_nr_recv_wqes =
+ (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
+ parms->act_nr_send_sges =
+ (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
+ parms->act_nr_recv_sges =
+ (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
+ parms->nr_sq_pages =
+ (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
+ parms->nr_rq_pages =
+ (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
+
+ if (ret == H_SUCCESS)
+ hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+ ehca_gen_err("Not enough resources. ret=%lx", ret);
+
+ return ret;
+}
+
+u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
+ const u8 port_id,
+ struct hipz_query_port *query_port_response_block)
+{
+ u64 ret;
+ u64 r_cb = virt_to_abs(query_port_response_block);
+
+ if (r_cb & (EHCA_PAGESIZE-1)) {
+ ehca_gen_err("response block not page aligned");
+ return H_PARAMETER;
+ }
+
+ ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
+ adapter_handle.handle, /* r4 */
+ port_id, /* r5 */
+ r_cb, /* r6 */
+ 0, 0, 0, 0);
+
+ if (ehca_debug_level)
+ ehca_dmp(query_port_response_block, 64, "response_block");
+
+ return ret;
+}
+
+u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
+ struct hipz_query_hca *query_hca_rblock)
+{
+ u64 r_cb = virt_to_abs(query_hca_rblock);
+
+ if (r_cb & (EHCA_PAGESIZE-1)) {
+ ehca_gen_err("response_block=%p not page aligned",
+ query_hca_rblock);
+ return H_PARAMETER;
+ }
+
+ return ehca_plpar_hcall_norets(H_QUERY_HCA,
+ adapter_handle.handle, /* r4 */
+ r_cb, /* r5 */
+ 0, 0, 0, 0, 0);
+}
+
+u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 resource_handle,
+ const u64 logical_address_of_page,
+ u64 count)
+{
+ return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
+ adapter_handle.handle, /* r4 */
+ queue_type | pagesize << 8, /* r5 */
+ resource_handle, /* r6 */
+ logical_address_of_page, /* r7 */
+ count, /* r8 */
+ 0, 0);
+}
+
+u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_eq_handle eq_handle,
+ struct ehca_pfeq *pfeq,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count)
+{
+ if (count != 1) {
+		ehca_gen_err("Page counter=%lx", count);
+ return H_PARAMETER;
+ }
+ return hipz_h_register_rpage(adapter_handle,
+ pagesize,
+ queue_type,
+ eq_handle.handle,
+ logical_address_of_page, count);
+}
+
+u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
+ u32 ist)
+{
+ u64 ret;
+ ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
+ adapter_handle.handle, /* r4 */
+ ist, /* r5 */
+ 0, 0, 0, 0, 0);
+
+ if (ret != H_SUCCESS && ret != H_BUSY)
+ ehca_gen_err("Could not query interrupt state.");
+
+ return ret;
+}
+
+u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_cq_handle cq_handle,
+ struct ehca_pfcq *pfcq,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count,
+ const struct h_galpa gal)
+{
+ if (count != 1) {
+ ehca_gen_err("Page counter=%lx", count);
+ return H_PARAMETER;
+ }
+
+ return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+ cq_handle.handle, logical_address_of_page,
+ count);
+}
+
+u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count,
+ const struct h_galpa galpa)
+{
+ if (count != 1) {
+ ehca_gen_err("Page counter=%lx", count);
+ return H_PARAMETER;
+ }
+
+	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+				     qp_handle.handle, logical_address_of_page,
+				     count);
+}
+
+u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ void **log_addr_next_sq_wqe2processed,
+ void **log_addr_next_rq_wqe2processed,
+ int dis_and_get_function_code)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
+ adapter_handle.handle, /* r4 */
+ dis_and_get_function_code, /* r5 */
+ qp_handle.handle, /* r6 */
+ 0, 0, 0, 0, 0, 0);
+	if (log_addr_next_sq_wqe2processed)
+		*log_addr_next_sq_wqe2processed = (void *)outs[0];
+	if (log_addr_next_rq_wqe2processed)
+		*log_addr_next_rq_wqe2processed = (void *)outs[1];
+
+ return ret;
+}
+
+u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ const u64 update_mask,
+ struct hcp_modify_qp_control_block *mqpcb,
+ struct h_galpa gal)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+ ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ update_mask, /* r6 */
+ virt_to_abs(mqpcb), /* r7 */
+ 0, 0, 0, 0, 0);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+ ehca_gen_err("Insufficient resources ret=%lx", ret);
+
+ return ret;
+}
+
+u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ struct hcp_modify_qp_control_block *qqpcb,
+ struct h_galpa gal)
+{
+ return ehca_plpar_hcall_norets(H_QUERY_QP,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ virt_to_abs(qqpcb), /* r6 */
+ 0, 0, 0, 0);
+}
+
+u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_qp *qp)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = hcp_galpas_dtor(&qp->galpas);
+ if (ret) {
+ ehca_gen_err("Could not destruct qp->galpas");
+ return H_RESOURCE;
+ }
+ ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
+ adapter_handle.handle, /* r4 */
+ /* function code */
+ 1, /* r5 */
+ qp->ipz_qp_handle.handle, /* r6 */
+ 0, 0, 0, 0, 0, 0);
+ if (ret == H_HARDWARE)
+ ehca_gen_err("HCA not operational. ret=%lx", ret);
+
+ ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ qp->ipz_qp_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0);
+
+ if (ret == H_RESOURCE)
+ ehca_gen_err("Resource still in use. ret=%lx", ret);
+
+ return ret;
+}
+
+u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+ u32 port)
+{
+ return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ port, /* r6 */
+ 0, 0, 0, 0);
+}
+
+u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+		       u32 port, u32 *pma_qp_nr,
+		       u32 *bma_qp_nr)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ port, /* r6 */
+ 0, 0, 0, 0, 0, 0);
+ *pma_qp_nr = (u32)outs[0];
+ *bma_qp_nr = (u32)outs[1];
+
+ if (ret == H_ALIAS_EXIST)
+ ehca_gen_err("AQP1 already exists. ret=%lx", ret);
+
+ return ret;
+}
+
+u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+ u16 mcg_dlid,
+ u64 subnet_prefix, u64 interface_id)
+{
+ u64 ret;
+
+ ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ mcg_dlid, /* r6 */
+ interface_id, /* r7 */
+ subnet_prefix, /* r8 */
+ 0, 0);
+
+ if (ret == H_NOT_ENOUGH_RESOURCES)
+ ehca_gen_err("Not enough resources. ret=%lx", ret);
+
+ return ret;
+}
+
+u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+ u16 mcg_dlid,
+ u64 subnet_prefix, u64 interface_id)
+{
+ return ehca_plpar_hcall_norets(H_DETACH_MCQP,
+ adapter_handle.handle, /* r4 */
+ qp_handle.handle, /* r5 */
+ mcg_dlid, /* r6 */
+ interface_id, /* r7 */
+ subnet_prefix, /* r8 */
+ 0, 0);
+}
+
+u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_cq *cq,
+ u8 force_flag)
+{
+ u64 ret;
+
+ ret = hcp_galpas_dtor(&cq->galpas);
+ if (ret) {
+ ehca_gen_err("Could not destruct cp->galpas");
+ return H_RESOURCE;
+ }
+
+ ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ cq->ipz_cq_handle.handle, /* r5 */
+ force_flag != 0 ? 1L : 0L, /* r6 */
+ 0, 0, 0, 0);
+
+ if (ret == H_RESOURCE)
+ ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret);
+
+ return ret;
+}
+
+u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_eq *eq)
+{
+ u64 ret;
+
+ ret = hcp_galpas_dtor(&eq->galpas);
+ if (ret) {
+ ehca_gen_err("Could not destruct eq->galpas");
+ return H_RESOURCE;
+ }
+
+ ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ eq->ipz_eq_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0);
+
+ if (ret == H_RESOURCE)
+ ehca_gen_err("Resource in use. ret=%lx ", ret);
+
+ return ret;
+}
+
+u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const u64 vaddr,
+ const u64 length,
+ const u32 access_ctrl,
+ const struct ipz_pd pd,
+ struct ehca_mr_hipzout_parms *outparms)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
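+	/* resource type in r5: 5 apparently selects MR allocation (6 is MW) */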
+ ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+ adapter_handle.handle, /* r4 */
+ 5, /* r5 */
+ vaddr, /* r6 */
+ length, /* r7 */
+ (((u64)access_ctrl) << 32ULL), /* r8 */
+ pd.value, /* r9 */
+ 0, 0, 0);
+ outparms->handle.handle = outs[0];
+ outparms->lkey = (u32)outs[2];
+ outparms->rkey = (u32)outs[3];
+
+ return ret;
+}
+
+u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count)
+{
+ u64 ret;
+
+ if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
+ ehca_gen_err("logical_address_of_page not on a 4k boundary "
+ "adapter_handle=%lx mr=%p mr_handle=%lx "
+ "pagesize=%x queue_type=%x "
+ "logical_address_of_page=%lx count=%lx",
+ adapter_handle.handle, mr,
+ mr->ipz_mr_handle.handle, pagesize, queue_type,
+ logical_address_of_page, count);
+ ret = H_PARAMETER;
+ } else
+ ret = hipz_h_register_rpage(adapter_handle, pagesize,
+ queue_type,
+ mr->ipz_mr_handle.handle,
+ logical_address_of_page, count);
+ return ret;
+}
+
+u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ struct ehca_mr_hipzout_parms *outparms)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
+ adapter_handle.handle, /* r4 */
+ mr->ipz_mr_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0, 0, 0);
+ outparms->len = outs[0];
+ outparms->vaddr = outs[1];
+ outparms->acl = outs[4] >> 32;
+ outparms->lkey = (u32)(outs[5] >> 32);
+ outparms->rkey = (u32)(outs[5] & (0xffffffff));
+
+ return ret;
+}
+
+u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr)
+{
+ return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ mr->ipz_mr_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0);
+}
+
+u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const u64 vaddr_in,
+ const u64 length,
+ const u32 access_ctrl,
+ const struct ipz_pd pd,
+ const u64 mr_addr_cb,
+ struct ehca_mr_hipzout_parms *outparms)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
+ adapter_handle.handle, /* r4 */
+ mr->ipz_mr_handle.handle, /* r5 */
+ vaddr_in, /* r6 */
+ length, /* r7 */
+ /* r8 */
+ ((((u64)access_ctrl) << 32ULL) | pd.value),
+ mr_addr_cb, /* r9 */
+ 0, 0, 0);
+ outparms->vaddr = outs[1];
+ outparms->lkey = (u32)outs[2];
+ outparms->rkey = (u32)outs[3];
+
+ return ret;
+}
+
+u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const struct ehca_mr *orig_mr,
+ const u64 vaddr_in,
+ const u32 access_ctrl,
+ const struct ipz_pd pd,
+ struct ehca_mr_hipzout_parms *outparms)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
+ adapter_handle.handle, /* r4 */
+ orig_mr->ipz_mr_handle.handle, /* r5 */
+ vaddr_in, /* r6 */
+ (((u64)access_ctrl) << 32ULL), /* r7 */
+ pd.value, /* r8 */
+ 0, 0, 0, 0);
+ outparms->handle.handle = outs[0];
+ outparms->lkey = (u32)outs[2];
+ outparms->rkey = (u32)outs[3];
+
+ return ret;
+}
+
+u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mw *mw,
+ const struct ipz_pd pd,
+ struct ehca_mw_hipzout_parms *outparms)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
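+	/* resource type in r5: 6 apparently selects MW allocation (5 is MR) */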
+ ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
+ adapter_handle.handle, /* r4 */
+ 6, /* r5 */
+ pd.value, /* r6 */
+ 0, 0, 0, 0, 0, 0);
+ outparms->handle.handle = outs[0];
+ outparms->rkey = (u32)outs[3];
+
+ return ret;
+}
+
+u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mw *mw,
+ struct ehca_mw_hipzout_parms *outparms)
+{
+ u64 ret;
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+ ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
+ adapter_handle.handle, /* r4 */
+ mw->ipz_mw_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0, 0, 0);
+ outparms->rkey = (u32)outs[3];
+
+ return ret;
+}
+
+u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mw *mw)
+{
+ return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+ adapter_handle.handle, /* r4 */
+ mw->ipz_mw_handle.handle, /* r5 */
+ 0, 0, 0, 0, 0);
+}
+
+u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
+		      const u64 resource_handle,
+ void *rblock,
+ unsigned long *byte_count)
+{
+ u64 r_cb = virt_to_abs(rblock);
+
+ if (r_cb & (EHCA_PAGESIZE-1)) {
+ ehca_gen_err("rblock not page aligned.");
+ return H_PARAMETER;
+ }
+
+ return ehca_plpar_hcall_norets(H_ERROR_DATA,
+ adapter_handle.handle,
+				       resource_handle,
+ r_cb,
+ 0, 0, 0, 0);
+}
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
new file mode 100644
index 00000000000..587ebd47095
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -0,0 +1,261 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Firmware Infiniband Interface code for POWER
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Gerd Bayer <gerd.bayer@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HCP_IF_H__
+#define __HCP_IF_H__
+
+#include "ehca_classes.h"
+#include "ehca_tools.h"
+#include "hipz_hw.h"
+
+/*
+ * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
+ * the resources and creates the empty EQPT (ring).
+ */
+u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_pfeq *pfeq,
+ const u32 neq_control,
+ const u32 number_of_entries,
+ struct ipz_eq_handle *eq_handle,
+			     u32 *act_nr_of_entries,
+			     u32 *act_pages,
+			     u32 *eq_ist);
+
+u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
+ struct ipz_eq_handle eq_handle,
+ const u64 event_mask);
+/*
+ * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
+ * the resources and creates the empty CQPT (ring).
+ */
+u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_cq *cq,
+ struct ehca_alloc_cq_parms *param);
+
+/*
+ * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
+ * initializes the resources and creates the empty QPPTs (two rings).
+ */
+u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_qp *qp,
+ struct ehca_alloc_qp_parms *parms);
+
+u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
+ const u8 port_id,
+ struct hipz_query_port *query_port_response_block);
+
+u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
+ struct hipz_query_hca *query_hca_rblock);
+
+/*
+ * hipz_h_register_rpage is the internal helper shared by all
+ * H_REGISTER_RPAGE callers.
+ */
+u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 resource_handle,
+ const u64 logical_address_of_page,
+ u64 count);
+
+u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_eq_handle eq_handle,
+ struct ehca_pfeq *pfeq,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count);
+
+u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
+			   u32 ist);
+
+u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_cq_handle cq_handle,
+ struct ehca_pfcq *pfcq,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count,
+ const struct h_galpa gal);
+
+u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count,
+ const struct h_galpa galpa);
+
+u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ void **log_addr_next_sq_wqe_tb_processed,
+ void **log_addr_next_rq_wqe_tb_processed,
+ int dis_and_get_function_code);
+enum hcall_sigt {
+ HCALL_SIGT_NO_CQE = 0,
+ HCALL_SIGT_BY_WQE = 1,
+ HCALL_SIGT_EVERY = 2
+};
+
+u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ const u64 update_mask,
+ struct hcp_modify_qp_control_block *mqpcb,
+ struct h_galpa gal);
+
+u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct ehca_pfqp *pfqp,
+ struct hcp_modify_qp_control_block *qqpcb,
+ struct h_galpa gal);
+
+u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_qp *qp);
+
+u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+ u32 port);
+
+u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+		       u32 port, u32 *pma_qp_nr,
+		       u32 *bma_qp_nr);
+
+u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+ u16 mcg_dlid,
+ u64 subnet_prefix, u64 interface_id);
+
+u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
+ const struct ipz_qp_handle qp_handle,
+ struct h_galpa gal,
+ u16 mcg_dlid,
+ u64 subnet_prefix, u64 interface_id);
+
+u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_cq *cq,
+ u8 force_flag);
+
+u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
+ struct ehca_eq *eq);
+
+/*
+ * hipz_h_alloc_resource_mr allocates MR resources in HW and FW, initializes
+ * the resources.
+ */
+u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const u64 vaddr,
+ const u64 length,
+ const u32 access_ctrl,
+ const struct ipz_pd pd,
+ struct ehca_mr_hipzout_parms *outparms);
+
+/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
+u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const u8 pagesize,
+ const u8 queue_type,
+ const u64 logical_address_of_page,
+ const u64 count);
+
+/* hipz_h_query_mr queries MR in HW and FW */
+u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ struct ehca_mr_hipzout_parms *outparms);
+
+/* hipz_h_free_resource_mr frees MR resources in HW and FW */
+u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr);
+
+/* hipz_h_reregister_pmr reregisters MR in HW and FW */
+u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const u64 vaddr_in,
+ const u64 length,
+ const u32 access_ctrl,
+ const struct ipz_pd pd,
+ const u64 mr_addr_cb,
+ struct ehca_mr_hipzout_parms *outparms);
+
+/* hipz_h_register_smr registers a shared MR in HW and FW */
+u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mr *mr,
+ const struct ehca_mr *orig_mr,
+ const u64 vaddr_in,
+ const u32 access_ctrl,
+ const struct ipz_pd pd,
+ struct ehca_mr_hipzout_parms *outparms);
+
+/*
+ * hipz_h_alloc_resource_mw allocates MW resources in HW and FW, initializes
+ * the resources.
+ */
+u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mw *mw,
+ const struct ipz_pd pd,
+ struct ehca_mw_hipzout_parms *outparms);
+
+/* hipz_h_query_mw queries MW in HW and FW */
+u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mw *mw,
+ struct ehca_mw_hipzout_parms *outparms);
+
+/* hipz_h_free_resource_mw frees MW resources in HW and FW */
+u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
+ const struct ehca_mw *mw);
+
+u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
+		      const u64 resource_handle,
+ void *rblock,
+ unsigned long *byte_count);
+
+#endif /* __HCP_IF_H__ */
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
new file mode 100644
index 00000000000..0b1a4772c78
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -0,0 +1,80 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * load store abstraction for ehca register access with tracing
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_classes.h"
+#include "hipz_hw.h"
+
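+/*
+ * map one eHCA register page (physical address) into kernel virtual
+ * address space for access via hipz_galpa_load/store
+ */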
+int hcall_map_page(u64 physaddr, u64 *mapaddr)
+{
+	*mapaddr = (u64)ioremap(physaddr, EHCA_PAGESIZE);
+	if (!*mapaddr)
+		return -ENOMEM;
+	return 0;
+}
+
+int hcall_unmap_page(u64 mapaddr)
+{
+	iounmap((volatile void __iomem *)mapaddr);
+ return 0;
+}
+
+int hcp_galpas_ctor(struct h_galpas *galpas,
+ u64 paddr_kernel, u64 paddr_user)
+{
+ int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
+ if (ret)
+ return ret;
+
+ galpas->user.fw_handle = paddr_user;
+
+ return 0;
+}
+
+int hcp_galpas_dtor(struct h_galpas *galpas)
+{
+ if (galpas->kernel.fw_handle) {
+ int ret = hcall_unmap_page(galpas->kernel.fw_handle);
+ if (ret)
+ return ret;
+ }
+
+ galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
new file mode 100644
index 00000000000..5305c2a3ed9
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -0,0 +1,90 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * Firmware calls
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
+ * Gerd Bayer <gerd.bayer@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HCP_PHYP_H__
+#define __HCP_PHYP_H__
+
+/*
+ * eHCA page (mapped into memory)
+ * resource to access eHCA register pages in CPU address space
+ */
+struct h_galpa {
+ u64 fw_handle;
+	/* for pSeries this is a 64-bit memory address where
+	   I/O memory is mapped into CPU address space (kv) */
+};
+
+/*
+ * resource to access eHCA address space registers, all types
+ */
+struct h_galpas {
+	u32 pid;		/* PID of userspace galpa checking */
+ struct h_galpa user; /* user space accessible resource,
+ set to 0 if unused */
+ struct h_galpa kernel; /* kernel space accessible resource,
+ set to 0 if unused */
+};
+
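+/*
+ * raw 64-bit accesses to an eHCA register page; for kernel galpas,
+ * fw_handle is the kernel virtual address set up by hcall_map_page()
+ */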
+static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
+{
+ u64 addr = galpa.fw_handle + offset;
+ return *(volatile u64 __force *)addr;
+}
+
+static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
+{
+ u64 addr = galpa.fw_handle + offset;
+ *(volatile u64 __force *)addr = value;
+}
+
+int hcp_galpas_ctor(struct h_galpas *galpas,
+ u64 paddr_kernel, u64 paddr_user);
+
+int hcp_galpas_dtor(struct h_galpas *galpas);
+
+int hcall_map_page(u64 physaddr, u64 *mapaddr);
+
+int hcall_unmap_page(u64 mapaddr);
+
+#endif /* __HCP_PHYP_H__ */
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/infiniband/hw/ehca/hipz_fns.h
new file mode 100644
index 00000000000..9dac93d0214
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hipz_fns.h
@@ -0,0 +1,68 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * HW abstraction register functions
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIPZ_FNS_H__
+#define __HIPZ_FNS_H__
+
+#include "ehca_classes.h"
+#include "hipz_hw.h"
+
+#include "hipz_fns_core.h"
+
+#define hipz_galpa_store_eq(gal, offset, value) \
+ hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_eq(gal, offset) \
+ hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
+
+#define hipz_galpa_store_qped(gal, offset, value) \
+ hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_qped(gal, offset) \
+ hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
+
+#define hipz_galpa_store_mrmw(gal, offset, value) \
+ hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_mrmw(gal, offset) \
+ hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
+
+#endif /* __HIPZ_FNS_H__ */
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h
new file mode 100644
index 00000000000..20898a15344
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hipz_fns_core.h
@@ -0,0 +1,100 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * HW abstraction register functions
+ *
+ * Authors: Christoph Raisch <raisch@de.ibm.com>
+ * Heiko J Schick <schickhj@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIPZ_FNS_CORE_H__
+#define __HIPZ_FNS_CORE_H__
+
+#include "hcp_phyp.h"
+#include "hipz_hw.h"
+
+#define hipz_galpa_store_cq(gal, offset, value) \
+ hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_cq(gal, offset) \
+ hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
+
+#define hipz_galpa_store_qp(gal, offset, value) \
+	hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
+
+#define hipz_galpa_load_qp(gal, offset) \
+	hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
+
+static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
+{
+ /* ringing doorbell :-) */
+ hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
+ EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
+}
+
+static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
+{
+ /* ringing doorbell :-) */
+ hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
+ EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
+}
+
+static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
+{
+ hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
+ EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
+}
+
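+/*
+ * the register is read back after each store below; presumably this
+ * flushes the MMIO write before the function returns
+ */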
+static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
+{
+ u64 cqx_n0_reg;
+
+ hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
+ EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
+ value));
+ cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
+}
+
+static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
+{
+ u64 cqx_n1_reg;
+
+ hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
+ EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
+ cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
+}
+
+#endif /* __HIPZ_FNS_CORE_H__ */
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
new file mode 100644
index 00000000000..3fc92b031c5
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -0,0 +1,388 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * eHCA register definitions
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __HIPZ_HW_H__
+#define __HIPZ_HW_H__
+
+#include "ehca_tools.h"
+
+/* QP Table Entry Memory Map */
+struct hipz_qptemm {
+ u64 qpx_hcr;
+ u64 qpx_c;
+ u64 qpx_herr;
+ u64 qpx_aer;
+/* 0x20*/
+ u64 qpx_sqa;
+ u64 qpx_sqc;
+ u64 qpx_rqa;
+ u64 qpx_rqc;
+/* 0x40*/
+ u64 qpx_st;
+ u64 qpx_pmstate;
+ u64 qpx_pmfa;
+ u64 qpx_pkey;
+/* 0x60*/
+ u64 qpx_pkeya;
+ u64 qpx_pkeyb;
+ u64 qpx_pkeyc;
+ u64 qpx_pkeyd;
+/* 0x80*/
+ u64 qpx_qkey;
+ u64 qpx_dqp;
+ u64 qpx_dlidp;
+ u64 qpx_portp;
+/* 0xa0*/
+ u64 qpx_slidp;
+ u64 qpx_slidpp;
+ u64 qpx_dlida;
+ u64 qpx_porta;
+/* 0xc0*/
+ u64 qpx_slida;
+ u64 qpx_slidpa;
+ u64 qpx_slvl;
+ u64 qpx_ipd;
+/* 0xe0*/
+ u64 qpx_mtu;
+ u64 qpx_lato;
+ u64 qpx_rlimit;
+ u64 qpx_rnrlimit;
+/* 0x100*/
+ u64 qpx_t;
+ u64 qpx_sqhp;
+ u64 qpx_sqptp;
+ u64 qpx_nspsn;
+/* 0x120*/
+ u64 qpx_nspsnhwm;
+ u64 reserved1;
+ u64 qpx_sdsi;
+ u64 qpx_sdsbc;
+/* 0x140*/
+ u64 qpx_sqwsize;
+ u64 qpx_sqwts;
+ u64 qpx_lsn;
+ u64 qpx_nssn;
+/* 0x160 */
+ u64 qpx_mor;
+ u64 qpx_cor;
+ u64 qpx_sqsize;
+ u64 qpx_erc;
+/* 0x180*/
+ u64 qpx_rnrrc;
+ u64 qpx_ernrwt;
+ u64 qpx_rnrresp;
+ u64 qpx_lmsna;
+/* 0x1a0 */
+ u64 qpx_sqhpc;
+ u64 qpx_sqcptp;
+ u64 qpx_sigt;
+ u64 qpx_wqecnt;
+/* 0x1c0*/
+ u64 qpx_rqhp;
+ u64 qpx_rqptp;
+ u64 qpx_rqsize;
+ u64 qpx_nrr;
+/* 0x1e0*/
+ u64 qpx_rdmac;
+ u64 qpx_nrpsn;
+ u64 qpx_lapsn;
+ u64 qpx_lcr;
+/* 0x200*/
+ u64 qpx_rwc;
+ u64 qpx_rwva;
+ u64 qpx_rdsi;
+ u64 qpx_rdsbc;
+/* 0x220*/
+ u64 qpx_rqwsize;
+ u64 qpx_crmsn;
+ u64 qpx_rdd;
+ u64 qpx_larpsn;
+/* 0x240*/
+ u64 qpx_pd;
+ u64 qpx_scqn;
+ u64 qpx_rcqn;
+ u64 qpx_aeqn;
+/* 0x260*/
+ u64 qpx_aaelog;
+ u64 qpx_ram;
+ u64 qpx_rdmaqe0;
+ u64 qpx_rdmaqe1;
+/* 0x280*/
+ u64 qpx_rdmaqe2;
+ u64 qpx_rdmaqe3;
+ u64 qpx_nrpsnhwm;
+/* 0x298*/
+ u64 reserved[(0x400 - 0x298) / 8];
+/* 0x400 extended data */
+ u64 reserved_ext[(0x500 - 0x400) / 8];
+/* 0x500 */
+ u64 reserved2[(0x1000 - 0x500) / 8];
+/* 0x1000 */
+};
+
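+/* EHCA_BMASK_IBM presumably uses IBM bit numbering, i.e. bit 0 is the MSB */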
+#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
+#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
+
+#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
+
+/* MRMWPT Entry Memory Map */
+struct hipz_mrmwmm {
+ /* 0x00 */
+ u64 mrx_hcr;
+
+ u64 mrx_c;
+ u64 mrx_herr;
+ u64 mrx_aer;
+ /* 0x20 */
+ u64 mrx_pp;
+ u64 reserved1;
+ u64 reserved2;
+ u64 reserved3;
+ /* 0x40 */
+ u64 reserved4[(0x200 - 0x40) / 8];
+ /* 0x200 */
+ u64 mrx_ctl[64];
+
+};
+
+#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
+
+struct hipz_qpedmm {
+ /* 0x00 */
+ u64 reserved0[(0x400) / 8];
+ /* 0x400 */
+ u64 qpedx_phh;
+ u64 qpedx_ppsgp;
+ /* 0x410 */
+ u64 qpedx_ppsgu;
+ u64 qpedx_ppdgp;
+ /* 0x420 */
+ u64 qpedx_ppdgu;
+ u64 qpedx_aph;
+ /* 0x430 */
+ u64 qpedx_apsgp;
+ u64 qpedx_apsgu;
+ /* 0x440 */
+ u64 qpedx_apdgp;
+ u64 qpedx_apdgu;
+ /* 0x450 */
+ u64 qpedx_apav;
+ u64 qpedx_apsav;
+ /* 0x460 */
+ u64 qpedx_hcr;
+ u64 reserved1[4];
+ /* 0x488 */
+ u64 qpedx_rrl0;
+ /* 0x490 */
+ u64 qpedx_rrrkey0;
+ u64 qpedx_rrva0;
+ /* 0x4a0 */
+ u64 reserved2;
+ u64 qpedx_rrl1;
+ /* 0x4b0 */
+ u64 qpedx_rrrkey1;
+ u64 qpedx_rrva1;
+ /* 0x4c0 */
+ u64 reserved3;
+ u64 qpedx_rrl2;
+ /* 0x4d0 */
+ u64 qpedx_rrrkey2;
+ u64 qpedx_rrva2;
+ /* 0x4e0 */
+ u64 reserved4;
+ u64 qpedx_rrl3;
+ /* 0x4f0 */
+ u64 qpedx_rrrkey3;
+ u64 qpedx_rrva3;
+};
+
+#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
+
+/* CQ Table Entry Memory Map */
+struct hipz_cqtemm {
+ u64 cqx_hcr;
+ u64 cqx_c;
+ u64 cqx_herr;
+ u64 cqx_aer;
+/* 0x20 */
+ u64 cqx_ptp;
+ u64 cqx_tp;
+ u64 cqx_fec;
+ u64 cqx_feca;
+/* 0x40 */
+ u64 cqx_ep;
+ u64 cqx_eq;
+/* 0x50 */
+ u64 reserved1;
+ u64 cqx_n0;
+/* 0x60 */
+ u64 cqx_n1;
+ u64 reserved2[(0x1000 - 0x60) / 8];
+/* 0x1000 */
+};
+
+#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63)
+#define CQX_FECADDER EHCA_BMASK_IBM(32, 63)
+#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
+#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
+
+#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
+
+/* EQ Table Entry Memory Map */
+struct hipz_eqtemm {
+ u64 eqx_hcr;
+ u64 eqx_c;
+
+ u64 eqx_herr;
+ u64 eqx_aer;
+/* 0x20 */
+ u64 eqx_ptp;
+ u64 eqx_tp;
+ u64 eqx_ssba;
+ u64 eqx_psba;
+
+/* 0x40 */
+ u64 eqx_cec;
+ u64 eqx_meql;
+ u64 eqx_xisbi;
+ u64 eqx_xisc;
+/* 0x60 */
+ u64 eqx_it;
+
+};
+
+#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
+
+/* access control defines for MR/MW */
+#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
+#define HIPZ_ACCESSCTRL_R_WRITE 0x00400000
+#define HIPZ_ACCESSCTRL_R_READ 0x00200000
+#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
+#define HIPZ_ACCESSCTRL_MW_BIND 0x00080000
+
+/* query hca response block */
+struct hipz_query_hca {
+ u32 cur_reliable_dg;
+ u32 cur_qp;
+ u32 cur_cq;
+ u32 cur_eq;
+ u32 cur_mr;
+ u32 cur_mw;
+ u32 cur_ee_context;
+ u32 cur_mcast_grp;
+ u32 cur_qp_attached_mcast_grp;
+ u32 reserved1;
+ u32 cur_ipv6_qp;
+ u32 cur_eth_qp;
+ u32 cur_hp_mr;
+ u32 reserved2[3];
+ u32 max_rd_domain;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_eq;
+ u32 max_mr;
+ u32 max_hp_mr;
+ u32 max_mw;
+ u32 max_mrwpte;
+ u32 max_special_mrwpte;
+ u32 max_rd_ee_context;
+ u32 max_mcast_grp;
+ u32 max_total_mcast_qp_attach;
+ u32 max_mcast_qp_attach;
+ u32 max_raw_ipv6_qp;
+ u32 max_raw_ethy_qp;
+ u32 internal_clock_frequency;
+ u32 max_pd;
+ u32 max_ah;
+ u32 max_cqe;
+ u32 max_wqes_wq;
+ u32 max_partitions;
+ u32 max_rr_ee_context;
+ u32 max_rr_qp;
+ u32 max_rr_hca;
+ u32 max_act_wqs_ee_context;
+ u32 max_act_wqs_qp;
+ u32 max_sge;
+ u32 max_sge_rd;
+ u32 memory_page_size_supported;
+ u64 max_mr_size;
+ u32 local_ca_ack_delay;
+ u32 num_ports;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 node_guid;
+ u64 hca_cap_indicators;
+ u32 data_counter_register_size;
+ u32 max_shared_rq;
+ u32 max_isns_eq;
+ u32 max_neq;
+} __attribute__ ((packed));
+
+/* query port response block */
+struct hipz_query_port {
+ u32 state;
+ u32 bad_pkey_cntr;
+ u32 lmc;
+ u32 lid;
+ u32 subnet_timeout;
+ u32 qkey_viol_cntr;
+ u32 sm_sl;
+ u32 sm_lid;
+ u32 capability_mask;
+ u32 init_type_reply;
+ u32 pkey_tbl_len;
+ u32 gid_tbl_len;
+ u64 gid_prefix;
+ u32 port_nr;
+ u16 pkey_entries[16];
+ u8 reserved1[32];
+ u32 trent_size;
+ u32 trbuf_size;
+ u64 max_msg_sz;
+ u32 max_mtu;
+ u32 vl_cap;
+ u8 reserved2[1900];
+ u64 guid_entries[255];
+} __attribute__ ((packed));
+
+#endif /* __HIPZ_HW_H__ */
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
new file mode 100644
index 00000000000..e028ff1588c
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -0,0 +1,149 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * internal queue handling
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_tools.h"
+#include "ipz_pt_fn.h"
+
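+/*
+ * returns the current queue page and advances the page iterator;
+ * returns NULL past the end of the queue or if the page is misaligned
+ */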
+void *ipz_qpageit_get_inc(struct ipz_queue *queue)
+{
+ void *ret = ipz_qeit_get(queue);
+ queue->current_q_offset += queue->pagesize;
+ if (queue->current_q_offset > queue->queue_length) {
+ queue->current_q_offset -= queue->pagesize;
+ ret = NULL;
+ }
+ if (((u64)ret) % EHCA_PAGESIZE) {
+ ehca_gen_err("ERROR!! not at PAGE-Boundary");
+ return NULL;
+ }
+ return ret;
+}
+
+void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
+{
+ void *ret = ipz_qeit_get(queue);
+ u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+
+ queue->current_q_offset += queue->qe_size;
+ if (queue->current_q_offset > last_entry_in_q) {
+ queue->current_q_offset = 0;
+ queue->toggle_state = (~queue->toggle_state) & 1;
+ }
+
+ return ret;
+}
+
+int ipz_queue_ctor(struct ipz_queue *queue,
+ const u32 nr_of_pages,
+ const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
+{
+ int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
+ int f;
+
+ if (pagesize > PAGE_SIZE) {
+ ehca_gen_err("FATAL ERROR: pagesize=%x is greater "
+ "than kernel page size", pagesize);
+ return 0;
+ }
+ if (!pages_per_kpage) {
+ ehca_gen_err("FATAL ERROR: invalid kernel page size. "
+ "pages_per_kpage=%x", pages_per_kpage);
+ return 0;
+ }
+ queue->queue_length = nr_of_pages * pagesize;
+ queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+ if (!queue->queue_pages) {
+ ehca_gen_err("ERROR!! didn't get the memory");
+ return 0;
+ }
+ memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
+ /*
+ * allocate pages for queue:
+ * outer loop allocates whole kernel pages (page aligned) and
+ * inner loop divides a kernel page into smaller hca queue pages
+ */
+ f = 0;
+ while (f < nr_of_pages) {
+		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
+ int k;
+ if (!kpage)
+ goto ipz_queue_ctor_exit0; /*NOMEM*/
+ for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) {
+ (queue->queue_pages)[f] = (struct ipz_page *)kpage;
+ kpage += EHCA_PAGESIZE;
+ f++;
+ }
+ }
+
+ queue->current_q_offset = 0;
+ queue->qe_size = qe_size;
+ queue->act_nr_of_sg = nr_of_sg;
+ queue->pagesize = pagesize;
+ queue->toggle_state = 1;
+ return 1;
+
+ ipz_queue_ctor_exit0:
+ ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
+ queue, f, nr_of_pages);
+ for (f = 0; f < nr_of_pages; f += pages_per_kpage) {
+ if (!(queue->queue_pages)[f])
+ break;
+ free_page((unsigned long)(queue->queue_pages)[f]);
+ }
+ return 0;
+}
+
+int ipz_queue_dtor(struct ipz_queue *queue)
+{
+ int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
+ int g;
+ int nr_pages;
+
+ if (!queue || !queue->queue_pages) {
+ ehca_gen_dbg("queue or queue_pages is NULL");
+ return 0;
+ }
+ nr_pages = queue->queue_length / queue->pagesize;
+ for (g = 0; g < nr_pages; g += pages_per_kpage)
+ free_page((unsigned long)(queue->queue_pages)[g]);
+ vfree(queue->queue_pages);
+
+ return 1;
+}
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
new file mode 100644
index 00000000000..2f13509d525
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -0,0 +1,247 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * internal queue handling
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Reinhard Ernst <rernst@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __IPZ_PT_FN_H__
+#define __IPZ_PT_FN_H__
+
+#define EHCA_PAGESHIFT 12
+#define EHCA_PAGESIZE 4096UL
+#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
+#define EHCA_PT_ENTRIES 512UL
+
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+
+/* struct generic ehca page */
+struct ipz_page {
+ u8 entries[EHCA_PAGESIZE];
+};
+
+/* struct generic queue in linux kernel virtual memory (kv) */
+struct ipz_queue {
+ u64 current_q_offset; /* current queue entry */
+
+ struct ipz_page **queue_pages; /* array of pages belonging to queue */
+ u32 qe_size; /* queue entry size */
+ u32 act_nr_of_sg;
+ u32 queue_length; /* queue length allocated in bytes */
+ u32 pagesize;
+ u32 toggle_state; /* toggle flag - per page */
+ u32 dummy3; /* 64 bit alignment */
+};
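+
+/*
+ * minimal usage sketch (values are illustrative only):
+ *	ipz_queue_ctor(&queue, nr_of_pages, EHCA_PAGESIZE, qe_size, 0);
+ *	entry = ipz_qeit_get_inc(&queue);
+ *	ipz_queue_dtor(&queue);
+ */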
+
+/*
+ * return current Queue Entry for a certain q_offset
+ * returns address (kv) of Queue Entry
+ */
+static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
+{
+ struct ipz_page *current_page;
+ if (q_offset >= queue->queue_length)
+ return NULL;
+ current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
+ return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
+}
+
+/*
+ * return current Queue Entry
+ * returns address (kv) of Queue Entry
+ */
+static inline void *ipz_qeit_get(struct ipz_queue *queue)
+{
+ return ipz_qeit_calc(queue, queue->current_q_offset);
+}
+
+/*
+ * return current Queue Page, increment Queue Page iterator from
+ * page to page in struct ipz_queue; the last increment returns NULL and
+ * does NOT wrap
+ * returns address (kv) of Queue Page
+ * warning don't use in parallel with ipz_qeit_get_inc()
+ */
+void *ipz_qpageit_get_inc(struct ipz_queue *queue);
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
+{
+ void *ret = ipz_qeit_get(queue);
+ queue->current_q_offset += queue->qe_size;
+ if (queue->current_q_offset >= queue->queue_length) {
+ queue->current_q_offset = 0;
+ /* toggle the valid flag */
+ queue->toggle_state = (~queue->toggle_state) & 1;
+ }
+
+ return ret;
+}
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns 0 and does not increment, if wrong valid state
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
+{
+ struct ehca_cqe *cqe = ipz_qeit_get(queue);
+ u32 cqe_flags = cqe->cqe_flags;
+
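+	/* bit 7 of cqe_flags presumably carries the CQE valid/toggle bit */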
+ if ((cqe_flags >> 7) != (queue->toggle_state & 1))
+ return NULL;
+
+ ipz_qeit_get_inc(queue);
+ return cqe;
+}
+
+/*
+ * returns and resets Queue Entry iterator
+ * returns address (kv) of first Queue Entry
+ */
+static inline void *ipz_qeit_reset(struct ipz_queue *queue)
+{
+ queue->current_q_offset = 0;
+ return ipz_qeit_get(queue);
+}
+
+/* struct generic page table */
+struct ipz_pt {
+ u64 entries[EHCA_PT_ENTRIES];
+};
+
+/* struct page table for a queue, only to be used in pf */
+struct ipz_qpt {
+ /* queue page tables (kv), use u64 because we know the element length */
+ u64 *qpts;
+ u32 n_qpts;
+ u32 n_ptes; /* number of page table entries */
+ u64 *current_pte_addr;
+};
+
+/*
+ * constructor for an ipz_queue_t, placement new for ipz_queue_t,
+ * new for all dependent data structures
+ * all QP Tables are the same
+ * flow:
+ * allocate+pin queue
+ * see ipz_qpt_ctor()
+ * returns true if ok, false if out of memory
+ */
+int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
+ const u32 pagesize, const u32 qe_size,
+ const u32 nr_of_sg);
+
+/*
+ * destructor for an ipz_queue_t
+ * -# free queue
+ * see ipz_queue_ctor()
+ * returns true if ok, false if queue was a NULL pointer or the free failed
+ */
+int ipz_queue_dtor(struct ipz_queue *queue);
+
+/*
+ * constructor for an ipz_qpt_t,
+ * placement new for struct ipz_queue, new for all dependent datastructors
+ * all QP Tables are the same,
+ * flow:
+ * -# allocate+pin queue
+ * -# initialise ptcb
+ * -# allocate+pin PTs
+ * -# link PTs to a ring, according to HCA Arch, set bit 62 if needed
+ * -# the ring must have room for exactly nr_of_PTEs
+ * see ipz_qpt_ctor()
+ */
+void ipz_qpt_ctor(struct ipz_qpt *qpt,
+ const u32 nr_of_qes,
+ const u32 pagesize,
+ const u32 qe_size,
+ const u8 lowbyte, const u8 toggle,
+		  u32 *act_nr_of_QEs, u32 *act_nr_of_pages);
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ * fix EQ page problems
+ */
+void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
+
+/*
+ * return current Event Queue Entry, increment Queue Entry iterator
+ * by one step in struct ipz_queue if valid, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns 0 and does not increment, if wrong valid state
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
+{
+ void *ret = ipz_qeit_get(queue);
+ u32 qe = *(u8 *) ret;
+ if ((qe >> 7) != (queue->toggle_state & 1))
+ return NULL;
+ ipz_qeit_eq_get_inc(queue); /* this is a good one */
+ return ret;
+}
+
+/* returns address (GX) of first queue entry */
+static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
+{
+ return be64_to_cpu(qpt->qpts[0]);
+}
+
+/* returns address (kv) of first page of queue page table */
+static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
+{
+ return qpt->qpts;
+}
+
+#endif /* __IPZ_PT_FN_H__ */
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 1db9489f1e8..574a678e7fd 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,16 +1,9 @@
-config IPATH_CORE
- tristate "QLogic InfiniPath Driver"
- depends on 64BIT && PCI_MSI && NET
- ---help---
- This is a low-level driver for QLogic InfiniPath host channel
- adapters (HCAs) based on the HT-400 and PE-800 chips.
-
config INFINIBAND_IPATH
- tristate "QLogic InfiniPath Verbs Driver"
- depends on IPATH_CORE && INFINIBAND
+ tristate "QLogic InfiniPath Driver"
+ depends on PCI_MSI && 64BIT && INFINIBAND
---help---
- This is a driver that provides InfiniBand verbs support for
- QLogic InfiniPath host channel adapters (HCAs). This
- allows these devices to be used with both kernel upper level
- protocols such as IP-over-InfiniBand as well as with userspace
- applications (in conjunction with InfiniBand userspace access).
+ This is a driver for QLogic InfiniPath host channel adapters,
+ including InfiniBand verbs support. This driver allows these
+ devices to be used with both kernel upper level protocols such
+ as IP-over-InfiniBand as well as with userspace applications
+ (in conjunction with InfiniBand userspace access).
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index b0bf7286413..5e29cb0095e 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -1,36 +1,35 @@
EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-DIPATH_KERN_TYPE=0
-obj-$(CONFIG_IPATH_CORE) += ipath_core.o
obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
-ipath_core-y := \
+ib_ipath-y := \
+ ipath_cq.o \
ipath_diag.o \
ipath_driver.o \
ipath_eeprom.o \
ipath_file_ops.o \
ipath_fs.o \
- ipath_ht400.o \
+ ipath_iba6110.o \
+ ipath_iba6120.o \
ipath_init_chip.o \
ipath_intr.o \
- ipath_layer.o \
- ipath_pe800.o \
- ipath_stats.o \
- ipath_sysfs.o \
- ipath_user_pages.o
-
-ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o
-
-ib_ipath-y := \
- ipath_cq.o \
ipath_keys.o \
+ ipath_layer.o \
ipath_mad.o \
+ ipath_mmap.o \
ipath_mr.o \
ipath_qp.o \
ipath_rc.o \
ipath_ruc.o \
ipath_srq.o \
+ ipath_stats.o \
+ ipath_sysfs.o \
ipath_uc.o \
ipath_ud.o \
- ipath_verbs.o \
- ipath_verbs_mcast.o
+ ipath_user_pages.o \
+ ipath_verbs_mcast.o \
+ ipath_verbs.o
+
+ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
+ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 062bd392e7e..f577905e3ac 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -106,9 +106,9 @@ struct infinipath_stats {
__u64 sps_ether_spkts;
/* number of "ethernet" packets received by driver */
__u64 sps_ether_rpkts;
- /* number of SMA packets sent by driver */
+ /* number of SMA packets sent by driver. Obsolete. */
__u64 sps_sma_spkts;
- /* number of SMA packets received by driver */
+ /* number of SMA packets received by driver. Obsolete. */
__u64 sps_sma_rpkts;
/* number of times all ports rcvhdrq was full and packet dropped */
__u64 sps_hdrqfull;
@@ -138,7 +138,7 @@ struct infinipath_stats {
__u64 sps_pageunlocks;
/*
* Number of packets dropped in kernel other than errors (ether
- * packets if ipath not configured, sma/mad, etc.)
+ * packets if ipath not configured, etc.)
*/
__u64 sps_krdrops;
/* pad for future growth */
@@ -153,8 +153,6 @@ struct infinipath_stats {
#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */
/* Device has been disabled via admin request */
#define IPATH_STATUS_ADMIN_DISABLED 0x4
-#define IPATH_STATUS_OIB_SMA 0x8 /* ipath_mad kernel SMA running */
-#define IPATH_STATUS_SMA 0x10 /* user SMA running */
/* Chip has been found and initted */
#define IPATH_STATUS_CHIP_PRESENT 0x20
/* IB link is at ACTIVE, usable for data traffic */
@@ -465,12 +463,11 @@ struct __ipath_sendpkt {
struct ipath_iovec sps_iov[4];
};
-/* Passed into SMA special file's ->read and ->write methods. */
-struct ipath_sma_pkt
-{
- __u32 unit; /* unit on which to send packet */
- __u64 data; /* address of payload in userspace */
- __u32 len; /* length of payload */
+/* Passed into diag data special file's ->write method. */
+struct ipath_diag_pkt {
+ __u32 unit;
+ __u64 data;
+ __u32 len;
};
/*
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 3efee341c9b..049221bc590 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -42,20 +42,28 @@
* @entry: work completion entry to add
* @sig: true if @entry is a solicitated entry
*
- * This may be called with one of the qp->s_lock or qp->r_rq.lock held.
+ * This may be called with qp->s_lock held.
*/
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
+ struct ipath_cq_wc *wc = cq->queue;
unsigned long flags;
+ u32 head;
u32 next;
spin_lock_irqsave(&cq->lock, flags);
- if (cq->head == cq->ibcq.cqe)
+ /*
+ * Note that the head pointer might be writable by user processes.
+ * Take care to verify it is a sane value.
+ */
+ head = wc->head;
+ if (head >= (unsigned) cq->ibcq.cqe) {
+ head = cq->ibcq.cqe;
next = 0;
- else
- next = cq->head + 1;
- if (unlikely(next == cq->tail)) {
+ } else
+ next = head + 1;
+ if (unlikely(next == wc->tail)) {
spin_unlock_irqrestore(&cq->lock, flags);
if (cq->ibcq.event_handler) {
struct ib_event ev;
@@ -67,8 +75,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
}
return;
}
- cq->queue[cq->head] = *entry;
- cq->head = next;
+ wc->queue[head] = *entry;
+ wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -101,19 +109,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
struct ipath_cq *cq = to_icq(ibcq);
+ struct ipath_cq_wc *wc = cq->queue;
unsigned long flags;
int npolled;
spin_lock_irqsave(&cq->lock, flags);
for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (cq->tail == cq->head)
+ if (wc->tail == wc->head)
break;
- *entry = cq->queue[cq->tail];
- if (cq->tail == cq->ibcq.cqe)
- cq->tail = 0;
+ *entry = wc->queue[wc->tail];
+ if (wc->tail >= cq->ibcq.cqe)
+ wc->tail = 0;
else
- cq->tail++;
+ wc->tail++;
}
spin_unlock_irqrestore(&cq->lock, flags);
@@ -160,38 +169,74 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
{
struct ipath_ibdev *dev = to_idev(ibdev);
struct ipath_cq *cq;
- struct ib_wc *wc;
+ struct ipath_cq_wc *wc;
struct ib_cq *ret;
- if (entries > ib_ipath_max_cqes) {
+ if (entries < 1 || entries > ib_ipath_max_cqes) {
ret = ERR_PTR(-EINVAL);
- goto bail;
+ goto done;
}
if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto done;
}
- /*
- * Need to use vmalloc() if we want to support large #s of
- * entries.
- */
+ /* Allocate the completion queue structure. */
cq = kmalloc(sizeof(*cq), GFP_KERNEL);
if (!cq) {
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto done;
}
/*
- * Need to use vmalloc() if we want to support large #s of entries.
+ * Allocate the completion queue entries and head/tail pointers.
+ * This is allocated separately so that it can be resized and
+ * also mapped into user space.
+ * We need to use vmalloc() in order to support mmap and large
+ * numbers of entries.
*/
- wc = vmalloc(sizeof(*wc) * (entries + 1));
+ wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
if (!wc) {
- kfree(cq);
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto bail_cq;
}
+
+ /*
+ * Return the address of the WC as the offset to mmap.
+ * See ipath_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ struct ipath_mmap_info *ip;
+ __u64 offset = (__u64) wc;
+ int err;
+
+ err = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_wc;
+ }
+
+ /* Allocate info for ipath_mmap(). */
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wc;
+ }
+ cq->ip = ip;
+ ip->context = context;
+ ip->obj = wc;
+ kref_init(&ip->ref);
+ ip->mmap_cnt = 0;
+ ip->size = PAGE_ALIGN(sizeof(*wc) +
+ sizeof(struct ib_wc) * entries);
+ spin_lock_irq(&dev->pending_lock);
+ ip->next = dev->pending_mmaps;
+ dev->pending_mmaps = ip;
+ spin_unlock_irq(&dev->pending_lock);
+ } else
+ cq->ip = NULL;
+
/*
* ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
* The number of entries should be >= the number requested or return
@@ -202,15 +247,22 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
cq->triggered = 0;
spin_lock_init(&cq->lock);
tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
- cq->head = 0;
- cq->tail = 0;
+ wc->head = 0;
+ wc->tail = 0;
cq->queue = wc;
ret = &cq->ibcq;
dev->n_cqs_allocated++;
+ goto done;
-bail:
+bail_wc:
+ vfree(wc);
+
+bail_cq:
+ kfree(cq);
+
+done:
return ret;
}
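The offset returned through ib_copy_to_udata() above is an opaque token
(the kernel virtual address of the WC ring) that userspace hands straight
back as an mmap offset; ipath_mmap() matches it against the pending list
built under dev->pending_lock. A rough sketch of the consumer side; the
struct mirrors and the per-entry size are assumptions about the ABI, not
part of this diff:

	#include <stdint.h>
	#include <sys/mman.h>

	struct wc_entry { uint8_t raw[88]; };	/* opaque ib_wc mirror; size assumed */
	struct cq_ring {
		uint32_t head;			/* next slot the kernel fills */
		uint32_t tail;			/* next slot userspace reads */
		struct wc_entry queue[1];	/* really cqe + 1 entries */
	};

	/* map the CQ ring using the token returned at create time (sketch) */
	static struct cq_ring *map_cq_ring(int uverbs_fd, uint64_t token,
					   uint32_t entries)
	{
		size_t sz = sizeof(struct cq_ring) +
			sizeof(struct wc_entry) * entries;
		void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			       uverbs_fd, (off_t) token);

		return p == MAP_FAILED ? NULL : (struct cq_ring *) p;
	}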
@@ -229,7 +281,10 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
tasklet_kill(&cq->comptask);
dev->n_cqs_allocated--;
- vfree(cq->queue);
+ if (cq->ip)
+ kref_put(&cq->ip->ref, ipath_release_mmap_info);
+ else
+ vfree(cq->queue);
kfree(cq);
return 0;
@@ -253,7 +308,7 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
spin_lock_irqsave(&cq->lock, flags);
/*
* Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
- * any other transitions.
+ * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
*/
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = notify;
@@ -264,46 +319,86 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
struct ipath_cq *cq = to_icq(ibcq);
- struct ib_wc *wc, *old_wc;
- u32 n;
+ struct ipath_cq_wc *old_wc = cq->queue;
+ struct ipath_cq_wc *wc;
+ u32 head, tail, n;
int ret;
+ if (cqe < 1 || cqe > ib_ipath_max_cqes) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
/*
* Need to use vmalloc() if we want to support large #s of entries.
*/
- wc = vmalloc(sizeof(*wc) * (cqe + 1));
+ wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
if (!wc) {
ret = -ENOMEM;
goto bail;
}
+ /*
+ * Return the address of the WC as the offset to mmap.
+ * See ipath_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ __u64 offset = (__u64) wc;
+
+ ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
+		if (ret) {
+			vfree(wc);
+			goto bail;
+		}
+ }
+
spin_lock_irq(&cq->lock);
- if (cq->head < cq->tail)
- n = cq->ibcq.cqe + 1 + cq->head - cq->tail;
+ /*
+ * Make sure head and tail are sane since they
+ * might be user writable.
+ */
+ head = old_wc->head;
+ if (head > (u32) cq->ibcq.cqe)
+ head = (u32) cq->ibcq.cqe;
+ tail = old_wc->tail;
+ if (tail > (u32) cq->ibcq.cqe)
+ tail = (u32) cq->ibcq.cqe;
+ if (head < tail)
+ n = cq->ibcq.cqe + 1 + head - tail;
else
- n = cq->head - cq->tail;
+ n = head - tail;
if (unlikely((u32)cqe < n)) {
spin_unlock_irq(&cq->lock);
vfree(wc);
ret = -EOVERFLOW;
goto bail;
}
- for (n = 0; cq->tail != cq->head; n++) {
- wc[n] = cq->queue[cq->tail];
- if (cq->tail == cq->ibcq.cqe)
- cq->tail = 0;
+ for (n = 0; tail != head; n++) {
+ wc->queue[n] = old_wc->queue[tail];
+ if (tail == (u32) cq->ibcq.cqe)
+ tail = 0;
else
- cq->tail++;
+ tail++;
}
cq->ibcq.cqe = cqe;
- cq->head = n;
- cq->tail = 0;
- old_wc = cq->queue;
+ wc->head = n;
+ wc->tail = 0;
cq->queue = wc;
spin_unlock_irq(&cq->lock);
vfree(old_wc);
+ if (cq->ip) {
+ struct ipath_ibdev *dev = to_idev(ibcq->device);
+ struct ipath_mmap_info *ip = cq->ip;
+
+ ip->obj = wc;
+ ip->size = PAGE_ALIGN(sizeof(*wc) +
+ sizeof(struct ib_wc) * cqe);
+ spin_lock_irq(&dev->pending_lock);
+ ip->next = dev->pending_mmaps;
+ dev->pending_mmaps = ip;
+ spin_unlock_irq(&dev->pending_lock);
+ }
+
ret = 0;
bail:
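Both ipath_cq_enter() and ipath_resize_cq() now follow the same
discipline: clamp the user-writable index into [0, cqe], then compute
occupancy with wraparound over a ring of cqe + 1 slots (head == tail
meaning empty). Factored out as an illustrative helper, not code from
this patch:

	/* force a possibly-garbage user index into the valid range */
	static inline u32 cq_clamp(u32 idx, u32 cqe)
	{
		return idx > cqe ? cqe : idx;
	}

	/* entries currently queued in a ring of cqe + 1 slots */
	static inline u32 cq_occupancy(u32 head, u32 tail, u32 cqe)
	{
		head = cq_clamp(head, cqe);
		tail = cq_clamp(tail, cqe);
		/* head < tail means the ring wrapped past slot cqe */
		return head >= tail ? head - tail : cqe + 1 + head - tail;
	}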
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index f415beda0d3..df69f0d80b8 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,7 +60,6 @@
#define __IPATH_USER_SEND 0x1000 /* use user mode send */
#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
-#define __IPATH_SMADBG 0x8000 /* sma packet debug */
#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
@@ -84,7 +83,6 @@
/* print mmap/nopage stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x0
#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
-#define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */
#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 147dd89e21c..28b6b46c106 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -41,11 +41,11 @@
* through the /sys/bus/pci resource mmap interface.
*/
+#include <linux/io.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include "ipath_kernel.h"
-#include "ipath_layer.h"
#include "ipath_common.h"
int ipath_diag_inuse;
@@ -274,6 +274,158 @@ bail:
return ret;
}
+static ssize_t ipath_diagpkt_write(struct file *fp,
+ const char __user *data,
+ size_t count, loff_t *off);
+
+static struct file_operations diagpkt_file_ops = {
+ .owner = THIS_MODULE,
+ .write = ipath_diagpkt_write,
+};
+
+static struct cdev *diagpkt_cdev;
+static struct class_device *diagpkt_class_dev;
+
+int __init ipath_diagpkt_add(void)
+{
+ return ipath_cdev_init(IPATH_DIAGPKT_MINOR,
+ "ipath_diagpkt", &diagpkt_file_ops,
+ &diagpkt_cdev, &diagpkt_class_dev);
+}
+
+void __exit ipath_diagpkt_remove(void)
+{
+ ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev);
+}
+
+/**
+ * ipath_diagpkt_write - write an IB packet
+ * @fp: the diag data device file pointer
+ * @data: ipath_diag_pkt structure saying where to get the packet
+ * @count: size of data to write
+ * @off: unused by this code
+ */
+static ssize_t ipath_diagpkt_write(struct file *fp,
+ const char __user *data,
+ size_t count, loff_t *off)
+{
+ u32 __iomem *piobuf;
+ u32 plen, clen, pbufn;
+ struct ipath_diag_pkt dp;
+ u32 *tmpbuf = NULL;
+ struct ipath_devdata *dd;
+ ssize_t ret = 0;
+ u64 val;
+
+ if (count < sizeof(dp)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ if (copy_from_user(&dp, data, sizeof(dp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ /* send count must be an exact number of dwords */
+ if (dp.len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ clen = dp.len >> 2;
+
+ dd = ipath_lookup(dp.unit);
+ if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
+ !dd->ipath_kregbase) {
+ ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
+ dp.unit);
+ ret = -ENODEV;
+ goto bail;
+ }
+
+ if (ipath_diag_inuse && !diag_set_link &&
+ !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+ diag_set_link = 1;
+ ipath_cdbg(VERBOSE, "Trying to set to set link active for "
+ "diag pkt\n");
+ ipath_set_linkstate(dd, IPATH_IB_LINKARM);
+ ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
+ }
+
+ if (!(dd->ipath_flags & IPATH_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
+ ret = -ENODEV;
+ goto bail;
+ }
+ val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
+ if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM &&
+ val != IPATH_IBSTATE_ACTIVE) {
+ ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
+ dd->ipath_unit, (unsigned long long) val);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* need total length before first word written */
+ /* +1 word is for the qword padding */
+ plen = sizeof(u32) + dp.len;
+
+ if ((plen + 4) > dd->ipath_ibmaxlen) {
+ ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
+ plen - 4, dd->ipath_ibmaxlen);
+ ret = -EINVAL;
+ goto bail; /* before writing pbc */
+ }
+ tmpbuf = vmalloc(plen);
+ if (!tmpbuf) {
+ dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
+ "failing\n");
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ if (copy_from_user(tmpbuf,
+ (const void __user *) (unsigned long) dp.data,
+ dp.len)) {
+ ret = -EFAULT;
+ goto bail;
+ }
+
+ piobuf = ipath_getpiobuf(dd, &pbufn);
+ if (!piobuf) {
+ ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
+ dd->ipath_unit);
+ ret = -EBUSY;
+ goto bail;
+ }
+
+ plen >>= 2; /* in dwords */
+
+ if (ipath_debug & __IPATH_PKTDBG)
+ ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
+ dd->ipath_unit, plen - 1, pbufn);
+
+ /* we have to flush after the PBC for correctness on some cpus
+ * or WC buffer can be written out of order */
+ writeq(plen, piobuf);
+ ipath_flush_wc();
+	/* copy all but the trigger word, then flush, so it's written
+ * to chip before trigger word, then write trigger word, then
+ * flush again, so packet is sent. */
+ __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
+ ipath_flush_wc();
+ __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+ ipath_flush_wc();
+
+ ret = sizeof(dp);
+
+bail:
+ vfree(tmpbuf);
+ return ret;
+}
+
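The flush-bracketed sequence above is the general PIO send contract on
this hardware: length word, flush, everything but the trigger word,
flush, trigger word, flush. Restated as a standalone sketch (the helper
name is illustrative; the calls are the driver's own primitives):

	/*
	 * Without each ipath_flush_wc() the CPU may merge or reorder the
	 * write-combined stores, and the chip could see the trigger word
	 * before the rest of the packet.
	 */
	static void pio_send(u32 __iomem *piobuf, const u32 *pkt,
			     u32 plen, u32 clen)
	{
		writeq(plen, piobuf);		/* PBC: length in dwords */
		ipath_flush_wc();		/* PBC visible before body */
		__iowrite32_copy(piobuf + 2, pkt, clen - 1); /* all but trigger */
		ipath_flush_wc();		/* body visible before trigger */
		__raw_writel(pkt[clen - 1], piobuf + clen + 1); /* trigger */
		ipath_flush_wc();		/* push the packet to the wire */
	}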
static int ipath_diag_release(struct inode *in, struct file *fp)
{
mutex_lock(&ipath_mutex);
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index f98518d912b..2108466c7e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,7 +39,7 @@
#include <linux/vmalloc.h>
#include "ipath_kernel.h"
-#include "ipath_layer.h"
+#include "ipath_verbs.h"
#include "ipath_common.h"
static void ipath_update_pio_bufs(struct ipath_devdata *);
@@ -51,8 +51,6 @@ const char *ipath_get_unit_name(int unit)
return iname;
}
-EXPORT_SYMBOL_GPL(ipath_get_unit_name);
-
#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "
@@ -60,13 +58,13 @@ EXPORT_SYMBOL_GPL(ipath_get_unit_name);
* The size has to be longer than this string, so we can append
* board/chip information to it in the init code.
*/
-const char ipath_core_version[] = IPATH_IDSTR "\n";
+const char ib_ipath_version[] = IPATH_IDSTR "\n";
static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);
-wait_queue_head_t ipath_sma_state_wait;
+wait_queue_head_t ipath_state_wait;
unsigned ipath_debug = __IPATH_INFO;
@@ -403,10 +401,10 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
/* setup the chip-specific functions, as early as possible. */
switch (ent->device) {
case PCI_DEVICE_ID_INFINIPATH_HT:
- ipath_init_ht400_funcs(dd);
+ ipath_init_iba6110_funcs(dd);
break;
case PCI_DEVICE_ID_INFINIPATH_PE800:
- ipath_init_pe800_funcs(dd);
+ ipath_init_iba6120_funcs(dd);
break;
default:
ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
@@ -440,7 +438,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
}
dd->ipath_pcirev = rev;
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ dd->ipath_kregbase = __ioremap(addr, len,
+ (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
+#else
dd->ipath_kregbase = ioremap_nocache(addr, len);
+#endif
if (!dd->ipath_kregbase) {
ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
@@ -503,7 +507,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
ipathfs_add_device(dd);
ipath_user_add(dd);
ipath_diag_add(dd);
- ipath_layer_add(dd);
+ ipath_register_ib_device(dd);
goto bail;
@@ -532,7 +536,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
return;
dd = pci_get_drvdata(pdev);
- ipath_layer_remove(dd);
+ ipath_unregister_ib_device(dd->verbs_dev);
ipath_diag_remove(dd);
ipath_user_remove(dd);
ipathfs_remove_device(dd);
@@ -607,21 +611,23 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
*
* wait up to msecs milliseconds for IB link state change to occur.
* For now, take the easy polling route. Currently used only by
- * ipath_layer_set_linkstate. Returns 0 if state reached, otherwise
+ * ipath_set_linkstate. Returns 0 if state reached, otherwise
* -ETIMEDOUT. The reached state can have multiple state bits set, for
* any of several transitions.
*/
-int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
+static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
+ int msecs)
{
- dd->ipath_sma_state_wanted = state;
- wait_event_interruptible_timeout(ipath_sma_state_wait,
+ dd->ipath_state_wanted = state;
+ wait_event_interruptible_timeout(ipath_state_wait,
(dd->ipath_flags & state),
msecs_to_jiffies(msecs));
- dd->ipath_sma_state_wanted = 0;
+ dd->ipath_state_wanted = 0;
if (!(dd->ipath_flags & state)) {
u64 val;
- ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n",
+ ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
+ " ms\n",
/* test INIT ahead of DOWN, both can be set */
(state & IPATH_LINKINIT) ? "INIT" :
((state & IPATH_LINKDOWN) ? "DOWN" :
@@ -807,58 +813,6 @@ bail:
return skb;
}
-/**
- * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
- * @dd: the infinipath device
- * @etail: the sk_buff number
- * @tlen: the total packet length
- * @hdr: the ethernet header
- *
- * Separate routine for better overall optimization
- */
-static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
- u32 tlen, struct ether_header *hdr)
-{
- u32 elen;
- u8 pad, *bthbytes;
- struct sk_buff *skb, *nskb;
-
- if (dd->ipath_port0_skbs &&
- hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) {
- /*
- * Allocate a new sk_buff to replace the one we give
- * to the network stack.
- */
- nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
- if (!nskb) {
- /* count OK packets that we drop */
- ipath_stats.sps_krdrops++;
- return;
- }
-
- bthbytes = (u8 *) hdr->bth;
- pad = (bthbytes[1] >> 4) & 3;
- /* +CRC32 */
- elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));
-
- skb = dd->ipath_port0_skbs[etail];
- dd->ipath_port0_skbs[etail] = nskb;
- skb_put(skb, elen);
-
- dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
- ((char __iomem *) dd->ipath_kregbase
- + dd->ipath_rcvegrbase), 0,
- virt_to_phys(nskb->data));
-
- __ipath_layer_rcv(dd, hdr, skb);
-
- /* another ether packet received */
- ipath_stats.sps_ether_rpkts++;
- }
- else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP)
- __ipath_layer_rcv_lid(dd, hdr);
-}
-
static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
u32 eflags,
u32 l,
@@ -972,26 +926,17 @@ reloop:
if (unlikely(eflags))
ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
- int ret = __ipath_verbs_rcv(dd, rc + 1,
- ebuf, tlen);
- if (ret == -ENODEV)
- ipath_cdbg(VERBOSE,
- "received IB packet, "
- "not SMA (QP=%x)\n", qp);
- if (dd->ipath_lli_counter)
- dd->ipath_lli_counter--;
-
- } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
- if (qp == IPATH_KD_QP &&
- bthbytes[0] == ipath_layer_rcv_opcode &&
- ebuf)
- ipath_rcv_layer(dd, etail, tlen,
- (struct ether_header *)hdr);
- else
- ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
- "qp=%x), len %x; ignored\n",
- etype, bthbytes[0], qp, tlen);
+ ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
+ if (dd->ipath_lli_counter)
+ dd->ipath_lli_counter--;
+ ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
+ "qp=%x), len %x; ignored\n",
+ etype, bthbytes[0], qp, tlen);
}
+ else if (etype == RCVHQ_RCV_TYPE_EAGER)
+ ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
+ "qp=%x), len %x; ignored\n",
+ etype, bthbytes[0], qp, tlen);
else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
be32_to_cpu(hdr->bth[0]) & 0xff);
@@ -1024,7 +969,8 @@ reloop:
*/
if (l == hdrqtail || (i && !(i&0xf))) {
u64 lval;
- if (l == hdrqtail) /* PE-800 interrupt only on last */
+ if (l == hdrqtail)
+ /* request IBA6120 interrupt only on last */
lval = dd->ipath_rhdrhead_intr_off | l;
else
lval = l;
@@ -1038,7 +984,7 @@ reloop:
}
if (!dd->ipath_rhdrhead_intr_off && !reloop) {
- /* HT-400 workaround; we can have a race clearing chip
+ /* IBA6110 workaround; we can have a race clearing chip
* interrupt with another interrupt about to be delivered,
* and can clear it before it is delivered on the GPIO
* workaround. By doing the extra check here for the
@@ -1211,7 +1157,7 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
*
* do appropriate marking as busy, etc.
* returns buffer number if one found (>=0), negative number is error.
- * Used by ipath_sma_send_pkt and ipath_layer_send
+ * Used by ipath_layer_send
*/
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
{
@@ -1317,13 +1263,6 @@ rescan:
goto bail;
}
- if (updated)
- /*
- * ran out of bufs, now some (at least this one we just
- * got) are now available, so tell the layered driver.
- */
- __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
-
/*
* set next starting place. Since it's just an optimization,
* it doesn't matter who wins on this, so no locking
@@ -1500,7 +1439,7 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
return ret;
}
-void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
+static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
{
static const char *what[4] = {
[0] = "DOWN",
@@ -1511,7 +1450,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
INFINIPATH_IBCC_LINKCMD_MASK;
- ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate "
+ ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate "
"is %s\n", dd->ipath_unit,
what[linkcmd],
ipath_ibcstatus_str[
@@ -1520,7 +1459,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
/* flush all queued sends when going to DOWN or INIT, to be sure that
- * they don't block SMA and other MAD packets */
+ * they don't block MAD packets */
if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
INFINIPATH_S_ABORT);
@@ -1534,6 +1473,180 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
dd->ipath_ibcctrl | which);
}
+int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+{
+ u32 lstate;
+ int ret;
+
+ switch (newstate) {
+ case IPATH_IB_LINKDOWN:
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case IPATH_IB_LINKDOWN_SLEEP:
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case IPATH_IB_LINKDOWN_DISABLE:
+ ipath_set_ib_lstate(dd,
+ INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
+ INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+ /* don't wait */
+ ret = 0;
+ goto bail;
+
+ case IPATH_IB_LINKINIT:
+ if (dd->ipath_flags & IPATH_LINKINIT) {
+ ret = 0;
+ goto bail;
+ }
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
+ INFINIPATH_IBCC_LINKCMD_SHIFT);
+ lstate = IPATH_LINKINIT;
+ break;
+
+ case IPATH_IB_LINKARM:
+ if (dd->ipath_flags & IPATH_LINKARMED) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(dd->ipath_flags &
+ (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
+ INFINIPATH_IBCC_LINKCMD_SHIFT);
+ /*
+ * Since the port can transition to ACTIVE by receiving
+ * a non VL 15 packet, wait for either state.
+ */
+ lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
+ break;
+
+ case IPATH_IB_LINKACTIVE:
+ if (dd->ipath_flags & IPATH_LINKACTIVE) {
+ ret = 0;
+ goto bail;
+ }
+ if (!(dd->ipath_flags & IPATH_LINKARMED)) {
+ ret = -EINVAL;
+ goto bail;
+ }
+ ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
+ INFINIPATH_IBCC_LINKCMD_SHIFT);
+ lstate = IPATH_LINKACTIVE;
+ break;
+
+ default:
+ ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
+ ret = -EINVAL;
+ goto bail;
+ }
+ ret = ipath_wait_linkstate(dd, lstate, 2000);
+
+bail:
+ return ret;
+}
+
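ipath_set_linkstate() encodes the legal port state machine: the DOWN
variants return without waiting, INIT is a no-op when already reached,
ARM requires INIT or ACTIVE, and ACTIVE requires ARM. A hedged sketch of
a full bring-up, using the same ARM-then-ACTIVE sequence that
ipath_diagpkt_write() relies on (helper name illustrative):

	static int bring_link_active(struct ipath_devdata *dd)
	{
		int ret;

		ret = ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		if (ret)
			return ret;	/* -EINVAL or wait timeout */
		/* ARM may already have become ACTIVE on packet receipt */
		return ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
	}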
+/**
+ * ipath_set_mtu - set the MTU
+ * @dd: the infinipath device
+ * @arg: the new MTU
+ *
+ * we can handle "any" incoming size, the issue here is whether we
+ * need to restrict our outgoing size. For now, we don't do any
+ * sanity checking on this, and we don't deal with what happens to
+ * programs that are already running when the size changes.
+ * NOTE: changing the MTU will usually cause the IBC to go back to
+ * link initialize (IPATH_IBSTATE_INIT) state...
+ */
+int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
+{
+ u32 piosize;
+ int changed = 0;
+ int ret;
+
+ /*
+ * mtu is IB data payload max. It's the largest power of 2 less
+ * than piosize (or even larger, since it only really controls the
+ * largest we can receive; we can send the max of the mtu and
+ * piosize). We check that it's one of the valid IB sizes.
+ */
+ if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
+ arg != 4096) {
+ ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
+ ret = -EINVAL;
+ goto bail;
+ }
+ if (dd->ipath_ibmtu == arg) {
+ ret = 0; /* same as current */
+ goto bail;
+ }
+
+ piosize = dd->ipath_ibmaxlen;
+ dd->ipath_ibmtu = arg;
+
+ if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
+ /* Only if it's not the initial value (or reset to it) */
+ if (piosize != dd->ipath_init_ibmaxlen) {
+ dd->ipath_ibmaxlen = piosize;
+ changed = 1;
+ }
+ } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
+ piosize = arg + IPATH_PIO_MAXIBHDR;
+ ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
+ "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
+ arg);
+ dd->ipath_ibmaxlen = piosize;
+ changed = 1;
+ }
+
+ if (changed) {
+ /*
+ * set the IBC maxpktlength to the size of our pio
+ * buffers in words
+ */
+ u64 ibc = dd->ipath_ibcctrl;
+ ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
+ INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
+
+ piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
+ dd->ipath_ibmaxlen = piosize;
+ piosize /= sizeof(u32); /* in words */
+ /*
+	 * one extra word for the ICRC, which we only send in diag test
+	 * pkt mode, so we don't need to worry about it for mtu
+ */
+ piosize += 1;
+
+ ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
+ dd->ipath_ibcctrl = ibc;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ dd->ipath_f_tidtemplate(dd);
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
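The IBC word count programmed above is the PIO buffer size minus the
two-dword PBC, converted to dwords, plus one word for the ICRC. A worked
example as a sketch; the 2048-byte buffer size is an assumption for
illustration only:

	static u32 ibc_maxpktlen_words(u32 piosize_bytes)
	{
		u32 words;

		piosize_bytes -= 2 * sizeof(u32); /* strip the 2-dword PBC */
		words = piosize_bytes / sizeof(u32); /* convert to dwords */
		return words + 1;		/* one extra word for ICRC */
	}
	/* e.g. ibc_maxpktlen_words(2048) == 511 */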
+int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
+{
+ dd->ipath_lid = arg;
+ dd->ipath_lmc = lmc;
+
+ return 0;
+}
+
/**
* ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
* @dd: the infinipath device
@@ -1637,13 +1750,6 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
INFINIPATH_IBCC_LINKINITCMD_SHIFT);
- /*
- * we are shutting down, so tell the layered driver. We don't do
- * this on just a link state change, much like ethernet, a cable
- * unplug, etc. doesn't change driver state
- */
- ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);
-
/* disable IBC */
dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
@@ -1743,7 +1849,7 @@ static int __init infinipath_init(void)
{
int ret;
- ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version);
+ ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
/*
* These must be called before the driver is registered with
@@ -1776,8 +1882,18 @@ static int __init infinipath_init(void)
goto bail_group;
}
+ ret = ipath_diagpkt_add();
+ if (ret < 0) {
+ printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
+ "diag data device: error %d\n", -ret);
+ goto bail_ipathfs;
+ }
+
goto bail;
+bail_ipathfs:
+ ipath_exit_ipathfs();
+
bail_group:
ipath_driver_remove_group(&ipath_driver.driver);
@@ -1888,6 +2004,8 @@ static void __exit infinipath_cleanup(void)
struct ipath_devdata *dd, *tmp;
unsigned long flags;
+ ipath_diagpkt_remove();
+
ipath_exit_ipathfs();
ipath_driver_remove_group(&ipath_driver.driver);
@@ -1998,5 +2116,22 @@ bail:
return ret;
}
+int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
+{
+ u64 val;
+	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
+		return -1;
+	if (dd->ipath_rx_pol_inv != new_pol_inv) {
+ dd->ipath_rx_pol_inv = new_pol_inv;
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
+ val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
+ INFINIPATH_XGXS_RX_POL_SHIFT);
+ val |= ((u64)dd->ipath_rx_pol_inv) <<
+ INFINIPATH_XGXS_RX_POL_SHIFT;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
+ }
+ return 0;
+}
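ipath_set_rx_pol_inv() uses the driver's standard read-modify-write idiom
for a register field: clear the field under its shifted mask, OR in the
new value, write back. The same shape appears in both serdes bringup
hunks below; as a generic sketch (helper name illustrative):

	/* replace one field of a 64-bit chip register value */
	static u64 kreg_set_field(u64 regval, u64 mask, u32 shift, u64 newval)
	{
		regval &= ~(mask << shift);		/* clear old field */
		regval |= (newval & mask) << shift;	/* insert new one */
		return regval;
	}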
module_init(infinipath_init);
module_exit(infinipath_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index bbaa70e57db..29930e22318 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -39,7 +39,6 @@
#include <asm/pgtable.h>
#include "ipath_kernel.h"
-#include "ipath_layer.h"
#include "ipath_common.h"
static int ipath_open(struct inode *, struct file *);
@@ -985,15 +984,17 @@ static int mmap_piobufs(struct vm_area_struct *vma,
* write combining behavior we want on the PIO buffers!
*/
- if (vma->vm_flags & VM_READ) {
- dev_info(&dd->pcidev->dev,
- "Can't map piobufs as readable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
+#if defined(__powerpc__)
+ /* There isn't a generic way to specify writethrough mappings */
+ pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+#endif
- /* don't allow them to later change to readable with mprotect */
+ /*
+	 * don't allow them to later change the mapping to readable with
+	 * mprotect (for the normal case where it isn't mapped readable)
+ */
vma->vm_flags &= ~VM_MAYREAD;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
@@ -1109,7 +1110,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
ret = mmap_rcvegrbufs(vma, pd);
else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
/*
- * The rcvhdrq itself; readonly except on HT-400 (so have
+ * The rcvhdrq itself; readonly except on HT (so have
* to allow writable mapping), multiple pages, contiguous
* from an i/o perspective.
*/
@@ -1149,6 +1150,7 @@ static unsigned int ipath_poll(struct file *fp,
struct ipath_portdata *pd;
u32 head, tail;
int bit;
+ unsigned pollflag = 0;
struct ipath_devdata *dd;
pd = port_fp(fp);
@@ -1185,9 +1187,12 @@ static unsigned int ipath_poll(struct file *fp,
clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
pd->port_rcvwait_to++;
}
+ else
+ pollflag = POLLIN | POLLRDNORM;
}
else {
/* it's already happened; don't do wait_event overhead */
+ pollflag = POLLIN | POLLRDNORM;
pd->port_rcvnowait++;
}
@@ -1195,7 +1200,7 @@ static unsigned int ipath_poll(struct file *fp,
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
- return 0;
+ return pollflag;
}
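Returning POLLIN | POLLRDNORM when data is already queued makes the port
fd behave the way poll(2) callers expect. A minimal userspace sketch,
assuming an already opened and initialized port fd:

	#include <poll.h>

	/* wait up to 'ms' for a packet; >0 ready, 0 timeout, <0 error */
	static int wait_for_packet(int port_fd, int ms)
	{
		struct pollfd pfd = {
			.fd = port_fd,
			.events = POLLIN | POLLRDNORM,
		};
		int n = poll(&pfd, 1, ms);

		if (n <= 0)
			return n;	/* timeout or error (see errno) */
		return (pfd.revents & (POLLIN | POLLRDNORM)) ? 1 : 0;
	}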
static int try_alloc_port(struct ipath_devdata *dd, int port,
@@ -1297,14 +1302,14 @@ static int find_best_unit(struct file *fp)
* This code is present to allow a knowledgeable person to
* specify the layout of processes to processors before opening
* this driver, and then we'll assign the process to the "closest"
- * HT-400 to that processor (we assume reasonable connectivity,
+ * InfiniPath chip to that processor (we assume reasonable connectivity,
* for now). This code assumes that if affinity has been set
* before this point, that at most one cpu is set; for now this
* is reasonable. I check for both cpus_empty() and cpus_full(),
* in case some kernel variant sets none of the bits when no
* affinity is set. 2.6.11 and 12 kernels have all present
* cpus set. Some day we'll have to fix it up further to handle
- * a cpu subset. This algorithm fails for two HT-400's connected
+ * a cpu subset. This algorithm fails for two HT chips connected
* in tunnel fashion. Eventually this needs real topology
* information. There may be some issues with dual core numbering
* as well. This needs more work prior to release.
@@ -1815,7 +1820,7 @@ int ipath_user_add(struct ipath_devdata *dd)
if (ret < 0) {
ipath_dev_err(dd, "Could not create wildcard "
"minor: error %d\n", -ret);
- goto bail_sma;
+ goto bail_user;
}
atomic_set(&user_setup, 1);
@@ -1831,7 +1836,7 @@ int ipath_user_add(struct ipath_devdata *dd)
goto bail;
-bail_sma:
+bail_user:
user_cleanup();
bail:
return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 0936d8e8d70..a5eb30a06a5 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -191,8 +191,8 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
portinfo[4] = (dd->ipath_lid << 16);
/*
- * Notimpl yet SMLID (should we store this in the driver, in case
- * SMA dies?) CapabilityMask is 0, we don't support any of these
+ * Notimpl yet SMLID.
+	 * CapabilityMask is 0; we don't support any of these capabilities.
	 * DiagCode is 0; we don't store any diag info for now. Notimpl yet:
	 * M_KeyLeasePeriod (we don't support M_Key)
*/
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 3db015da6e7..bf2455a6d56 100644
--- a/drivers/infiniband/hw/ipath/ipath_ht400.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -33,7 +33,7 @@
/*
* This file contains all of the code that is specific to the InfiniPath
- * HT-400 chip.
+ * HT chip.
*/
#include <linux/pci.h>
@@ -43,7 +43,7 @@
#include "ipath_registers.h"
/*
- * This lists the InfiniPath HT400 registers, in the actual chip layout.
+ * This lists the InfiniPath registers, in the actual chip layout.
* This structure should never be directly accessed.
*
* The names are in InterCap form because they're taken straight from
@@ -461,8 +461,9 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
* times.
*/
if (dd->ipath_flags & IPATH_INITTED) {
- ipath_dev_err(dd, "Fatal Error (freeze "
- "mode), no longer usable\n");
+ ipath_dev_err(dd, "Fatal Hardware Error (freeze "
+ "mode), no longer usable, SN %.16s\n",
+ dd->ipath_serial);
isfatal = 1;
}
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
@@ -537,7 +538,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
if (hwerrs & INFINIPATH_HWE_HTCMISCERR7)
strlcat(msg, "[HT core Misc7]", msgl);
if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, HT-400 unusable]",
+ strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
msgl);
/* ignore from now on, so disable until driver reloaded */
dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
@@ -553,7 +554,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
if (hwerrs & _IPATH_PLL_FAIL) {
snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), HT-400 unusable]",
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
(unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
strlcat(msg, bitsmsg, msgl);
/* ignore from now on, so disable until driver reloaded */
@@ -610,18 +611,18 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
break;
case 5:
/*
- * HT-460 original production board; two production levels, with
+ * original production board; two production levels, with
* different serial number ranges. See ipath_ht_early_init() for
* case where we enable IPATH_GPIO_INTR for later serial # range.
*/
- n = "InfiniPath_HT-460";
+ n = "InfiniPath_QHT7040";
break;
case 6:
n = "OEM_Board_3";
break;
case 7:
- /* HT-460 small form factor production board */
- n = "InfiniPath_HT-465";
+ /* small form factor production board */
+ n = "InfiniPath_QHT7140";
break;
case 8:
n = "LS/X-1";
@@ -633,7 +634,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
n = "OEM_Board_2";
break;
case 11:
- n = "InfiniPath_HT-470";
+ n = "InfiniPath_HT-470"; /* obsoleted */
break;
case 12:
n = "OEM_Board_4";
@@ -641,7 +642,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
default: /* don't know, just print the number */
ipath_dev_err(dd, "Don't yet know about board "
"with ID %u\n", boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_HT-4xx_%u",
+ snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
boardrev);
break;
}
@@ -650,11 +651,10 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
/*
- * This version of the driver only supports the HT-400
- * Rev 3.2
+ * This version of the driver only supports Rev 3.2 and 3.3
*/
ipath_dev_err(dd,
- "Unsupported HT-400 revision %u.%u!\n",
+ "Unsupported InfiniPath hardware revision %u.%u!\n",
dd->ipath_majrev, dd->ipath_minrev);
ret = 1;
goto bail;
@@ -738,7 +738,7 @@ static void ipath_check_htlink(struct ipath_devdata *dd)
static int ipath_setup_ht_reset(struct ipath_devdata *dd)
{
- ipath_dbg("No reset possible for HT-400\n");
+ ipath_dbg("No reset possible for this InfiniPath hardware\n");
return 0;
}
@@ -925,7 +925,7 @@ static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
/*
* kernels with CONFIG_PCI_MSI set the vector in the irq field of
- * struct pci_device, so we use that to program the HT-400 internal
+ * struct pci_device, so we use that to program the internal
* interrupt register (not config space) with that value. The BIOS
* must still have done the basic MSI setup.
*/
@@ -1013,7 +1013,7 @@ bail:
* @dd: the infinipath device
*
* Called during driver unload.
- * This is currently a nop for the HT-400, not for all chips
+ * This is currently a nop for the HT chip, not for all chips
*/
static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
{
@@ -1290,6 +1290,15 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
val &= ~INFINIPATH_XGXS_RESET;
change = 1;
}
+ if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
+ INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
+ /* need to compensate for Tx inversion in partner */
+ val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
+ INFINIPATH_XGXS_RX_POL_SHIFT);
+ val |= dd->ipath_rx_pol_inv <<
+ INFINIPATH_XGXS_RX_POL_SHIFT;
+ change = 1;
+ }
if (change)
ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
@@ -1470,7 +1479,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
/*
- * For HT-400, we allocate a somewhat overly large eager buffer,
+ * For HT, we allocate a somewhat overly large eager buffer,
* such that we can guarantee that we can receive the largest
* packet that we can send out. To truly support a 4KB MTU,
* we need to bump this to a large value. To date, other than
@@ -1531,7 +1540,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
/*
- * Later production HT-460 has same changes as HT-465, so
+ * Later production QHT7040 has same changes as QHT7140, so
* can use GPIO interrupts. They have serial #'s starting
* with 128, rather than 112.
*/
@@ -1560,13 +1569,13 @@ static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
}
/**
- * ipath_init_ht400_funcs - set up the chip-specific function pointers
+ * ipath_init_iba6110_funcs - set up the chip-specific function pointers
* @dd: the infinipath device
*
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
*/
-void ipath_init_ht400_funcs(struct ipath_devdata *dd)
+void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
{
dd->ipath_f_intrsetup = ipath_ht_intconfig;
dd->ipath_f_bus = ipath_setup_ht_config;
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index b83f66d8262..d86516d23df 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -32,7 +32,7 @@
*/
/*
* This file contains all of the code that is specific to the
- * InfiniPath PE-800 chip.
+ * InfiniPath PCIe chip.
*/
#include <linux/interrupt.h>
@@ -45,9 +45,9 @@
/*
* This file contains all the chip-specific register information and
- * access functions for the QLogic InfiniPath PE800, the PCI-Express chip.
+ * access functions for the QLogic InfiniPath PCI-Express chip.
*
- * This lists the InfiniPath PE800 registers, in the actual chip layout.
+ * This lists the InfiniPath registers, in the actual chip layout.
* This structure should never be directly accessed.
*/
struct _infinipath_do_not_use_kernel_regs {
@@ -213,7 +213,6 @@ static const struct ipath_kregs ipath_pe_kregs = {
.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
- /* This group is pe-800-specific; and used only in this file */
/* The rcvpktled register controls one of the debug port signals, so
* a packet activity LED can be connected to it. */
.kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
@@ -364,8 +363,9 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
* and we get here multiple times
*/
if (dd->ipath_flags & IPATH_INITTED) {
- ipath_dev_err(dd, "Fatal Error (freeze "
- "mode), no longer usable\n");
+ ipath_dev_err(dd, "Fatal Hardware Error (freeze "
+ "mode), no longer usable, SN %.16s\n",
+ dd->ipath_serial);
isfatal = 1;
}
/*
@@ -388,7 +388,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
*msg = '\0';
if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
- strlcat(msg, "[Memory BIST test failed, PE-800 unusable]",
+ strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
msgl);
/* ignore from now on, so disable until driver reloaded */
*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
@@ -433,7 +433,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
if (hwerrs & _IPATH_PLL_FAIL) {
snprintf(bitsmsg, sizeof bitsmsg,
- "[PLL failed (%llx), PE-800 unusable]",
+ "[PLL failed (%llx), InfiniPath hardware unusable]",
(unsigned long long) hwerrs & _IPATH_PLL_FAIL);
strlcat(msg, bitsmsg, msgl);
/* ignore from now on, so disable until driver reloaded */
@@ -511,22 +511,25 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
n = "InfiniPath_Emulation";
break;
case 1:
- n = "InfiniPath_PE-800-Bringup";
+ n = "InfiniPath_QLE7140-Bringup";
break;
case 2:
- n = "InfiniPath_PE-880";
+ n = "InfiniPath_QLE7140";
break;
case 3:
- n = "InfiniPath_PE-850";
+ n = "InfiniPath_QMI7140";
break;
case 4:
- n = "InfiniPath_PE-860";
+ n = "InfiniPath_QEM7140";
+ break;
+ case 5:
+ n = "InfiniPath_QMH7140";
break;
default:
ipath_dev_err(dd,
"Don't yet know about board with ID %u\n",
boardrev);
- snprintf(name, namelen, "Unknown_InfiniPath_PE-8xx_%u",
+ snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
boardrev);
break;
}
@@ -534,7 +537,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
snprintf(name, namelen, "%s", n);
if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) {
- ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n",
+ ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
dd->ipath_majrev, dd->ipath_minrev);
ret = 1;
} else
@@ -651,6 +654,15 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
val &= ~INFINIPATH_XGXS_RESET;
change = 1;
}
+ if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
+ INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
+ /* need to compensate for Tx inversion in partner */
+ val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
+ INFINIPATH_XGXS_RX_POL_SHIFT);
+ val |= dd->ipath_rx_pol_inv <<
+ INFINIPATH_XGXS_RX_POL_SHIFT;
+ change = 1;
+ }
if (change)
ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
@@ -705,7 +717,7 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
}
-/* this is not yet needed on the PE800, so just return 0. */
+/* this is not yet needed on this chip, so just return 0. */
static int ipath_pe_intconfig(struct ipath_devdata *dd)
{
return 0;
@@ -759,8 +771,8 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
*
* This is called during driver unload.
* We do the pci_disable_msi here, not in generic code, because it
- * isn't used for the HT-400. If we do end up needing pci_enable_msi
- * at some point in the future for HT-400, we'll move the call back
+ * isn't used for the HT chips. If we do end up needing pci_enable_msi
+ * at some point in the future for HT, we'll move the call back
* into the main init_one code.
*/
static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
@@ -780,10 +792,10 @@ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
* late in 2.6.16).
* All that can be done is to edit the kernel source to remove the quirk
* check until that is fixed.
- * We do not need to call enable_msi() for our HyperTransport chip (HT-400),
- * even those it uses MSI, and we want to avoid the quirk warning, so
- * So we call enable_msi only for the PE-800. If we do end up needing
- * pci_enable_msi at some point in the future for HT-400, we'll move the
+ * We do not need to call enable_msi() for our HyperTransport chip,
+ * even though it uses MSI, and we want to avoid the quirk warning,
+ * so we call enable_msi only for PCIe. If we do end up needing
+ * pci_enable_msi at some point in the future for HT, we'll move the
* call back into the main init_one code.
* We save the msi lo and hi values, so we can restore them after
* chip reset (the kernel PCI infrastructure doesn't yet handle that
@@ -971,8 +983,7 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
int ret;
/* Use ERROR so it shows up in logs, etc. */
- ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
- dd->ipath_unit);
+ ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
/* keep chip from being accessed in a few places */
dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
val = dd->ipath_control | INFINIPATH_C_RESET;
@@ -1078,7 +1089,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
* @port: the port
*
* clear all TID entries for a port, expected and eager.
- * Used from ipath_close(). On PE800, TIDs are only 32 bits,
+ * Used from ipath_close(). On this chip, TIDs are only 32 bits,
* not 64, but they are still on 64 bit boundaries, so tidbase
* is declared as u64 * for the pointer math, even though we write 32 bits
*/
@@ -1148,9 +1159,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
dd->ipath_flags |= IPATH_4BYTE_TID;
/*
- * For openib, we need to be able to handle an IB header of 96 bytes
- * or 24 dwords. HT-400 has arbitrary sized receive buffers, so we
- * made them the same size as the PIO buffers. The PE-800 does not
+ * For openfabrics, we need to be able to handle an IB header of
+ * 24 dwords. HT chip has arbitrary sized receive buffers, so we
+ * made them the same size as the PIO buffers. This chip does not
* handle arbitrary size buffers, so we need the header large enough
* to handle largest IB header, but still have room for a 2KB MTU
* standard IB packet.
@@ -1158,11 +1169,10 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
dd->ipath_rcvhdrentsize = 24;
dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
- /* For HT-400, we allocate a somewhat overly large eager buffer,
- * such that we can guarantee that we can receive the largest packet
- * that we can send out. To truly support a 4KB MTU, we need to
- * bump this to a larger value. We'll do this when I get around to
- * testing 4KB sends on the PE-800, which I have not yet done.
+ /*
+ * To truly support a 4KB MTU (for usermode), we need to
+ * bump this to a larger value. For now, we use them for
+ * the kernel only.
*/
dd->ipath_rcvegrbufsize = 2048;
/*
@@ -1175,9 +1185,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
/*
- * For PE-800, we can request a receive interrupt for 1 or
+ * We can request a receive interrupt for 1 or
* more packets from current offset. For now, we set this
- * up for a single packet, to match the HT-400 behavior.
+ * up for a single packet.
*/
dd->ipath_rhdrhead_intr_off = 1ULL<<32;
@@ -1216,13 +1226,13 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
}
/**
- * ipath_init_pe800_funcs - set up the chip-specific function pointers
+ * ipath_init_iba6120_funcs - set up the chip-specific function pointers
* @dd: the infinipath device
*
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
*/
-void ipath_init_pe800_funcs(struct ipath_devdata *dd)
+void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
{
dd->ipath_f_intrsetup = ipath_pe_intconfig;
dd->ipath_f_bus = ipath_setup_pe_config;
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 414cdd1d80a..44669dc2e22 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -53,8 +53,8 @@ module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
/*
- * Number of buffers reserved for driver (layered drivers and SMA
- * send). Reserved at end of buffer list. Initialized based on
+ * Number of buffers reserved for driver (verbs and layered drivers).
+ * Reserved at end of buffer list. Initialized based on
* number of PIO buffers if not set via module interface.
* The problem with this is that it's global, but we'll use different
* numbers for different chip types. So the default value is not
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
*
* Allocate the eager TID buffers and program them into infinipath.
* We use the network layer alloc_skb() allocator to allocate the
- * memory, and either use the buffers as is for things like SMA
+ * memory, and either use the buffers as is for things like verbs
* packets, or pass the buffers up to the ipath layered driver and
 * thence the network layer, replacing them as we do so.
@@ -240,7 +240,11 @@ static int init_chip_first(struct ipath_devdata *dd,
"only supports %u\n", ipath_cfgports,
dd->ipath_portcnt);
}
- dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports,
+ /*
+ * Allocate full portcnt array, rather than just cfgports, because
+ * cleanup iterates across all possible ports.
+ */
+ dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt,
GFP_KERNEL);
if (!dd->ipath_pd) {
@@ -446,9 +450,9 @@ static void enable_chip(struct ipath_devdata *dd,
u32 val;
int i;
- if (!reinit) {
- init_waitqueue_head(&ipath_sma_state_wait);
- }
+ if (!reinit)
+ init_waitqueue_head(&ipath_state_wait);
+
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
@@ -687,7 +691,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
/ (sizeof(u64) * BITS_PER_BYTE / 2);
if (ipath_kpiobufs == 0) {
- /* not set by user, or set explictly to default */
+ /* not set by user (this is default) */
if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
kpiobufs = 32;
else
@@ -946,6 +950,7 @@ static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
}
+ ipath_kpiobufs = val;
ret = 0;
bail:
spin_unlock_irqrestore(&ipath_devs_lock, flags);
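The added assignment to ipath_kpiobufs is what makes a later read of the
module parameter report the clamped value, since this setter is wired up
through module_param_call(). The general shape of such a setter, as a
sketch with illustrative names:

	static unsigned example_param;
	static DEFINE_SPINLOCK(example_lock);

	static int example_set_param(const char *str, struct kernel_param *kp)
	{
		char *end;
		unsigned long val = simple_strtoul(str, &end, 0);
		unsigned long flags;

		if (end == str || val > 128)	/* illustrative bound */
			return -EINVAL;

		spin_lock_irqsave(&example_lock, flags);
		/* ... validate against live device state here ... */
		example_param = val;	/* mirror so reads see what applied */
		spin_unlock_irqrestore(&example_lock, flags);
		return 0;
	}

	module_param_call(example, example_set_param, param_get_uint,
			  &example_param, S_IWUSR | S_IRUGO);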
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 280e732660a..49bf7bb15b0 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -34,7 +34,7 @@
#include <linux/pci.h>
#include "ipath_kernel.h"
-#include "ipath_layer.h"
+#include "ipath_verbs.h"
#include "ipath_common.h"
/* These are all rcv-related errors which we want to count for stats */
@@ -201,7 +201,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
ib_linkstate(lstate));
}
else
- ipath_cdbg(SMA, "Unit %u link state %s, last "
+ ipath_cdbg(VERBOSE, "Unit %u link state %s, last "
"was %s\n", dd->ipath_unit,
ib_linkstate(lstate),
ib_linkstate((unsigned)
@@ -213,7 +213,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
if (lstate == IPATH_IBSTATE_INIT ||
lstate == IPATH_IBSTATE_ARM ||
lstate == IPATH_IBSTATE_ACTIVE)
- ipath_cdbg(SMA, "Unit %u link state down"
+ ipath_cdbg(VERBOSE, "Unit %u link state down"
" (state 0x%x), from %s\n",
dd->ipath_unit,
(u32)val & IPATH_IBSTATE_MASK,
@@ -269,7 +269,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
INFINIPATH_IBCS_LINKSTATE_MASK)
== INFINIPATH_IBCS_L_STATE_ACTIVE)
/* if from up to down be more vocal */
- ipath_cdbg(SMA,
+ ipath_cdbg(VERBOSE,
"Unit %u link now down (%s)\n",
dd->ipath_unit,
ipath_ibcstatus_str[ltstate]);
@@ -289,8 +289,6 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
*dd->ipath_statusp |=
IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
dd->ipath_f_setextled(dd, lstate, ltstate);
-
- __ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
/*
* set INIT and DOWN. Down is checked by most of the other
@@ -598,11 +596,11 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
if (!noprint && *msg)
ipath_dev_err(dd, "%s error\n", msg);
- if (dd->ipath_sma_state_wanted & dd->ipath_flags) {
- ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, "
- "waking\n", dd->ipath_sma_state_wanted,
+ if (dd->ipath_state_wanted & dd->ipath_flags) {
+ ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
+ "waking\n", dd->ipath_state_wanted,
dd->ipath_flags);
- wake_up_interruptible(&ipath_sma_state_wait);
+ wake_up_interruptible(&ipath_state_wait);
}
return chkerrpkts;
@@ -708,11 +706,7 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
{
int ret;
- ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
- if (ret > 0)
- goto set;
-
- ret = __ipath_verbs_piobufavail(dd);
+ ret = ipath_ib_piobufavail(dd->verbs_dev);
if (ret > 0)
goto set;
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index e9f374fb641..a8a56276ff1 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -132,12 +132,6 @@ struct _ipath_layer {
void *l_arg;
};
-/* Verbs layer interface */
-struct _verbs_layer {
- void *l_arg;
- struct timer_list l_timer;
-};
-
struct ipath_devdata {
struct list_head ipath_list;
@@ -198,7 +192,8 @@ struct ipath_devdata {
void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
/* fill out chip-specific fields */
int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
- struct _verbs_layer verbs_layer;
+ struct ipath_ibdev *verbs_dev;
+ struct timer_list verbs_timer;
/* total dwords sent (summed from counter) */
u64 ipath_sword;
/* total dwords rcvd (summed from counter) */
@@ -241,7 +236,7 @@ struct ipath_devdata {
u64 ipath_tidtemplate;
/* value to write to free TIDs */
u64 ipath_tidinvalid;
- /* PE-800 rcv interrupt setup */
+ /* IBA6120 rcv interrupt setup */
u64 ipath_rhdrhead_intr_off;
/* size of memory at ipath_kregbase */
@@ -250,8 +245,8 @@ struct ipath_devdata {
u32 ipath_pioavregs;
/* IPATH_POLL, etc. */
u32 ipath_flags;
- /* ipath_flags sma is waiting for */
- u32 ipath_sma_state_wanted;
+ /* ipath_flags driver is waiting for */
+ u32 ipath_state_wanted;
/* last buffer for user use, first buf for kernel use is this
* index. */
u32 ipath_lastport_piobuf;
@@ -311,10 +306,6 @@ struct ipath_devdata {
u32 ipath_pcibar0;
/* so we can rewrite it after a chip reset */
u32 ipath_pcibar1;
- /* sequential tries for SMA send and no bufs */
- u32 ipath_nosma_bufs;
- /* duration (seconds) ipath_nosma_bufs set */
- u32 ipath_nosma_secs;
/* HT/PCI Vendor ID (here for NodeInfo) */
u16 ipath_vendorid;
@@ -512,6 +503,8 @@ struct ipath_devdata {
u8 ipath_pci_cacheline;
/* LID mask control */
u8 ipath_lmc;
+ /* Rx Polarity inversion (compensate for ~tx on partner) */
+ u8 ipath_rx_pol_inv;
/* local link integrity counter */
u32 ipath_lli_counter;
@@ -523,18 +516,6 @@ extern struct list_head ipath_dev_list;
extern spinlock_t ipath_devs_lock;
extern struct ipath_devdata *ipath_lookup(int unit);
-extern u16 ipath_layer_rcv_opcode;
-extern int __ipath_layer_intr(struct ipath_devdata *, u32);
-extern int ipath_layer_intr(struct ipath_devdata *, u32);
-extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
- struct sk_buff *);
-extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *);
-extern int __ipath_verbs_piobufavail(struct ipath_devdata *);
-extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
-
-void ipath_layer_add(struct ipath_devdata *);
-void ipath_layer_remove(struct ipath_devdata *);
-
int ipath_init_chip(struct ipath_devdata *, int);
int ipath_enable_wc(struct ipath_devdata *dd);
void ipath_disable_wc(struct ipath_devdata *dd);
@@ -549,9 +530,8 @@ void ipath_cdev_cleanup(struct cdev **cdevp,
int ipath_diag_add(struct ipath_devdata *);
void ipath_diag_remove(struct ipath_devdata *);
-void ipath_diag_bringup_link(struct ipath_devdata *);
-extern wait_queue_head_t ipath_sma_state_wait;
+extern wait_queue_head_t ipath_state_wait;
int ipath_user_add(struct ipath_devdata *dd);
void ipath_user_remove(struct ipath_devdata *dd);
@@ -582,12 +562,14 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
int ipath_parse_ushort(const char *str, unsigned short *valp);
-int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
-void ipath_set_ib_lstate(struct ipath_devdata *, int);
void ipath_kreceive(struct ipath_devdata *);
int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
int ipath_reset_device(int);
void ipath_get_faststats(unsigned long);
+int ipath_set_linkstate(struct ipath_devdata *, u8);
+int ipath_set_mtu(struct ipath_devdata *, u16);
+int ipath_set_lid(struct ipath_devdata *, u32, u8);
+int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
/* for use in system calls, where we want to know device type, etc. */
#define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data)
@@ -642,10 +624,8 @@ void ipath_free_data(struct ipath_portdata *dd);
int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
-/* init PE-800-specific func */
-void ipath_init_pe800_funcs(struct ipath_devdata *);
-/* init HT-400-specific func */
-void ipath_init_ht400_funcs(struct ipath_devdata *);
+void ipath_init_iba6120_funcs(struct ipath_devdata *);
+void ipath_init_iba6110_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
@@ -801,7 +781,7 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
struct device_driver;
-extern const char ipath_core_version[];
+extern const char ib_ipath_version[];
int ipath_driver_create_group(struct device_driver *);
void ipath_driver_remove_group(struct device_driver *);
@@ -810,6 +790,9 @@ int ipath_device_create_group(struct device *, struct ipath_devdata *);
void ipath_device_remove_group(struct device *, struct ipath_devdata *);
int ipath_expose_reset(struct device *);
+int ipath_diagpkt_add(void);
+void ipath_diagpkt_remove(void);
+
int ipath_init_ipathfs(void);
void ipath_exit_ipathfs(void);
int ipathfs_add_device(struct ipath_devdata *);
@@ -831,10 +814,10 @@ const char *ipath_get_unit_name(int unit);
extern struct mutex ipath_mutex;
-#define IPATH_DRV_NAME "ipath_core"
+#define IPATH_DRV_NAME "ib_ipath"
#define IPATH_MAJOR 233
#define IPATH_USER_MINOR_BASE 0
-#define IPATH_SMA_MINOR 128
+#define IPATH_DIAGPKT_MINOR 127
#define IPATH_DIAG_MINOR_BASE 129
#define IPATH_NMINORS 255
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index a5ca279370a..ba1b93226ca 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -34,6 +34,7 @@
#include <asm/io.h>
#include "ipath_verbs.h"
+#include "ipath_kernel.h"
/**
* ipath_alloc_lkey - allocate an lkey
@@ -60,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
r = (r + 1) & (rkt->max - 1);
if (r == n) {
spin_unlock_irqrestore(&rkt->lock, flags);
- _VERBS_INFO("LKEY table full\n");
+ ipath_dbg(KERN_INFO "LKEY table full\n");
ret = 0;
goto bail;
}
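For reference, a minimal standalone sketch of the wrap-around scan that ipath_alloc_lkey() performs above — the table size is assumed to be a power of two, so the bitwise AND serves as a cheap modulo. Names here are illustrative, not driver API:

/* Probe a power-of-two table starting at `start`; return a free
 * index, or -1 once the probe wraps back to where it began. */
static int find_free_slot(void **table, unsigned int max, unsigned int start)
{
	unsigned int r = start;

	do {
		if (!table[r])
			return r;
		r = (r + 1) & (max - 1);	/* cheap modulo: max is 2^k */
	} while (r != start);

	return -1;				/* table full */
}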
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index b28c6f81c73..e46aa4ed2a7 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -42,26 +42,20 @@
#include "ipath_kernel.h"
#include "ipath_layer.h"
+#include "ipath_verbs.h"
#include "ipath_common.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);
-static int ipath_verbs_registered;
-
u16 ipath_layer_rcv_opcode;
static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);
-static int (*verbs_piobufavail)(void *);
-static void (*verbs_rcv)(void *, void *, void *, u32);
static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
-static void *(*verbs_add_one)(int, struct ipath_devdata *);
-static void (*verbs_remove_one)(void *);
-static void (*verbs_timer_cb)(void *);
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
@@ -107,302 +101,16 @@ int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
return ret;
}
-int __ipath_verbs_piobufavail(struct ipath_devdata *dd)
-{
- int ret = -ENODEV;
-
- if (dd->verbs_layer.l_arg && verbs_piobufavail)
- ret = verbs_piobufavail(dd->verbs_layer.l_arg);
-
- return ret;
-}
-
-int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
- u32 tlen)
-{
- int ret = -ENODEV;
-
- if (dd->verbs_layer.l_arg && verbs_rcv) {
- verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
- ret = 0;
- }
-
- return ret;
-}
-
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
+void ipath_layer_lid_changed(struct ipath_devdata *dd)
{
- u32 lstate;
- int ret;
-
- switch (newstate) {
- case IPATH_IB_LINKDOWN:
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
- INFINIPATH_IBCC_LINKINITCMD_SHIFT);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINKDOWN_SLEEP:
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
- INFINIPATH_IBCC_LINKINITCMD_SHIFT);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINKDOWN_DISABLE:
- ipath_set_ib_lstate(dd,
- INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
- INFINIPATH_IBCC_LINKINITCMD_SHIFT);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case IPATH_IB_LINKINIT:
- if (dd->ipath_flags & IPATH_LINKINIT) {
- ret = 0;
- goto bail;
- }
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
- INFINIPATH_IBCC_LINKCMD_SHIFT);
- lstate = IPATH_LINKINIT;
- break;
-
- case IPATH_IB_LINKARM:
- if (dd->ipath_flags & IPATH_LINKARMED) {
- ret = 0;
- goto bail;
- }
- if (!(dd->ipath_flags &
- (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
- ret = -EINVAL;
- goto bail;
- }
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
- INFINIPATH_IBCC_LINKCMD_SHIFT);
- /*
- * Since the port can transition to ACTIVE by receiving
- * a non VL 15 packet, wait for either state.
- */
- lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
- break;
-
- case IPATH_IB_LINKACTIVE:
- if (dd->ipath_flags & IPATH_LINKACTIVE) {
- ret = 0;
- goto bail;
- }
- if (!(dd->ipath_flags & IPATH_LINKARMED)) {
- ret = -EINVAL;
- goto bail;
- }
- ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
- INFINIPATH_IBCC_LINKCMD_SHIFT);
- lstate = IPATH_LINKACTIVE;
- break;
-
- default:
- ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
- ret = -EINVAL;
- goto bail;
- }
- ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
-
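The switch in the removed function funnels the INIT/ARMED/ACTIVE cases into ipath_wait_linkstate(dd, lstate, 2000), which sleeps until one of the wanted flag bits appears. A rough busy-polling sketch of that contract follows; the real function uses a wait queue, and msleep() plus the names here are assumptions for illustration only:

/* Poll a flags word until any wanted bit is set or `timeout_ms`
 * expires; the driver's ipath_wait_linkstate() does the same job
 * with a wait queue instead of polling. */
static int wait_for_flags(volatile u32 *flags, u32 wanted, int timeout_ms)
{
	while (timeout_ms-- > 0) {
		if (*flags & wanted)
			return 0;
		msleep(1);		/* assumes <linux/delay.h> */
	}
	return -ETIMEDOUT;
}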
-/**
- * ipath_layer_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * we can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size. For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link initialize (IPATH_IBSTATE_INIT) state...
- */
-int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
- u32 piosize;
- int changed = 0;
- int ret;
-
- /*
- * mtu is IB data payload max. It's the largest power of 2 less
- * than piosize (or even larger, since it only really controls the
- * largest we can receive; we can send the max of the mtu and
- * piosize). We check that it's one of the valid IB sizes.
- */
- if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
- arg != 4096) {
- ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
- ret = -EINVAL;
- goto bail;
- }
- if (dd->ipath_ibmtu == arg) {
- ret = 0; /* same as current */
- goto bail;
- }
-
- piosize = dd->ipath_ibmaxlen;
- dd->ipath_ibmtu = arg;
-
- if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
- /* Only if it's not the initial value (or reset to it) */
- if (piosize != dd->ipath_init_ibmaxlen) {
- dd->ipath_ibmaxlen = piosize;
- changed = 1;
- }
- } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
- piosize = arg + IPATH_PIO_MAXIBHDR;
- ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
- "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
- arg);
- dd->ipath_ibmaxlen = piosize;
- changed = 1;
- }
-
- if (changed) {
- /*
- * set the IBC maxpktlength to the size of our pio
- * buffers in words
- */
- u64 ibc = dd->ipath_ibcctrl;
- ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
- INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
-
- piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
- dd->ipath_ibmaxlen = piosize;
- piosize /= sizeof(u32); /* in words */
- /*
- * for ICRC, which we only send in diag test pkt mode, and
- * we don't need to worry about that for mtu
- */
- piosize += 1;
-
- ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
- dd->ipath_ibcctrl = ibc;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- dd->ipath_f_tidtemplate(dd);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
-
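The explicit size list above is equivalent to a power-of-two range check, since the valid IB MTUs (256 through 4096) are exactly the powers of two in that range. A one-line form, purely illustrative — the driver keeps the explicit comparison:

static int ib_mtu_valid(u16 mtu)
{
	/* a power of two within [256, 4096] */
	return mtu >= 256 && mtu <= 4096 && !(mtu & (mtu - 1));
}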
-int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
-{
- dd->ipath_lid = arg;
- dd->ipath_lmc = lmc;
-
mutex_lock(&ipath_layer_mutex);
if (dd->ipath_layer.l_arg && layer_intr)
layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
mutex_unlock(&ipath_layer_mutex);
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_set_lid);
-
-int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
-{
- /* XXX - need to inform anyone who cares this just happened. */
- dd->ipath_guid = guid;
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
-
-__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
-{
- return dd->ipath_guid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
-
-u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
-{
- return dd->ipath_nguid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
-
-u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
-{
- return dd->ipath_majrev;
}
-EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
-
-u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
-{
- return dd->ipath_minrev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
-
-u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
-{
- return dd->ipath_pcirev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
-
-u32 ipath_layer_get_flags(struct ipath_devdata *dd)
-{
- return dd->ipath_flags;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
-
-struct device *ipath_layer_get_device(struct ipath_devdata *dd)
-{
- return &dd->pcidev->dev;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_device);
-
-u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
-{
- return dd->ipath_deviceid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
-
-u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
-{
- return dd->ipath_vendorid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
-
-u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
-{
- return dd->ipath_lastibcstat;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
-
-u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
-{
- return dd->ipath_ibmtu;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
-
void ipath_layer_add(struct ipath_devdata *dd)
{
mutex_lock(&ipath_layer_mutex);
@@ -411,10 +119,6 @@ void ipath_layer_add(struct ipath_devdata *dd)
dd->ipath_layer.l_arg =
layer_add_one(dd->ipath_unit, dd);
- if (verbs_add_one)
- dd->verbs_layer.l_arg =
- verbs_add_one(dd->ipath_unit, dd);
-
mutex_unlock(&ipath_layer_mutex);
}
@@ -427,11 +131,6 @@ void ipath_layer_remove(struct ipath_devdata *dd)
dd->ipath_layer.l_arg = NULL;
}
- if (dd->verbs_layer.l_arg && verbs_remove_one) {
- verbs_remove_one(dd->verbs_layer.l_arg);
- dd->verbs_layer.l_arg = NULL;
- }
-
mutex_unlock(&ipath_layer_mutex);
}
@@ -463,9 +162,6 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
if (dd->ipath_layer.l_arg)
continue;
- if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
- *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
-
spin_unlock_irqrestore(&ipath_devs_lock, flags);
dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -509,107 +205,6 @@ void ipath_layer_unregister(void)
EXPORT_SYMBOL_GPL(ipath_layer_unregister);
-static void __ipath_verbs_timer(unsigned long arg)
-{
- struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
- /*
- * If port 0 receive packet interrupts are not available, or
- * can be missed, poll the receive queue
- */
- if (dd->ipath_flags & IPATH_POLL_RX_INTR)
- ipath_kreceive(dd);
-
- /* Handle verbs layer timeouts. */
- if (dd->verbs_layer.l_arg && verbs_timer_cb)
- verbs_timer_cb(dd->verbs_layer.l_arg);
-
- mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
-}
-
-/**
- * ipath_verbs_register - verbs layer registration
- * @l_piobufavail: callback for when PIO buffers become available
- * @l_rcv: callback for receiving a packet
- * @l_timer_cb: timer callback
- * @l_add: per-device add callback
- * @l_remove: per-device remove callback
- */
-int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
- void (*l_remove)(void *arg),
- int (*l_piobufavail) (void *arg),
- void (*l_rcv) (void *arg, void *rhdr,
- void *data, u32 tlen),
- void (*l_timer_cb) (void *arg))
-{
- struct ipath_devdata *dd, *tmp;
- unsigned long flags;
-
- mutex_lock(&ipath_layer_mutex);
-
- verbs_add_one = l_add;
- verbs_remove_one = l_remove;
- verbs_piobufavail = l_piobufavail;
- verbs_rcv = l_rcv;
- verbs_timer_cb = l_timer_cb;
-
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
- if (!(dd->ipath_flags & IPATH_INITTED))
- continue;
-
- if (dd->verbs_layer.l_arg)
- continue;
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
- spin_lock_irqsave(&ipath_devs_lock, flags);
- }
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- mutex_unlock(&ipath_layer_mutex);
-
- ipath_verbs_registered = 1;
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_register);
-
-void ipath_verbs_unregister(void)
-{
- struct ipath_devdata *dd, *tmp;
- unsigned long flags;
-
- mutex_lock(&ipath_layer_mutex);
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
- *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
-
- if (dd->verbs_layer.l_arg && verbs_remove_one) {
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- verbs_remove_one(dd->verbs_layer.l_arg);
- spin_lock_irqsave(&ipath_devs_lock, flags);
- dd->verbs_layer.l_arg = NULL;
- }
- }
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
- verbs_add_one = NULL;
- verbs_remove_one = NULL;
- verbs_piobufavail = NULL;
- verbs_rcv = NULL;
- verbs_timer_cb = NULL;
-
- ipath_verbs_registered = 0;
-
- mutex_unlock(&ipath_layer_mutex);
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
-
int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
{
int ret;
@@ -698,390 +293,6 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
-u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
-{
- return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
- struct ipath_sge *sge = &ss->sge;
-
- sge->vaddr += length;
- sge->length -= length;
- sge->sge_length -= length;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr != NULL) {
- if (++sge->n >= IPATH_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- return;
- sge->n = 0;
- }
- sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
- }
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#endif
-
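A worked example of the little-endian clear_upper_bytes() above: it keeps the n low-order bytes of the source word and repositions them to start at byte offset off, zeroing the rest. Standalone userspace demo, illustrative only:

#include <assert.h>
#include <stdint.h>

static uint32_t clear_upper_bytes_le(uint32_t data, uint32_t n, uint32_t off)
{
	data <<= (4 - n) * 8;		/* drop the bytes above the n kept */
	data >>= (4 - n - off) * 8;	/* land them at byte offset `off` */
	return data;
}

int main(void)
{
	/* keep 2 bytes of 0xAABBCCDD, placed at byte offset 1:
	 * 0xAABBCCDD << 16 = 0xCCDD0000; then >> 8 = 0x00CCDD00 */
	assert(clear_upper_bytes_le(0xAABBCCDDu, 2, 1) == 0x00CCDD00u);
	return 0;
}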
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
- u32 length)
-{
- u32 extra = 0;
- u32 data = 0;
- u32 last;
-
- while (1) {
- u32 len = ss->sge.length;
- u32 off;
-
- BUG_ON(len == 0);
- if (len > length)
- len = length;
- if (len > ss->sge.sge_length)
- len = ss->sge.sge_length;
- /* If the source address is not aligned, try to align it. */
- off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
- if (off) {
- u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
- ~(sizeof(u32) - 1));
- u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
- u32 y;
-
- y = sizeof(u32) - off;
- if (len > y)
- len = y;
- if (len + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, extra *
- BITS_PER_BYTE);
- len = sizeof(u32) - extra;
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, len, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += len;
- }
- } else if (extra) {
- /* Source address is aligned. */
- u32 *addr = (u32 *) ss->sge.vaddr;
- int shift = extra * BITS_PER_BYTE;
- int ushift = 32 - shift;
- u32 l = len;
-
- while (l >= sizeof(u32)) {
- u32 v = *addr;
-
- data |= set_upper_bits(v, shift);
- __raw_writel(data, piobuf);
- data = get_upper_bits(v, ushift);
- piobuf++;
- addr++;
- l -= sizeof(u32);
- }
- /*
- * We still have 'extra' bytes left over.
- */
- if (l) {
- u32 v = *addr;
-
- if (l + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, shift);
- len -= l + extra - sizeof(u32);
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, l,
- extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += l;
- }
- } else if (len == length) {
- last = data;
- break;
- }
- } else if (len == length) {
- u32 w;
-
- /*
- * Need to round up for the last dword in the
- * packet.
- */
- w = (len + 3) >> 2;
- __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
- piobuf += w - 1;
- last = ((u32 *) ss->sge.vaddr)[w - 1];
- break;
- } else {
- u32 w = len >> 2;
-
- __iowrite32_copy(piobuf, ss->sge.vaddr, w);
- piobuf += w;
-
- extra = len & (sizeof(u32) - 1);
- if (extra) {
- u32 v = ((u32 *) ss->sge.vaddr)[w];
-
- /* Clear unused upper bytes */
- data = clear_upper_bytes(v, extra, 0);
- }
- }
- update_sge(ss, len);
- length -= len;
- }
- /* Update address before sending packet. */
- update_sge(ss, length);
- /* must flush early everything before trigger word */
- ipath_flush_wc();
- __raw_writel(last, piobuf);
- /* be sure trigger word is written */
- ipath_flush_wc();
-}
-
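copy_io() above streams an SGE list into a 32-bit-wide PIO buffer, carrying partial words across segment boundaries in data/extra. A byte-granular sketch of the same carry idea, little-endian only and deliberately simplified — copy_io() works on aligned whole words and uses MMIO writes, not plain stores:

static void pack_words(uint32_t *dst, const uint8_t *src, uint32_t len)
{
	uint32_t data = 0, shift = 0;

	while (len--) {
		data |= (uint32_t)*src++ << shift;
		shift += 8;
		if (shift == 32) {	/* word full: emit and restart */
			*dst++ = data;
			data = 0;
			shift = 0;
		}
	}
	if (shift)
		*dst = data;	/* trailing partial word, upper bytes zero */
}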
-/**
- * ipath_verbs_send - send a packet from the verbs layer
- * @dd: the infinipath device
- * @hdrwords: the number of words in the header
- * @hdr: the packet header
- * @len: the length of the packet in bytes
- * @ss: the SGE to send
- *
- * This is like ipath_sma_send_pkt() in that we need to be able to send
- * packets after the chip is initialized (MADs) but also like
- * ipath_layer_send_hdr() since it's used by the verbs layer.
- */
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
- u32 *hdr, u32 len, struct ipath_sge_state *ss)
-{
- u32 __iomem *piobuf;
- u32 plen;
- int ret;
-
- /* +1 is for the qword padding of pbc */
- plen = hdrwords + ((len + 3) >> 2) + 1;
- if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
- ipath_dbg("packet len 0x%x too long, failing\n", plen);
- ret = -EINVAL;
- goto bail;
- }
-
- /* Get a PIO buffer to use. */
- piobuf = ipath_getpiobuf(dd, NULL);
- if (unlikely(piobuf == NULL)) {
- ret = -EBUSY;
- goto bail;
- }
-
- /*
- * Write len to control qword, no flags.
- * We have to flush after the PBC for correctness on some cpus
- * or WC buffer can be written out of order.
- */
- writeq(plen, piobuf);
- ipath_flush_wc();
- piobuf += 2;
- if (len == 0) {
- /*
- * If there is just the header portion, must flush before
- * writing last word of header for correctness, and after
- * the last header word (trigger word).
- */
- __iowrite32_copy(piobuf, hdr, hdrwords - 1);
- ipath_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
- ipath_flush_wc();
- ret = 0;
- goto bail;
- }
-
- __iowrite32_copy(piobuf, hdr, hdrwords);
- piobuf += hdrwords;
-
- /* The common case is aligned and contained in one segment. */
- if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
- !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
- u32 w;
- u32 *addr = (u32 *) ss->sge.vaddr;
-
- /* Update address before sending packet. */
- update_sge(ss, len);
- /* Need to round up for the last dword in the packet. */
- w = (len + 3) >> 2;
- __iowrite32_copy(piobuf, addr, w - 1);
- /* must flush early everything before trigger word */
- ipath_flush_wc();
- __raw_writel(addr[w - 1], piobuf + w - 1);
- /* be sure trigger word is written */
- ipath_flush_wc();
- ret = 0;
- goto bail;
- }
- copy_io(piobuf, ss, len);
- ret = 0;
-
-bail:
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_verbs_send);
-
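The length check at the top of ipath_verbs_send() is easier to see with numbers (values illustrative): with hdrwords = 13 and len = 256, the payload is (256 + 3) >> 2 = 64 words, so plen = 13 + 64 + 1 = 78 words including the PBC qword pad, and the guard rejects the send if 78 << 2 = 312 bytes exceeds dd->ipath_ibmaxlen.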
-int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait)
-{
- int ret;
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* no hardware, freeze, etc. */
- ipath_dbg("unit %u not usable\n", dd->ipath_unit);
- ret = -EINVAL;
- goto bail;
- }
- *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
- *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
- *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
- *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
- *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
-
-/**
- * ipath_layer_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_layer_get_counters(struct ipath_devdata *dd,
- struct ipath_layer_counters *cntrs)
-{
- int ret;
-
- if (!(dd->ipath_flags & IPATH_INITTED)) {
- /* no hardware, freeze, etc. */
- ipath_dbg("unit %u not usable\n", dd->ipath_unit);
- ret = -EINVAL;
- goto bail;
- }
- cntrs->symbol_error_counter =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
- cntrs->link_error_recovery_counter =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
- /*
- * The link downed counter counts when the other side downs the
- * connection. We add in the number of times we downed the link
- * due to local link integrity errors to compensate.
- */
- cntrs->link_downed_counter =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
- cntrs->port_rcv_errors =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
- cntrs->port_rcv_remphys_errors =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
- cntrs->port_xmit_discards =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
- cntrs->port_xmit_data =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
- cntrs->port_rcv_data =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
- cntrs->port_xmit_packets =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
- cntrs->port_rcv_packets =
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
- cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
- cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
-
-int ipath_layer_want_buffer(struct ipath_devdata *dd)
-{
- set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
-
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
int ret = 0;
@@ -1153,389 +364,3 @@ int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
}
EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
-
-int ipath_layer_enable_timer(struct ipath_devdata *dd)
-{
- /*
- * HT-400 has a design flaw where the chip and kernel idea
- * of the tail register don't always agree, and therefore we won't
- * get an interrupt on the next packet received.
- * If the board supports per packet receive interrupts, use it.
- * Otherwise, the timer function periodically checks for packets
- * to cover this case.
- * Either way, the timer is needed for verbs layer related
- * processing.
- */
- if (dd->ipath_flags & IPATH_GPIO_INTR) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
- 0x2074076542310ULL);
- /* Enable GPIO bit 2 interrupt */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
- (u64) (1 << 2));
- }
-
- init_timer(&dd->verbs_layer.l_timer);
- dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
- dd->verbs_layer.l_timer.data = (unsigned long)dd;
- dd->verbs_layer.l_timer.expires = jiffies + 1;
- add_timer(&dd->verbs_layer.l_timer);
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
-
-int ipath_layer_disable_timer(struct ipath_devdata *dd)
-{
- /* Disable GPIO bit 2 interrupt */
- if (dd->ipath_flags & IPATH_GPIO_INTR)
- ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
-
- del_timer_sync(&dd->verbs_layer.l_timer);
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
-
-/**
- * ipath_layer_set_verbs_flags - set the verbs layer flags
- * @dd: the infinipath device
- * @flags: the flags to set
- */
-int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
-{
- struct ipath_devdata *ss;
- unsigned long lflags;
-
- spin_lock_irqsave(&ipath_devs_lock, lflags);
-
- list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
- if (!(ss->ipath_flags & IPATH_INITTED))
- continue;
- if ((flags & IPATH_VERBS_KERNEL_SMA) &&
- !(*ss->ipath_statusp & IPATH_STATUS_SMA))
- *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
- else
- *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
- }
-
- spin_unlock_irqrestore(&ipath_devs_lock, lflags);
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
-
-/**
- * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
-{
- return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
-
-/**
- * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
- unsigned ret;
-
- if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
- ret = 0;
- else
- ret = dd->ipath_pd[0]->port_pkeys[index];
-
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
-
-/**
- * ipath_layer_get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
- struct ipath_portdata *pd = dd->ipath_pd[0];
-
- memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (dd->ipath_pkeys[i] != key)
- continue;
- if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
- dd->ipath_pkeys[i] = 0;
- ret = 1;
- goto bail;
- }
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
- int i;
- u16 lkey = key & 0x7FFF;
- int any = 0;
- int ret;
-
- if (lkey == 0x7FFF) {
- ret = 0;
- goto bail;
- }
-
- /* Look for an empty slot or a matching PKEY. */
- for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (!dd->ipath_pkeys[i]) {
- any++;
- continue;
- }
- /* If it matches exactly, try to increment the ref count */
- if (dd->ipath_pkeys[i] == key) {
- if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
- ret = 0;
- goto bail;
- }
- /* Lost the race. Look for an empty slot below. */
- atomic_dec(&dd->ipath_pkeyrefs[i]);
- any++;
- }
- /*
- * It makes no sense to have both the limited and unlimited
- * PKEY set at the same time since the unlimited one will
- * disable the limited one.
- */
- if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
- }
- for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
- if (!dd->ipath_pkeys[i] &&
- atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
- /* for ipathstats, etc. */
- ipath_stats.sps_pkeys[i] = lkey;
- dd->ipath_pkeys[i] = key;
- ret = 1;
- goto bail;
- }
- }
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
-/**
- * ipath_layer_set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
- */
-int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
- struct ipath_portdata *pd;
- int i;
- int changed = 0;
-
- pd = dd->ipath_pd[0];
-
- for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
- u16 key = pkeys[i];
- u16 okey = pd->port_pkeys[i];
-
- if (key == okey)
- continue;
- /*
- * The value of this PKEY table entry is changing.
- * Remove the old entry in the hardware's array of PKEYs.
- */
- if (okey & 0x7FFF)
- changed |= rm_pkey(dd, okey);
- if (key & 0x7FFF) {
- int ret = add_pkey(dd, key);
-
- if (ret < 0)
- key = 0;
- else
- changed |= ret;
- }
- pd->port_pkeys[i] = key;
- }
- if (changed) {
- u64 pkey;
-
- pkey = (u64) dd->ipath_pkeys[0] |
- ((u64) dd->ipath_pkeys[1] << 16) |
- ((u64) dd->ipath_pkeys[2] << 32) |
- ((u64) dd->ipath_pkeys[3] << 48);
- ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
- (unsigned long long) pkey);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
- pkey);
- }
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
-
-/**
- * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
- return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
-
-/**
- * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
- int sleep)
-{
- if (sleep)
- dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
- else
- dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
-
-int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
-{
- return (dd->ipath_ibcctrl >>
- INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
-
-/**
- * ipath_layer_set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
- unsigned v;
-
- v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
- if (v != n) {
- dd->ipath_ibcctrl &=
- ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
- INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
- dd->ipath_ibcctrl |=
- (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- }
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
-
-int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
-{
- return (dd->ipath_ibcctrl >>
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
-
-/**
- * ipath_layer_set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
- unsigned v;
-
- v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
- if (v != n) {
- dd->ipath_ibcctrl &=
- ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
- INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
- dd->ipath_ibcctrl |=
- (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
- dd->ipath_ibcctrl);
- }
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
-
-int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen)
-{
- return dd->ipath_f_get_boardname(dd, name, namelen);
-}
-EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
-
-u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
-{
- return dd->ipath_rcvhdrentsize;
-}
-EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
index 71485096fca..3854a4eae68 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.h
+++ b/drivers/infiniband/hw/ipath/ipath_layer.h
@@ -40,73 +40,9 @@
*/
struct sk_buff;
-struct ipath_sge_state;
struct ipath_devdata;
struct ether_header;
-struct ipath_layer_counters {
- u64 symbol_error_counter;
- u64 link_error_recovery_counter;
- u64 link_downed_counter;
- u64 port_rcv_errors;
- u64 port_rcv_remphys_errors;
- u64 port_xmit_discards;
- u64 port_xmit_data;
- u64 port_rcv_data;
- u64 port_xmit_packets;
- u64 port_rcv_packets;
- u32 local_link_integrity_errors;
- u32 excessive_buffer_overrun_errors;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct ipath_seg {
- void *vaddr;
- size_t length;
-};
-
-/* The number of ipath_segs that fit in a page. */
-#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
-
-struct ipath_segarray {
- struct ipath_seg segs[IPATH_SEGSZ];
-};
-
-struct ipath_mregion {
- u64 user_base; /* User's address for this region */
- u64 iova; /* IB start address of this region */
- size_t length;
- u32 lkey;
- u32 offset; /* offset (bytes) to start of region */
- int access_flags;
- u32 max_segs; /* number of ipath_segs in all the arrays */
- u32 mapsz; /* size of the map array */
- struct ipath_segarray *map[0]; /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct ipath_sge {
- struct ipath_mregion *mr;
- void *vaddr; /* current pointer into the segment */
- u32 sge_length; /* length of the SGE */
- u32 length; /* remaining length of the segment */
- u16 m; /* current index: mr->map[m] */
- u16 n; /* current index: mr->map[m]->segs[n] */
-};
-
-struct ipath_sge_state {
- struct ipath_sge *sg_list; /* next SGE to be used if any */
- struct ipath_sge sge; /* progress state for the current SGE */
- u8 num_sge;
-};
-
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
void (*l_remove)(void *),
int (*l_intr)(void *, u32),
@@ -114,62 +50,14 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
struct sk_buff *),
u16 rcv_opcode,
int (*l_rcv_lid)(void *, void *));
-int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
- void (*l_remove)(void *arg),
- int (*l_piobufavail)(void *arg),
- void (*l_rcv)(void *arg, void *rhdr,
- void *data, u32 tlen),
- void (*l_timer_cb)(void *arg));
void ipath_layer_unregister(void);
-void ipath_verbs_unregister(void);
int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
u16 ipath_layer_get_lid(struct ipath_devdata *dd);
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
-u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
-int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
-int ipath_layer_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_lid(struct ipath_devdata *, u32, u8);
int ipath_layer_send_hdr(struct ipath_devdata *dd,
struct ether_header *hdr);
-int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
- u32 * hdr, u32 len, struct ipath_sge_state *ss);
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
-int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
- size_t namelen);
-int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait);
-int ipath_layer_get_counters(struct ipath_devdata *dd,
- struct ipath_layer_counters *cntrs);
-int ipath_layer_want_buffer(struct ipath_devdata *dd);
-int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
-__be64 ipath_layer_get_guid(struct ipath_devdata *);
-u32 ipath_layer_get_nguid(struct ipath_devdata *);
-u32 ipath_layer_get_majrev(struct ipath_devdata *);
-u32 ipath_layer_get_minrev(struct ipath_devdata *);
-u32 ipath_layer_get_pcirev(struct ipath_devdata *);
-u32 ipath_layer_get_flags(struct ipath_devdata *dd);
-struct device *ipath_layer_get_device(struct ipath_devdata *dd);
-u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
-u32 ipath_layer_get_vendorid(struct ipath_devdata *);
-u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
-u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
-int ipath_layer_enable_timer(struct ipath_devdata *dd);
-int ipath_layer_disable_timer(struct ipath_devdata *dd);
-int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags);
-unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd);
-unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index);
-int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys);
-int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys);
-int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd);
-int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
- int sleep);
-int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd);
-int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n);
-int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd);
-int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n);
-u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
/* ipath_ether interrupt values */
#define IPATH_LAYER_INT_IF_UP 0x2
@@ -178,9 +66,6 @@ u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
#define IPATH_LAYER_INT_SEND_CONTINUE 0x10
#define IPATH_LAYER_INT_BCAST 0x40
-/* _verbs_layer.l_flags */
-#define IPATH_VERBS_KERNEL_SMA 0x1
-
extern unsigned ipath_debug; /* debugging bit mask */
#endif /* _IPATH_LAYER_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index d3402341b7d..72d1db89db8 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -101,15 +101,15 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
nip->num_ports = ibdev->phys_port_cnt;
/* This is already in network order */
nip->sys_guid = to_idev(ibdev)->sys_image_guid;
- nip->node_guid = ipath_layer_get_guid(dd);
+ nip->node_guid = dd->ipath_guid;
nip->port_guid = nip->sys_guid;
- nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd));
- nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd));
- majrev = ipath_layer_get_majrev(dd);
- minrev = ipath_layer_get_minrev(dd);
+ nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
+ nip->device_id = cpu_to_be16(dd->ipath_deviceid);
+ majrev = dd->ipath_majrev;
+ minrev = dd->ipath_minrev;
nip->revision = cpu_to_be32((majrev << 16) | minrev);
nip->local_port_num = port;
- vendor = ipath_layer_get_vendorid(dd);
+ vendor = dd->ipath_vendorid;
nip->vendor_id[0] = 0;
nip->vendor_id[1] = vendor >> 8;
nip->vendor_id[2] = vendor;
@@ -133,13 +133,89 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
*/
if (startgx == 0)
/* The first is a copy of the read-only HW GUID. */
- *p = ipath_layer_get_guid(to_idev(ibdev)->dd);
+ *p = to_idev(ibdev)->dd->ipath_guid;
else
smp->status |= IB_SMP_INVALID_FIELD;
return reply(smp);
}
+
+static int get_overrunthreshold(struct ipath_devdata *dd)
+{
+ return (dd->ipath_ibcctrl >>
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
+}
+
+/**
+ * set_overrunthreshold - set the overrun threshold
+ * @dd: the infinipath device
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
+{
+ unsigned v;
+
+ v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
+ if (v != n) {
+ dd->ipath_ibcctrl &=
+ ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
+ INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
+ dd->ipath_ibcctrl |=
+ (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ }
+ return 0;
+}
+
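Both threshold setters follow the same read-modify-write shape: clear the shifted field, OR in the new value, write the register back. The generic form, as an illustrative helper rather than driver API:

static inline u64 set_reg_field(u64 reg, u64 mask, unsigned int shift, u64 val)
{
	reg &= ~(mask << shift);	/* clear the old field */
	reg |= (val & mask) << shift;	/* install the new value */
	return reg;
}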
+static int get_phyerrthreshold(struct ipath_devdata *dd)
+{
+ return (dd->ipath_ibcctrl >>
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+}
+
+/**
+ * set_phyerrthreshold - set the physical error threshold
+ * @dd: the infinipath device
+ * @n: the new threshold
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
+{
+ unsigned v;
+
+ v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+ if (v != n) {
+ dd->ipath_ibcctrl &=
+ ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
+ dd->ipath_ibcctrl |=
+ (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ }
+ return 0;
+}
+
+/**
+ * get_linkdowndefaultstate - get the default linkdown state
+ * @dd: the infinipath device
+ *
+ * Returns zero if the default is POLL, 1 if the default is SLEEP.
+ */
+static int get_linkdowndefaultstate(struct ipath_devdata *dd)
+{
+ return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
+}
+
static int recv_subn_get_portinfo(struct ib_smp *smp,
struct ib_device *ibdev, u8 port)
{
@@ -166,7 +242,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
(dev->mkeyprot_resv_lmc >> 6) == 0)
pip->mkey = dev->mkey;
pip->gid_prefix = dev->gid_prefix;
- lid = ipath_layer_get_lid(dev->dd);
+ lid = dev->dd->ipath_lid;
pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
pip->sm_lid = cpu_to_be16(dev->sm_lid);
pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
@@ -177,14 +253,14 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
pip->link_width_supported = 3; /* 1x or 4x */
pip->link_width_active = 2; /* 4x */
pip->linkspeed_portstate = 0x10; /* 2.5Gbps */
- ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+ ibcstat = dev->dd->ipath_lastibcstat;
pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
pip->portphysstate_linkdown =
(ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
- (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2);
+ (get_linkdowndefaultstate(dev->dd) ? 1 : 2);
pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */
- switch (ipath_layer_get_ibmtu(dev->dd)) {
+ switch (dev->dd->ipath_ibmtu) {
case 4096:
mtu = IB_MTU_4096;
break;
@@ -217,7 +293,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
/* P_KeyViolations are counted by hardware. */
pip->pkey_violations =
- cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) -
+ cpu_to_be16((ipath_get_cr_errpkey(dev->dd) -
dev->z_pkey_violations) & 0xFFFF);
pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
/* Only the hardware GUID is supported for now */
@@ -226,8 +302,8 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
/* 32.768 usec. response time (guessing) */
pip->resv_resptimevalue = 3;
pip->localphyerrors_overrunerrors =
- (ipath_layer_get_phyerrthreshold(dev->dd) << 4) |
- ipath_layer_get_overrunthreshold(dev->dd);
+ (get_phyerrthreshold(dev->dd) << 4) |
+ get_overrunthreshold(dev->dd);
/* pip->max_credit_hint; */
/* pip->link_roundtrip_latency[3]; */
@@ -237,6 +313,20 @@ bail:
return ret;
}
+/**
+ * get_pkeys - return the PKEY table for port 0
+ * @dd: the infinipath device
+ * @pkeys: the pkey table is placed here
+ */
+static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
+{
+ struct ipath_portdata *pd = dd->ipath_pd[0];
+
+ memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
+
+ return 0;
+}
+
static int recv_subn_get_pkeytable(struct ib_smp *smp,
struct ib_device *ibdev)
{
@@ -249,9 +339,9 @@ static int recv_subn_get_pkeytable(struct ib_smp *smp,
memset(smp->data, 0, sizeof(smp->data));
if (startpx == 0) {
struct ipath_ibdev *dev = to_idev(ibdev);
- unsigned i, n = ipath_layer_get_npkeys(dev->dd);
+ unsigned i, n = ipath_get_npkeys(dev->dd);
- ipath_layer_get_pkeys(dev->dd, p);
+ get_pkeys(dev->dd, p);
for (i = 0; i < n; i++)
q[i] = cpu_to_be16(p[i]);
@@ -269,6 +359,24 @@ static int recv_subn_set_guidinfo(struct ib_smp *smp,
}
/**
+ * set_linkdowndefaultstate - set the default linkdown state
+ * @dd: the infinipath device
+ * @sleep: the new state
+ *
+ * Note that this will only take effect when the link state changes.
+ */
+static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
+{
+ if (sleep)
+ dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
+ else
+ dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
+ dd->ipath_ibcctrl);
+ return 0;
+}
+
+/**
* recv_subn_set_portinfo - set port information
* @smp: the incoming SM packet
* @ibdev: the infiniband device
@@ -290,7 +398,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
u8 state;
u16 lstate;
u32 mtu;
- int ret;
+ int ret, ore;
if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
goto err;
@@ -304,7 +412,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
lid = be16_to_cpu(pip->lid);
- if (lid != ipath_layer_get_lid(dev->dd)) {
+ if (lid != dev->dd->ipath_lid) {
/* Must be a valid unicast LID address. */
if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
goto err;
@@ -342,11 +450,11 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
case 0: /* NOP */
break;
case 1: /* SLEEP */
- if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1))
+ if (set_linkdowndefaultstate(dev->dd, 1))
goto err;
break;
case 2: /* POLL */
- if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0))
+ if (set_linkdowndefaultstate(dev->dd, 0))
goto err;
break;
default:
@@ -376,7 +484,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
/* XXX We have already partially updated our state! */
goto err;
}
- ipath_layer_set_mtu(dev->dd, mtu);
+ ipath_set_mtu(dev->dd, mtu);
dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
@@ -392,20 +500,16 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
* later.
*/
if (pip->pkey_violations == 0)
- dev->z_pkey_violations =
- ipath_layer_get_cr_errpkey(dev->dd);
+ dev->z_pkey_violations = ipath_get_cr_errpkey(dev->dd);
if (pip->qkey_violations == 0)
dev->qkey_violations = 0;
- if (ipath_layer_set_phyerrthreshold(
- dev->dd,
- (pip->localphyerrors_overrunerrors >> 4) & 0xF))
+ ore = pip->localphyerrors_overrunerrors;
+ if (set_phyerrthreshold(dev->dd, (ore >> 4) & 0xF))
goto err;
- if (ipath_layer_set_overrunthreshold(
- dev->dd,
- (pip->localphyerrors_overrunerrors & 0xF)))
+ if (set_overrunthreshold(dev->dd, (ore & 0xF)))
goto err;
dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
@@ -423,7 +527,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
* is down or is being set to down.
*/
state = pip->linkspeed_portstate & 0xF;
- flags = ipath_layer_get_flags(dev->dd);
+ flags = dev->dd->ipath_flags;
lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
goto err;
@@ -439,7 +543,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
/* FALLTHROUGH */
case IB_PORT_DOWN:
if (lstate == 0)
- if (ipath_layer_get_linkdowndefaultstate(dev->dd))
+ if (get_linkdowndefaultstate(dev->dd))
lstate = IPATH_IB_LINKDOWN_SLEEP;
else
lstate = IPATH_IB_LINKDOWN;
@@ -451,7 +555,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
lstate = IPATH_IB_LINKDOWN_DISABLE;
else
goto err;
- ipath_layer_set_linkstate(dev->dd, lstate);
+ ipath_set_linkstate(dev->dd, lstate);
if (flags & IPATH_LINKACTIVE) {
event.event = IB_EVENT_PORT_ERR;
ib_dispatch_event(&event);
@@ -460,7 +564,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
case IB_PORT_ARMED:
if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
break;
- ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM);
+ ipath_set_linkstate(dev->dd, IPATH_IB_LINKARM);
if (flags & IPATH_LINKACTIVE) {
event.event = IB_EVENT_PORT_ERR;
ib_dispatch_event(&event);
@@ -469,7 +573,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
case IB_PORT_ACTIVE:
if (!(flags & IPATH_LINKARMED))
break;
- ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
+ ipath_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
event.event = IB_EVENT_PORT_ACTIVE;
ib_dispatch_event(&event);
break;
@@ -493,6 +597,152 @@ done:
return ret;
}
+/**
+ * rm_pkey - decrement the reference count for the given PKEY
+ * @dd: the infinipath device
+ * @key: the PKEY index
+ *
+ * Return true if this was the last reference and the hardware table entry
+ * needs to be changed.
+ */
+static int rm_pkey(struct ipath_devdata *dd, u16 key)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (dd->ipath_pkeys[i] != key)
+ continue;
+ if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
+ dd->ipath_pkeys[i] = 0;
+ ret = 1;
+ goto bail;
+ }
+ break;
+ }
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * add_pkey - add the given PKEY to the hardware table
+ * @dd: the infinipath device
+ * @key: the PKEY
+ *
+ * Return an error code if unable to add the entry, zero if no change,
+ * or 1 if the hardware PKEY register needs to be updated.
+ */
+static int add_pkey(struct ipath_devdata *dd, u16 key)
+{
+ int i;
+ u16 lkey = key & 0x7FFF;
+ int any = 0;
+ int ret;
+
+ if (lkey == 0x7FFF) {
+ ret = 0;
+ goto bail;
+ }
+
+ /* Look for an empty slot or a matching PKEY. */
+ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (!dd->ipath_pkeys[i]) {
+ any++;
+ continue;
+ }
+ /* If it matches exactly, try to increment the ref count */
+ if (dd->ipath_pkeys[i] == key) {
+ if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
+ ret = 0;
+ goto bail;
+ }
+ /* Lost the race. Look for an empty slot below. */
+ atomic_dec(&dd->ipath_pkeyrefs[i]);
+ any++;
+ }
+ /*
+ * It makes no sense to have both the limited and unlimited
+ * PKEY set at the same time since the unlimited one will
+ * disable the limited one.
+ */
+ if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
+ ret = -EEXIST;
+ goto bail;
+ }
+ }
+ if (!any) {
+ ret = -EBUSY;
+ goto bail;
+ }
+ for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
+ if (!dd->ipath_pkeys[i] &&
+ atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
+ /* for ipathstats, etc. */
+ ipath_stats.sps_pkeys[i] = lkey;
+ dd->ipath_pkeys[i] = key;
+ ret = 1;
+ goto bail;
+ }
+ }
+ ret = -EBUSY;
+
+bail:
+ return ret;
+}
+
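The slot-claiming loop at the end of add_pkey() leans on atomic_inc_return(): a result of 1 means this CPU just took an unused slot, anything higher means another thread got there first and the increment must be backed out. Reduced to a sketch with illustrative names:

static int try_claim(atomic_t *ref)
{
	if (atomic_inc_return(ref) == 1)
		return 1;	/* slot was free; we own it now */
	atomic_dec(ref);	/* lost the race; undo our increment */
	return 0;
}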
+/**
+ * set_pkeys - set the PKEY table for port 0
+ * @dd: the infinipath device
+ * @pkeys: the PKEY table
+ */
+static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
+{
+ struct ipath_portdata *pd;
+ int i;
+ int changed = 0;
+
+ pd = dd->ipath_pd[0];
+
+ for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
+ u16 key = pkeys[i];
+ u16 okey = pd->port_pkeys[i];
+
+ if (key == okey)
+ continue;
+ /*
+ * The value of this PKEY table entry is changing.
+ * Remove the old entry in the hardware's array of PKEYs.
+ */
+ if (okey & 0x7FFF)
+ changed |= rm_pkey(dd, okey);
+ if (key & 0x7FFF) {
+ int ret = add_pkey(dd, key);
+
+ if (ret < 0)
+ key = 0;
+ else
+ changed |= ret;
+ }
+ pd->port_pkeys[i] = key;
+ }
+ if (changed) {
+ u64 pkey;
+
+ pkey = (u64) dd->ipath_pkeys[0] |
+ ((u64) dd->ipath_pkeys[1] << 16) |
+ ((u64) dd->ipath_pkeys[2] << 32) |
+ ((u64) dd->ipath_pkeys[3] << 48);
+ ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
+ (unsigned long long) pkey);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
+ pkey);
+ }
+ return 0;
+}
+
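The final write in set_pkeys() packs four 16-bit keys into one 64-bit register, lowest index in the least significant bits. Worked example with illustrative values: pkeys = { 0xFFFF, 0x8001, 0, 0 } yields pkey = 0xFFFF | (0x8001ULL << 16) = 0x000000008001FFFF.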
static int recv_subn_set_pkeytable(struct ib_smp *smp,
struct ib_device *ibdev)
{
@@ -500,13 +750,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
__be16 *p = (__be16 *) smp->data;
u16 *q = (u16 *) smp->data;
struct ipath_ibdev *dev = to_idev(ibdev);
- unsigned i, n = ipath_layer_get_npkeys(dev->dd);
+ unsigned i, n = ipath_get_npkeys(dev->dd);
for (i = 0; i < n; i++)
q[i] = be16_to_cpu(p[i]);
- if (startpx != 0 ||
- ipath_layer_set_pkeys(dev->dd, q) != 0)
+ if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
smp->status |= IB_SMP_INVALID_FIELD;
return recv_subn_get_pkeytable(smp, ibdev);
@@ -844,10 +1093,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
pmp->data;
struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_layer_counters cntrs;
+ struct ipath_verbs_counters cntrs;
u8 port_select = p->port_select;
- ipath_layer_get_counters(dev->dd, &cntrs);
+ ipath_get_counters(dev->dd, &cntrs);
/* Adjust counters for any resets done. */
cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
@@ -944,8 +1193,8 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
u64 swords, rwords, spkts, rpkts, xwait;
u8 port_select = p->port_select;
- ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
- &rpkts, &xwait);
+ ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
+ &rpkts, &xwait);
/* Adjust counters for any resets done. */
swords -= dev->z_port_xmit_data;
@@ -978,13 +1227,13 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
pmp->data;
struct ipath_ibdev *dev = to_idev(ibdev);
- struct ipath_layer_counters cntrs;
+ struct ipath_verbs_counters cntrs;
/*
* Since the HW doesn't support clearing counters, we save the
* current count and subtract it from future responses.
*/
- ipath_layer_get_counters(dev->dd, &cntrs);
+ ipath_get_counters(dev->dd, &cntrs);
if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
dev->z_symbol_error_counter = cntrs.symbol_error_counter;
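Since the hardware counters cannot be cleared, the reset path above records the raw value and later reports raw minus baseline. The pattern, boiled down (names illustrative):

struct soft_counter {
	u64 zero_base;			/* raw value at last "clear" */
};

static void soft_clear(struct soft_counter *c, u64 raw)
{
	c->zero_base = raw;
}

static u64 soft_read(const struct soft_counter *c, u64 raw)
{
	return raw - c->zero_base;	/* counts since the clear */
}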
@@ -1041,8 +1290,8 @@ static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
struct ipath_ibdev *dev = to_idev(ibdev);
u64 swords, rwords, spkts, rpkts, xwait;
- ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
- &rpkts, &xwait);
+ ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
+ &rpkts, &xwait);
if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
dev->z_port_xmit_data = swords;
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
new file mode 100644
index 00000000000..11b7378ff21
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "ipath_verbs.h"
+
+/**
+ * ipath_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct ipath_mmap_info
+ */
+void ipath_release_mmap_info(struct kref *ref)
+{
+ struct ipath_mmap_info *ip =
+ container_of(ref, struct ipath_mmap_info, ref);
+
+ vfree(ip->obj);
+ kfree(ip);
+}
+
+/*
+ * open and close keep track of how many times the memory region is
+ * mapped, to avoid releasing it prematurely.
+ */
+static void ipath_vma_open(struct vm_area_struct *vma)
+{
+ struct ipath_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+ ip->mmap_cnt++;
+}
+
+static void ipath_vma_close(struct vm_area_struct *vma)
+{
+ struct ipath_mmap_info *ip = vma->vm_private_data;
+
+ ip->mmap_cnt--;
+ kref_put(&ip->ref, ipath_release_mmap_info);
+}
+
+static struct vm_operations_struct ipath_vm_ops = {
+ .open = ipath_vma_open,
+ .close = ipath_vma_close,
+};
+
+/**
+ * ipath_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct ipath_ibdev *dev = to_idev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct ipath_mmap_info *ip, **pp;
+ int ret = -EINVAL;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_irq(&dev->pending_lock);
+ for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
+ /* Only the creator is allowed to mmap the object */
+ if (context != ip->context || (void *) offset != ip->obj)
+ continue;
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->size)
+ break;
+
+ *pp = ip->next;
+ spin_unlock_irq(&dev->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret)
+ goto done;
+ vma->vm_ops = &ipath_vm_ops;
+ vma->vm_private_data = ip;
+ ipath_vma_open(vma);
+ goto done;
+ }
+ spin_unlock_irq(&dev->pending_lock);
+done:
+ return ret;
+}
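
This new file implements the kernel half of a two-step handshake: the create call returns the vmalloc'd ring's kernel address through udata, and the process hands that value back as the mmap() offset so ipath_mmap() can match it against the pending list, after which the kref keeps the ring alive for as long as any mapping exists. A hedged sketch of the userspace half, assuming the 64-bit offset has already been read out of the create response (map_rwq is a made-up wrapper, not part of any library):

	#include <stdint.h>
	#include <sys/mman.h>

	/*
	 * Sketch only: map the receive ring exported by the driver.
	 * 'fd' is the open uverbs device and 'offset' is the __u64 the
	 * kernel copied into udata when the QP/SRQ was created.
	 */
	static void *map_rwq(int fd, uint64_t offset, size_t size)
	{
		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, (off_t) offset);

		return p == MAP_FAILED ? NULL : p;
	}
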
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 4ac31a5da33..b36f6fb3e37 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -36,6 +36,18 @@
#include "ipath_verbs.h"
+/* Fast memory region */
+struct ipath_fmr {
+ struct ib_fmr ibfmr;
+ u8 page_shift;
+ struct ipath_mregion mr; /* must be last */
+};
+
+static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct ipath_fmr, ibfmr);
+}
+
/**
* ipath_get_dma_mr - get a DMA memory region
* @pd: protection domain for this memory region
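
to_ifmr() above is the standard container_of() downcast: the ib_fmr handed to the core is embedded in the driver-private struct, so a pointer to the member recovers the whole wrapper, and the "must be last" placement leaves room for ipath_mregion's trailing variable-length array. A generic sketch of the idiom outside the kernel (names are illustrative):

	#include <stddef.h>

	struct inner { int x; };

	struct outer {
		int priv;
		struct inner member;	/* handed out to callers */
	};

	/* Recover the enclosing struct from a pointer to one of its members. */
	static struct outer *to_outer(struct inner *ip)
	{
		return (struct outer *)((char *)ip - offsetof(struct outer, member));
	}
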
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 83e557be591..224b0f40767 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -35,7 +35,7 @@
#include <linux/vmalloc.h>
#include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"
#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
@@ -44,19 +44,6 @@
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
BITS_PER_PAGE, off)
-#define TRANS_INVALID 0
-#define TRANS_ANY2RST 1
-#define TRANS_RST2INIT 2
-#define TRANS_INIT2INIT 3
-#define TRANS_INIT2RTR 4
-#define TRANS_RTR2RTS 5
-#define TRANS_RTS2RTS 6
-#define TRANS_SQERR2RTS 7
-#define TRANS_ANY2ERR 8
-#define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */
-#define TRANS_SQD2SQD 10 /* error if not drained & parameter change */
-#define TRANS_SQD2RTS 11 /* error if not drained */
-
/*
* Convert the AETH credit code into the number of credits.
*/
@@ -287,7 +274,7 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
free_qpn(qpt, qp->ibqp.qp_num);
if (!atomic_dec_and_test(&qp->refcount) ||
!ipath_destroy_qp(&qp->ibqp))
- _VERBS_INFO("QP memory leak!\n");
+ ipath_dbg(KERN_INFO "QP memory leak!\n");
qp = nqp;
}
}
@@ -355,8 +342,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
qp->s_last = 0;
qp->s_ssn = 1;
qp->s_lsn = 0;
- qp->r_rq.head = 0;
- qp->r_rq.tail = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
qp->r_reuse_sge = 0;
}
@@ -373,8 +362,8 @@ void ipath_error_qp(struct ipath_qp *qp)
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
- _VERBS_INFO("QP%d/%d in error state\n",
- qp->ibqp.qp_num, qp->remote_qpn);
+ ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
+ qp->ibqp.qp_num, qp->remote_qpn);
spin_lock(&dev->pending_lock);
/* XXX What if it's already removed by the timeout code? */
@@ -410,15 +399,32 @@ void ipath_error_qp(struct ipath_qp *qp)
qp->s_hdrwords = 0;
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- wc.opcode = IB_WC_RECV;
- spin_lock(&qp->r_rq.lock);
- while (qp->r_rq.tail != qp->r_rq.head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
- if (++qp->r_rq.tail >= qp->r_rq.size)
- qp->r_rq.tail = 0;
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ if (qp->r_rq.wq) {
+ struct ipath_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check pointers before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ wc.opcode = IB_WC_RECV;
+ while (tail != head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
}
- spin_unlock(&qp->r_rq.lock);
}
/**
@@ -426,11 +432,12 @@ void ipath_error_qp(struct ipath_qp *qp)
* @ibqp: the queue pair whose attributes we're modifying
* @attr: the new attributes
* @attr_mask: the mask of attributes to modify
+ * @udata: user data for ipathverbs.so
*
* Returns 0 on success, otherwise returns an errno.
*/
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask)
+ int attr_mask, struct ib_udata *udata)
{
struct ipath_ibdev *dev = to_idev(ibqp->device);
struct ipath_qp *qp = to_iqp(ibqp);
@@ -448,19 +455,46 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr_mask))
goto inval;
- if (attr_mask & IB_QP_AV)
+ if (attr_mask & IB_QP_AV) {
if (attr->ah_attr.dlid == 0 ||
attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
goto inval;
+ if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
+ (attr->ah_attr.grh.sgid_index > 1))
+ goto inval;
+ }
+
if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+ if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
goto inval;
if (attr_mask & IB_QP_MIN_RNR_TIMER)
if (attr->min_rnr_timer > 31)
goto inval;
+ if (attr_mask & IB_QP_PORT)
+ if (attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ if (attr->path_mtu > IB_MTU_4096)
+ goto inval;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > 1)
+ goto inval;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ if (attr->max_rd_atomic > 1)
+ goto inval;
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE)
+ if (attr->path_mig_state != IB_MIG_MIGRATED &&
+ attr->path_mig_state != IB_MIG_REARM)
+ goto inval;
+
switch (new_state) {
case IB_QPS_RESET:
ipath_reset_qp(qp);
@@ -511,6 +545,9 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_MIN_RNR_TIMER)
qp->r_min_rnr_timer = attr->min_rnr_timer;
+ if (attr_mask & IB_QP_TIMEOUT)
+ qp->timeout = attr->timeout;
+
if (attr_mask & IB_QP_QKEY)
qp->qkey = attr->qkey;
@@ -543,7 +580,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->dest_qp_num = qp->remote_qpn;
attr->qp_access_flags = qp->qp_access_flags;
attr->cap.max_send_wr = qp->s_size - 1;
- attr->cap.max_recv_wr = qp->r_rq.size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
attr->cap.max_send_sge = qp->s_max_sge;
attr->cap.max_recv_sge = qp->r_rq.max_sge;
attr->cap.max_inline_data = 0;
@@ -557,7 +594,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->max_dest_rd_atomic = 1;
attr->min_rnr_timer = qp->r_min_rnr_timer;
attr->port_num = 1;
- attr->timeout = 0;
+ attr->timeout = qp->timeout;
attr->retry_cnt = qp->s_retry_cnt;
attr->rnr_retry = qp->s_rnr_retry;
attr->alt_port_num = 0;
@@ -569,9 +606,10 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->recv_cq = qp->ibqp.recv_cq;
init_attr->srq = qp->ibqp.srq;
init_attr->cap = attr->cap;
- init_attr->sq_sig_type =
- (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
- ? IB_SIGNAL_REQ_WR : 0;
+ if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
init_attr->qp_type = qp->ibqp.qp_type;
init_attr->port_num = 1;
return 0;
@@ -596,13 +634,23 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
} else {
u32 min, max, x;
u32 credits;
-
+ struct ipath_rwq *wq = qp->r_rq.wq;
+ u32 head;
+ u32 tail;
+
+ /* sanity check pointers before trusting them */
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
/*
* Compute the number of credits available (RWQEs).
* XXX Not holding the r_rq.lock here so there is a small
* chance that the pair of reads are not atomic.
*/
- credits = qp->r_rq.head - qp->r_rq.tail;
+ credits = head - tail;
if ((int)credits < 0)
credits += qp->r_rq.size;
/*
@@ -679,27 +727,37 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
case IB_QPT_UD:
case IB_QPT_SMI:
case IB_QPT_GSI:
- qp = kmalloc(sizeof(*qp), GFP_KERNEL);
+ sz = sizeof(*qp);
+ if (init_attr->srq) {
+ struct ipath_srq *srq = to_isrq(init_attr->srq);
+
+ sz += sizeof(*qp->r_sg_list) *
+ srq->rq.max_sge;
+ } else
+ sz += sizeof(*qp->r_sg_list) *
+ init_attr->cap.max_recv_sge;
+ qp = kmalloc(sz, GFP_KERNEL);
if (!qp) {
- vfree(swq);
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto bail_swq;
}
if (init_attr->srq) {
+ sz = 0;
qp->r_rq.size = 0;
qp->r_rq.max_sge = 0;
qp->r_rq.wq = NULL;
+ init_attr->cap.max_recv_wr = 0;
+ init_attr->cap.max_recv_sge = 0;
} else {
qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
- sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) +
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
sizeof(struct ipath_rwqe);
- qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
+ qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
+ qp->r_rq.size * sz);
if (!qp->r_rq.wq) {
- kfree(qp);
- vfree(swq);
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto bail_qp;
}
}
@@ -719,24 +777,19 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
qp->s_wq = swq;
qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge;
- qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
- 1 << IPATH_S_SIGNAL_REQ_WR : 0;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
+ else
+ qp->s_flags = 0;
dev = to_idev(ibpd->device);
err = ipath_alloc_qpn(&dev->qp_table, qp,
init_attr->qp_type);
if (err) {
- vfree(swq);
- vfree(qp->r_rq.wq);
- kfree(qp);
ret = ERR_PTR(err);
- goto bail;
+ goto bail_rwq;
}
+ qp->ip = NULL;
ipath_reset_qp(qp);
-
- /* Tell the core driver that the kernel SMA is present. */
- if (init_attr->qp_type == IB_QPT_SMI)
- ipath_layer_set_verbs_flags(dev->dd,
- IPATH_VERBS_KERNEL_SMA);
break;
default:
@@ -747,8 +800,63 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
init_attr->cap.max_inline_data = 0;
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See ipath_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ struct ipath_mmap_info *ip;
+ __u64 offset = (__u64) qp->r_rq.wq;
+ int err;
+
+ err = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_rwq;
+ }
+
+ if (qp->r_rq.wq) {
+ /* Allocate info for ipath_mmap(). */
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_rwq;
+ }
+ qp->ip = ip;
+ ip->context = ibpd->uobject->context;
+ ip->obj = qp->r_rq.wq;
+ kref_init(&ip->ref);
+ ip->mmap_cnt = 0;
+ ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
+ qp->r_rq.size * sz);
+ spin_lock_irq(&dev->pending_lock);
+ ip->next = dev->pending_mmaps;
+ dev->pending_mmaps = ip;
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ }
+
+ spin_lock(&dev->n_qps_lock);
+ if (dev->n_qps_allocated == ib_ipath_max_qps) {
+ spin_unlock(&dev->n_qps_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ dev->n_qps_allocated++;
+ spin_unlock(&dev->n_qps_lock);
+
ret = &qp->ibqp;
+ goto bail;
+bail_ip:
+ kfree(qp->ip);
+bail_rwq:
+ vfree(qp->r_rq.wq);
+bail_qp:
+ kfree(qp);
+bail_swq:
+ vfree(swq);
bail:
return ret;
}
@@ -768,15 +876,12 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
struct ipath_ibdev *dev = to_idev(ibqp->device);
unsigned long flags;
- /* Tell the core driver that the kernel SMA is gone. */
- if (qp->ibqp.qp_type == IB_QPT_SMI)
- ipath_layer_set_verbs_flags(dev->dd, 0);
-
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- spin_lock(&qp->s_lock);
+ spin_lock_irqsave(&qp->s_lock, flags);
qp->state = IB_QPS_ERR;
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_lock(&dev->n_qps_lock);
+ dev->n_qps_allocated--;
+ spin_unlock(&dev->n_qps_lock);
/* Stop the sending tasklet. */
tasklet_kill(&qp->s_task);
@@ -797,8 +902,11 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
if (atomic_read(&qp->refcount) != 0)
ipath_free_qp(&dev->qp_table, qp);
+ if (qp->ip)
+ kref_put(&qp->ip->ref, ipath_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
vfree(qp->s_wq);
- vfree(qp->r_rq.wq);
kfree(qp);
return 0;
}
@@ -850,8 +958,8 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
- _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
- qp->ibqp.qp_num, qp->remote_qpn, wc->status);
+ ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
+ qp->ibqp.qp_num, qp->remote_qpn, wc->status);
spin_lock(&dev->pending_lock);
/* XXX What if it's already removed by the timeout code? */
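
A recurring pattern in the ipath_qp.c hunks: the ring head and tail now live in memory the process can write, so each kernel-side use clamps them to the ring size before trusting them, and occupancy is computed modulo the size. Condensed into a standalone sketch (ipath_error_qp() and ipath_compute_aeth() inline the same logic):

	#include <linux/types.h>

	/* Clamp a user-writable ring index before trusting it. */
	static u32 ring_clamp(u32 idx, u32 size)
	{
		return idx >= size ? 0 : idx;
	}

	/* Posted-but-unconsumed entries in a ring of 'size' slots. */
	static u32 ring_credits(u32 head, u32 tail, u32 size)
	{
		u32 credits = head - tail;

		if ((int) credits < 0)
			credits += size;
		return credits;
	}
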
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 774d1615ce2..a08654042c0 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -32,7 +32,7 @@
*/
#include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
@@ -540,7 +540,7 @@ static void send_rc_ack(struct ipath_qp *qp)
lrh0 = IPATH_LRH_GRH;
}
/* read pkey_index w/o lock (it's atomic) */
- bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+ bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index);
if (qp->r_nak_state)
ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
(qp->r_nak_state <<
@@ -557,7 +557,7 @@ static void send_rc_ack(struct ipath_qp *qp)
hdr.lrh[0] = cpu_to_be16(lrh0);
hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
+ hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
@@ -1323,8 +1323,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
* the eager header buffer size to 56 bytes so the last 4
* bytes of the BTH header (PSN) is in the data buffer.
*/
- header_in_data =
- ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+ header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
if (header_in_data) {
psn = be32_to_cpu(((__be32 *) data)[0]);
data += sizeof(__be32);
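
The header_in_data test above encodes a layout quirk: with a 16-word (64-byte) receive header entry and a 56-byte eager buffer, the last four bytes of the BTH, i.e. the PSN, spill into the data buffer, so the driver reads the PSN from data and skips past it before touching the payload. A sketch of consuming the spilled field, with ntohl() standing in for the kernel's be32_to_cpu():

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Sketch: read the spilled big-endian PSN and advance past it. */
	static uint32_t read_spilled_psn(const uint8_t **datap)
	{
		uint32_t be_psn;

		memcpy(&be_psn, *datap, sizeof(be_psn));	/* avoid unaligned deref */
		*datap += sizeof(be_psn);
		return ntohl(be_psn);
	}
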
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 89df8f5ea99..6e23b3d632b 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -36,8 +36,7 @@
/*
* This file should only be included by kernel source, and by the diags. It
- * defines the registers, and their contents, for the InfiniPath HT-400
- * chip.
+ * defines the registers, and their contents, for InfiniPath chips.
*/
/*
@@ -283,10 +282,12 @@
#define INFINIPATH_XGXS_RESET 0x7ULL
#define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL
#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
+#define INFINIPATH_XGXS_RX_POL_SHIFT 19
+#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
-/* TID entries (memory), HT400-only */
+/* TID entries (memory), HT-only */
#define INFINIPATH_RT_VALID 0x8000000000000000ULL
#define INFINIPATH_RT_ADDR_SHIFT 0
#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF
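
The new RX_POL definitions follow this header's mask/shift convention: a field is updated by clearing (MASK << SHIFT) and OR-ing in the new value. A hedged sketch of such a read-modify-write on the config word (the function name is a placeholder, not a driver symbol):

	#include <stdint.h>

	#define XGXS_RX_POL_SHIFT	19
	#define XGXS_RX_POL_MASK	0xfULL

	/* Replace the 4-bit RX polarity-invert field in an XGXS config word. */
	static uint64_t xgxs_set_rx_pol(uint64_t regval, uint64_t pol)
	{
		regval &= ~(XGXS_RX_POL_MASK << XGXS_RX_POL_SHIFT);
		regval |= (pol & XGXS_RX_POL_MASK) << XGXS_RX_POL_SHIFT;
		return regval;
	}
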
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 772bc59fb85..5c1da2d25e0 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -32,7 +32,7 @@
*/
#include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"
/*
* Convert the AETH RNR timeout code into the number of milliseconds.
@@ -106,6 +106,54 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
spin_unlock_irqrestore(&dev->pending_lock, flags);
}
+static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ int user = to_ipd(qp->ibqp.pd)->user;
+ int i, j, ret;
+ struct ib_wc wc;
+
+ qp->r_len = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ if ((user && wqe->sg_list[i].lkey == 0) ||
+ !ipath_lkey_ok(&dev->lk_table,
+ &qp->r_sg_list[j], &wqe->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE))
+ goto bad_lkey;
+ qp->r_len += wqe->sg_list[i].length;
+ j++;
+ }
+ qp->r_sge.sge = qp->r_sg_list[0];
+ qp->r_sge.sg_list = qp->r_sg_list + 1;
+ qp->r_sge.num_sge = j;
+ ret = 1;
+ goto bail;
+
+bad_lkey:
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.byte_len = 0;
+ wc.imm_data = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = 0;
+ wc.wc_flags = 0;
+ wc.pkey_index = 0;
+ wc.slid = 0;
+ wc.sl = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ /* Signal solicited completion event. */
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ ret = 0;
+bail:
+ return ret;
+}
+
/**
* ipath_get_rwqe - copy the next RWQE into the QP's RWQE
* @qp: the QP
@@ -119,71 +167,71 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
unsigned long flags;
struct ipath_rq *rq;
+ struct ipath_rwq *wq;
struct ipath_srq *srq;
struct ipath_rwqe *wqe;
- int ret = 1;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ int ret;
- if (!qp->ibqp.srq) {
+ if (qp->ibqp.srq) {
+ srq = to_isrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
+ rq = &srq->rq;
+ } else {
+ srq = NULL;
+ handler = NULL;
rq = &qp->r_rq;
- spin_lock_irqsave(&rq->lock, flags);
-
- if (unlikely(rq->tail == rq->head)) {
- ret = 0;
- goto done;
- }
- wqe = get_rwqe_ptr(rq, rq->tail);
- qp->r_wr_id = wqe->wr_id;
- if (!wr_id_only) {
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->num_sge;
- qp->r_len = wqe->length;
- }
- if (++rq->tail >= rq->size)
- rq->tail = 0;
- goto done;
}
- srq = to_isrq(qp->ibqp.srq);
- rq = &srq->rq;
spin_lock_irqsave(&rq->lock, flags);
-
- if (unlikely(rq->tail == rq->head)) {
- ret = 0;
- goto done;
- }
- wqe = get_rwqe_ptr(rq, rq->tail);
+ wq = rq->wq;
+ tail = wq->tail;
+ /* Validate tail before using it since it is user writable. */
+ if (tail >= rq->size)
+ tail = 0;
+ do {
+ if (unlikely(tail == wq->head)) {
+ spin_unlock_irqrestore(&rq->lock, flags);
+ ret = 0;
+ goto bail;
+ }
+ wqe = get_rwqe_ptr(rq, tail);
+ if (++tail >= rq->size)
+ tail = 0;
+ } while (!wr_id_only && !init_sge(qp, wqe));
qp->r_wr_id = wqe->wr_id;
- if (!wr_id_only) {
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->num_sge;
- qp->r_len = wqe->length;
- }
- if (++rq->tail >= rq->size)
- rq->tail = 0;
- if (srq->ibsrq.event_handler) {
- struct ib_event ev;
+ wq->tail = tail;
+
+ ret = 1;
+ if (handler) {
u32 n;
- if (rq->head < rq->tail)
- n = rq->size + rq->head - rq->tail;
+ /*
+ * validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
else
- n = rq->head - rq->tail;
+ n -= tail;
if (n < srq->limit) {
+ struct ib_event ev;
+
srq->limit = 0;
spin_unlock_irqrestore(&rq->lock, flags);
ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- srq->ibsrq.event_handler(&ev,
- srq->ibsrq.srq_context);
+ handler(&ev, srq->ibsrq.srq_context);
goto bail;
}
}
-
-done:
spin_unlock_irqrestore(&rq->lock, flags);
+
bail:
return ret;
}
@@ -422,6 +470,15 @@ done:
wake_up(&qp->wait);
}
+static int want_buffer(struct ipath_devdata *dd)
+{
+ set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+
+ return 0;
+}
+
/**
* ipath_no_bufs_available - tell the layer driver we need buffers
* @qp: the QP that caused the problem
@@ -438,7 +495,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
list_add_tail(&qp->piowait, &dev->piowait);
spin_unlock_irqrestore(&dev->pending_lock, flags);
/*
- * Note that as soon as ipath_layer_want_buffer() is called and
+ * Note that as soon as want_buffer() is called and
* possibly before it returns, ipath_ib_piobufavail()
* could be called. If we are still in the tasklet function,
* tasklet_hi_schedule() will not call us until the next time
@@ -448,7 +505,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
*/
clear_bit(IPATH_S_BUSY, &qp->s_flags);
tasklet_unlock(&qp->s_task);
- ipath_layer_want_buffer(dev->dd);
+ want_buffer(dev->dd);
dev->n_piowait++;
}
@@ -563,7 +620,7 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
hdr->hop_limit = grh->hop_limit;
/* The SGID is 32-bit aligned. */
hdr->sgid.global.subnet_prefix = dev->gid_prefix;
- hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
+ hdr->sgid.global.interface_id = dev->dd->ipath_guid;
hdr->dgid = grh->dgid;
/* GRH header size in 32-bit words. */
@@ -595,8 +652,7 @@ void ipath_do_ruc_send(unsigned long data)
if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
goto bail;
- if (unlikely(qp->remote_ah_attr.dlid ==
- ipath_layer_get_lid(dev->dd))) {
+ if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
ipath_ruc_loopback(qp);
goto clear;
}
@@ -663,8 +719,8 @@ again:
qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
SIZE_OF_CRC);
- qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
- bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+ qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+ bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
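
ipath_get_rwqe() now validates both indices and computes ring occupancy without assuming head >= tail. The remaining-entry calculation, written as a standalone sketch:

	#include <linux/types.h>

	/*
	 * Sketch: entries still posted in a circular RWQ after 'tail' has
	 * advanced, with the user-writable head clamped before use.
	 */
	static u32 rwq_remaining(u32 head, u32 tail, u32 size)
	{
		u32 n = head >= size ? 0 : head;

		if (n < tail)
			n += size - tail;
		else
			n -= tail;
		return n;
	}

When this count drops below srq->limit, the hunk above fires a single IB_EVENT_SRQ_LIMIT_REACHED and zeroes the limit, so the event is one-shot until the limit is rearmed.
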
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index f760434660b..941e866d951 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -48,66 +48,39 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
struct ipath_srq *srq = to_isrq(ibsrq);
- struct ipath_ibdev *dev = to_idev(ibsrq->device);
+ struct ipath_rwq *wq;
unsigned long flags;
int ret;
for (; wr; wr = wr->next) {
struct ipath_rwqe *wqe;
u32 next;
- int i, j;
+ int i;
- if (wr->num_sge > srq->rq.max_sge) {
+ if ((unsigned) wr->num_sge > srq->rq.max_sge) {
*bad_wr = wr;
ret = -ENOMEM;
goto bail;
}
spin_lock_irqsave(&srq->rq.lock, flags);
- next = srq->rq.head + 1;
+ wq = srq->rq.wq;
+ next = wq->head + 1;
if (next >= srq->rq.size)
next = 0;
- if (next == srq->rq.tail) {
+ if (next == wq->tail) {
spin_unlock_irqrestore(&srq->rq.lock, flags);
*bad_wr = wr;
ret = -ENOMEM;
goto bail;
}
- wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
+ wqe = get_rwqe_ptr(&srq->rq, wq->head);
wqe->wr_id = wr->wr_id;
- wqe->sg_list[0].mr = NULL;
- wqe->sg_list[0].vaddr = NULL;
- wqe->sg_list[0].length = 0;
- wqe->sg_list[0].sge_length = 0;
- wqe->length = 0;
- for (i = 0, j = 0; i < wr->num_sge; i++) {
- /* Check LKEY */
- if (to_ipd(srq->ibsrq.pd)->user &&
- wr->sg_list[i].lkey == 0) {
- spin_unlock_irqrestore(&srq->rq.lock,
- flags);
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
- if (wr->sg_list[i].length == 0)
- continue;
- if (!ipath_lkey_ok(&dev->lk_table,
- &wqe->sg_list[j],
- &wr->sg_list[i],
- IB_ACCESS_LOCAL_WRITE)) {
- spin_unlock_irqrestore(&srq->rq.lock,
- flags);
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
- wqe->length += wr->sg_list[i].length;
- j++;
- }
- wqe->num_sge = j;
- srq->rq.head = next;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ wq->head = next;
spin_unlock_irqrestore(&srq->rq.lock, flags);
}
ret = 0;
@@ -133,53 +106,95 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto done;
}
if (srq_init_attr->attr.max_wr == 0) {
ret = ERR_PTR(-EINVAL);
- goto bail;
+ goto done;
}
if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
(srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
ret = ERR_PTR(-EINVAL);
- goto bail;
+ goto done;
}
srq = kmalloc(sizeof(*srq), GFP_KERNEL);
if (!srq) {
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto done;
}
/*
* Need to use vmalloc() if we want to support large #s of entries.
*/
srq->rq.size = srq_init_attr->attr.max_wr + 1;
- sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge +
+ srq->rq.max_sge = srq_init_attr->attr.max_sge;
+ sz = sizeof(struct ib_sge) * srq->rq.max_sge +
sizeof(struct ipath_rwqe);
- srq->rq.wq = vmalloc(srq->rq.size * sz);
+ srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
if (!srq->rq.wq) {
- kfree(srq);
ret = ERR_PTR(-ENOMEM);
- goto bail;
+ goto bail_srq;
}
/*
+ * Return the address of the RWQ as the offset to mmap.
+ * See ipath_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ struct ipath_mmap_info *ip;
+ __u64 offset = (__u64) srq->rq.wq;
+ int err;
+
+ err = ib_copy_to_udata(udata, &offset, sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_wq;
+ }
+
+ /* Allocate info for ipath_mmap(). */
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_wq;
+ }
+ srq->ip = ip;
+ ip->context = ibpd->uobject->context;
+ ip->obj = srq->rq.wq;
+ kref_init(&ip->ref);
+ ip->mmap_cnt = 0;
+ ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
+ srq->rq.size * sz);
+ spin_lock_irq(&dev->pending_lock);
+ ip->next = dev->pending_mmaps;
+ dev->pending_mmaps = ip;
+ spin_unlock_irq(&dev->pending_lock);
+ } else
+ srq->ip = NULL;
+
+ /*
* ib_create_srq() will initialize srq->ibsrq.
*/
spin_lock_init(&srq->rq.lock);
- srq->rq.head = 0;
- srq->rq.tail = 0;
+ srq->rq.wq->head = 0;
+ srq->rq.wq->tail = 0;
srq->rq.max_sge = srq_init_attr->attr.max_sge;
srq->limit = srq_init_attr->attr.srq_limit;
+ dev->n_srqs_allocated++;
+
ret = &srq->ibsrq;
+ goto done;
- dev->n_srqs_allocated++;
+bail_wq:
+ vfree(srq->rq.wq);
-bail:
+bail_srq:
+ kfree(srq);
+
+done:
return ret;
}
@@ -188,83 +203,130 @@ bail:
* @ibsrq: the SRQ to modify
* @attr: the new attributes of the SRQ
* @attr_mask: indicates which attributes to modify
+ * @udata: user data for ipathverbs.so
*/
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask)
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
{
struct ipath_srq *srq = to_isrq(ibsrq);
- unsigned long flags;
- int ret;
+ int ret = 0;
- if (attr_mask & IB_SRQ_MAX_WR)
- if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
- (attr->max_sge > srq->rq.max_sge)) {
- ret = -EINVAL;
- goto bail;
- }
+ if (attr_mask & IB_SRQ_MAX_WR) {
+ struct ipath_rwq *owq;
+ struct ipath_rwq *wq;
+ struct ipath_rwqe *p;
+ u32 sz, size, n, head, tail;
- if (attr_mask & IB_SRQ_LIMIT)
- if (attr->srq_limit >= srq->rq.size) {
+ /* Check that the requested sizes are below the limits. */
+ if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
+ ((attr_mask & IB_SRQ_LIMIT) ?
+ attr->srq_limit : srq->limit) > attr->max_wr) {
ret = -EINVAL;
goto bail;
}
- if (attr_mask & IB_SRQ_MAX_WR) {
- struct ipath_rwqe *wq, *p;
- u32 sz, size, n;
-
sz = sizeof(struct ipath_rwqe) +
- attr->max_sge * sizeof(struct ipath_sge);
+ srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
- wq = vmalloc(size * sz);
+ wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
if (!wq) {
ret = -ENOMEM;
goto bail;
}
- spin_lock_irqsave(&srq->rq.lock, flags);
- if (srq->rq.head < srq->rq.tail)
- n = srq->rq.size + srq->rq.head - srq->rq.tail;
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See ipath_mmap() for details.
+ */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 offset_addr;
+ __u64 offset = (__u64) wq;
+
+ ret = ib_copy_from_udata(&offset_addr, udata,
+ sizeof(offset_addr));
+ if (ret) {
+ vfree(wq);
+ goto bail;
+ }
+ udata->outbuf = (void __user *) offset_addr;
+ ret = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (ret) {
+ vfree(wq);
+ goto bail;
+ }
+ }
+
+ spin_lock_irq(&srq->rq.lock);
+ /*
+ * validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ owq = srq->rq.wq;
+ head = owq->head;
+ if (head >= srq->rq.size)
+ head = 0;
+ tail = owq->tail;
+ if (tail >= srq->rq.size)
+ tail = 0;
+ n = head;
+ if (n < tail)
+ n += srq->rq.size - tail;
else
- n = srq->rq.head - srq->rq.tail;
- if (size <= n || size <= srq->limit) {
- spin_unlock_irqrestore(&srq->rq.lock, flags);
+ n -= tail;
+ if (size <= n) {
+ spin_unlock_irq(&srq->rq.lock);
vfree(wq);
ret = -EINVAL;
goto bail;
}
n = 0;
- p = wq;
- while (srq->rq.tail != srq->rq.head) {
+ p = wq->wq;
+ while (tail != head) {
struct ipath_rwqe *wqe;
int i;
- wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
+ wqe = get_rwqe_ptr(&srq->rq, tail);
p->wr_id = wqe->wr_id;
- p->length = wqe->length;
p->num_sge = wqe->num_sge;
for (i = 0; i < wqe->num_sge; i++)
p->sg_list[i] = wqe->sg_list[i];
n++;
p = (struct ipath_rwqe *)((char *) p + sz);
- if (++srq->rq.tail >= srq->rq.size)
- srq->rq.tail = 0;
+ if (++tail >= srq->rq.size)
+ tail = 0;
}
- vfree(srq->rq.wq);
srq->rq.wq = wq;
srq->rq.size = size;
- srq->rq.head = n;
- srq->rq.tail = 0;
- srq->rq.max_sge = attr->max_sge;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- }
-
- if (attr_mask & IB_SRQ_LIMIT) {
- spin_lock_irqsave(&srq->rq.lock, flags);
- srq->limit = attr->srq_limit;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
+ wq->head = n;
+ wq->tail = 0;
+ if (attr_mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
+
+ vfree(owq);
+
+ if (srq->ip) {
+ struct ipath_mmap_info *ip = srq->ip;
+ struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
+
+ ip->obj = wq;
+ ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
+ size * sz);
+ spin_lock_irq(&dev->pending_lock);
+ ip->next = dev->pending_mmaps;
+ dev->pending_mmaps = ip;
+ spin_unlock_irq(&dev->pending_lock);
+ }
+ } else if (attr_mask & IB_SRQ_LIMIT) {
+ spin_lock_irq(&srq->rq.lock);
+ if (attr->srq_limit >= srq->rq.size)
+ ret = -EINVAL;
+ else
+ srq->limit = attr->srq_limit;
+ spin_unlock_irq(&srq->rq.lock);
}
- ret = 0;
bail:
return ret;
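
Resizing the SRQ copies the live WQEs from the old ring into slots 0..n-1 of the new one, so the new ring always starts compacted with head = n and tail = 0. A stripped-down sketch of that walk; the real code copies WQE fields rather than raw bytes, but the ring traversal is the same:

	#include <stdint.h>
	#include <string.h>

	/* Sketch: compact the live entries of a ring ('sz' bytes per slot). */
	static uint32_t ring_compact(char *dst, const char *src,
				     uint32_t tail, uint32_t head,
				     uint32_t size, size_t sz)
	{
		uint32_t n = 0;

		while (tail != head) {
			memcpy(dst + (size_t) n * sz, src + (size_t) tail * sz, sz);
			n++;
			if (++tail >= size)
				tail = 0;
		}
		return n;	/* the new head; the new tail starts at 0 */
	}
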
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index 70351b7e35c..30a825928fc 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -271,33 +271,6 @@ void ipath_get_faststats(unsigned long opaque)
}
}
- if (dd->ipath_nosma_bufs) {
- dd->ipath_nosma_secs += 5;
- if (dd->ipath_nosma_secs >= 30) {
- ipath_cdbg(SMA, "No SMA bufs avail %u seconds; "
- "cancelling pending sends\n",
- dd->ipath_nosma_secs);
- /*
- * issue an abort as well, in case we have a packet
- * stuck in launch fifo. This could corrupt an
- * outgoing user packet in the worst case,
- * but this is a pretty catastrophic, anyway.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- INFINIPATH_S_ABORT);
- ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
- dd->ipath_piobcnt2k +
- dd->ipath_piobcnt4k -
- dd->ipath_lastport_piobuf);
- /* start again, if necessary */
- dd->ipath_nosma_secs = 0;
- } else
- ipath_cdbg(SMA, "No SMA bufs avail %u tries, "
- "after %u seconds\n",
- dd->ipath_nosma_bufs,
- dd->ipath_nosma_secs);
- }
-
done:
mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
}
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index b98821d7801..e299148c4b6 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -35,7 +35,6 @@
#include <linux/pci.h>
#include "ipath_kernel.h"
-#include "ipath_layer.h"
#include "ipath_common.h"
/**
@@ -76,7 +75,7 @@ bail:
static ssize_t show_version(struct device_driver *dev, char *buf)
{
/* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", ipath_core_version);
+ return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
}
static ssize_t show_num_units(struct device_driver *dev, char *buf)
@@ -108,8 +107,8 @@ static const char *ipath_status_str[] = {
"Initted",
"Disabled",
"Admin_Disabled",
- "OIB_SMA",
- "SMA",
+ "", /* This used to be the old "OIB_SMA" status. */
+ "", /* This used to be the old "SMA" status. */
"Present",
"IB_link_up",
"IB_configured",
@@ -227,7 +226,6 @@ static ssize_t store_mlid(struct device *dev,
unit = dd->ipath_unit;
dd->ipath_mlid = mlid;
- ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST);
goto bail;
invalid:
@@ -467,7 +465,7 @@ static ssize_t store_link_state(struct device *dev,
if (ret < 0)
goto invalid;
- r = ipath_layer_set_linkstate(dd, state);
+ r = ipath_set_linkstate(dd, state);
if (r < 0) {
ret = r;
goto bail;
@@ -502,7 +500,7 @@ static ssize_t store_mtu(struct device *dev,
if (ret < 0)
goto invalid;
- r = ipath_layer_set_mtu(dd, mtu);
+ r = ipath_set_mtu(dd, mtu);
if (r < 0)
ret = r;
@@ -563,6 +561,33 @@ bail:
return ret;
}
+static ssize_t store_rx_pol_inv(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ int ret, r;
+ u16 val;
+
+ ret = ipath_parse_ushort(buf, &val);
+ if (ret < 0)
+ goto invalid;
+
+ r = ipath_set_rx_pol_inv(dd, val);
+ if (r < 0) {
+ ret = r;
+ goto bail;
+ }
+
+ goto bail;
+invalid:
+ ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
+bail:
+ return ret;
+}
+
+
static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
@@ -589,6 +614,7 @@ static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
+static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
static struct attribute *dev_attributes[] = {
&dev_attr_guid.attr,
@@ -603,6 +629,7 @@ static struct attribute *dev_attributes[] = {
&dev_attr_boardversion.attr,
&dev_attr_unit.attr,
&dev_attr_enabled.attr,
+ &dev_attr_rx_pol_inv.attr,
NULL
};
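
store_rx_pol_inv() follows the usual sysfs store shape: parse the buffer, apply the value through a setter, and return either the setter's error or the number of bytes consumed. A userspace-style sketch of that shape (the parser and error constant are stand-ins for the kernel helpers):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/types.h>

	/* Sketch: parse an unsigned short from 'buf' and hand it to 'apply'. */
	static ssize_t store_u16(const char *buf, size_t count,
				 int (*apply)(uint16_t))
	{
		uint16_t val;
		int r;

		if (sscanf(buf, "%hu", &val) != 1)
			return -EINVAL;		/* malformed input */
		r = apply(val);
		return r < 0 ? r : (ssize_t) count;
	}
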
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index c33abea2d5a..0fd3cded16b 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -32,7 +32,7 @@
*/
#include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
@@ -261,8 +261,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
* size to 56 bytes so the last 4 bytes of
* the BTH header (PSN) is in the data buffer.
*/
- header_in_data =
- ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+ header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
if (header_in_data) {
psn = be32_to_cpu(((__be32 *) data)[0]);
data += sizeof(__be32);
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 3466129af80..6991d1d74e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -34,7 +34,54 @@
#include <rdma/ib_smi.h>
#include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"
+
+static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
+ u32 *lengthp, struct ipath_sge_state *ss)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ int user = to_ipd(qp->ibqp.pd)->user;
+ int i, j, ret;
+ struct ib_wc wc;
+
+ *lengthp = 0;
+ for (i = j = 0; i < wqe->num_sge; i++) {
+ if (wqe->sg_list[i].length == 0)
+ continue;
+ /* Check LKEY */
+ if ((user && wqe->sg_list[i].lkey == 0) ||
+ !ipath_lkey_ok(&dev->lk_table,
+ j ? &ss->sg_list[j - 1] : &ss->sge,
+ &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ goto bad_lkey;
+ *lengthp += wqe->sg_list[i].length;
+ j++;
+ }
+ ss->num_sge = j;
+ ret = 1;
+ goto bail;
+
+bad_lkey:
+ wc.wr_id = wqe->wr_id;
+ wc.status = IB_WC_LOC_PROT_ERR;
+ wc.opcode = IB_WC_RECV;
+ wc.vendor_err = 0;
+ wc.byte_len = 0;
+ wc.imm_data = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = 0;
+ wc.wc_flags = 0;
+ wc.pkey_index = 0;
+ wc.slid = 0;
+ wc.sl = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+ /* Signal solicited completion event. */
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ ret = 0;
+bail:
+ return ret;
+}
/**
* ipath_ud_loopback - handle send on loopback QPs
@@ -46,6 +93,8 @@
*
* This is called from ipath_post_ud_send() to forward a WQE addressed
* to the same HCA.
+ * Note that the receive interrupt handler may be calling ipath_ud_rcv()
+ * while this is being called.
*/
static void ipath_ud_loopback(struct ipath_qp *sqp,
struct ipath_sge_state *ss,
@@ -60,7 +109,11 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
struct ipath_srq *srq;
struct ipath_sge_state rsge;
struct ipath_sge *sge;
+ struct ipath_rwq *wq;
struct ipath_rwqe *wqe;
+ void (*handler)(struct ib_event *, void *);
+ u32 tail;
+ u32 rlen;
qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
if (!qp)
@@ -94,6 +147,13 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
wc->imm_data = 0;
}
+ if (wr->num_sge > 1) {
+ rsge.sg_list = kmalloc((wr->num_sge - 1) *
+ sizeof(struct ipath_sge),
+ GFP_ATOMIC);
+ } else
+ rsge.sg_list = NULL;
+
/*
* Get the next work request entry to find where to put the data.
* Note that it is safe to drop the lock after changing rq->tail
@@ -101,37 +161,52 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
*/
if (qp->ibqp.srq) {
srq = to_isrq(qp->ibqp.srq);
+ handler = srq->ibsrq.event_handler;
rq = &srq->rq;
} else {
srq = NULL;
+ handler = NULL;
rq = &qp->r_rq;
}
+
spin_lock_irqsave(&rq->lock, flags);
- if (rq->tail == rq->head) {
- spin_unlock_irqrestore(&rq->lock, flags);
- dev->n_pkt_drops++;
- goto done;
+ wq = rq->wq;
+ tail = wq->tail;
+ while (1) {
+ if (unlikely(tail == wq->head)) {
+ spin_unlock_irqrestore(&rq->lock, flags);
+ dev->n_pkt_drops++;
+ goto bail_sge;
+ }
+ wqe = get_rwqe_ptr(rq, tail);
+ if (++tail >= rq->size)
+ tail = 0;
+ if (init_sge(qp, wqe, &rlen, &rsge))
+ break;
+ wq->tail = tail;
}
/* Silently drop packets which are too big. */
- wqe = get_rwqe_ptr(rq, rq->tail);
- if (wc->byte_len > wqe->length) {
+ if (wc->byte_len > rlen) {
spin_unlock_irqrestore(&rq->lock, flags);
dev->n_pkt_drops++;
- goto done;
+ goto bail_sge;
}
+ wq->tail = tail;
wc->wr_id = wqe->wr_id;
- rsge.sge = wqe->sg_list[0];
- rsge.sg_list = wqe->sg_list + 1;
- rsge.num_sge = wqe->num_sge;
- if (++rq->tail >= rq->size)
- rq->tail = 0;
- if (srq && srq->ibsrq.event_handler) {
+ if (handler) {
u32 n;
- if (rq->head < rq->tail)
- n = rq->size + rq->head - rq->tail;
+ /*
+ * validate head pointer value and compute
+ * the number of remaining WQEs.
+ */
+ n = wq->head;
+ if (n >= rq->size)
+ n = 0;
+ if (n < tail)
+ n += rq->size - tail;
else
- n = rq->head - rq->tail;
+ n -= tail;
if (n < srq->limit) {
struct ib_event ev;
@@ -140,12 +215,12 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
ev.device = qp->ibqp.device;
ev.element.srq = qp->ibqp.srq;
ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- srq->ibsrq.event_handler(&ev,
- srq->ibsrq.srq_context);
+ handler(&ev, srq->ibsrq.srq_context);
} else
spin_unlock_irqrestore(&rq->lock, flags);
} else
spin_unlock_irqrestore(&rq->lock, flags);
+
ah_attr = &to_iah(wr->wr.ud.ah)->attr;
if (ah_attr->ah_flags & IB_AH_GRH) {
ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
@@ -186,7 +261,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
wc->src_qp = sqp->ibqp.qp_num;
/* XXX do we know which pkey matched? Only needed for GSI. */
wc->pkey_index = 0;
- wc->slid = ipath_layer_get_lid(dev->dd) |
+ wc->slid = dev->dd->ipath_lid |
(ah_attr->src_path_bits &
((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
wc->sl = ah_attr->sl;
@@ -196,6 +271,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
wr->send_flags & IB_SEND_SOLICITED);
+bail_sge:
+ kfree(rsge.sg_list);
done:
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
@@ -276,7 +353,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
ss.num_sge++;
}
/* Check for invalid packet size. */
- if (len > ipath_layer_get_ibmtu(dev->dd)) {
+ if (len > dev->dd->ipath_ibmtu) {
ret = -EINVAL;
goto bail;
}
@@ -298,7 +375,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
dev->n_unicast_xmit++;
lid = ah_attr->dlid &
~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
- if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
+ if (unlikely(lid == dev->dd->ipath_lid)) {
/*
* Pass in an uninitialized ib_wc to save stack
* space.
@@ -327,7 +404,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
dev->gid_prefix;
qp->s_hdr.u.l.grh.sgid.global.interface_id =
- ipath_layer_get_guid(dev->dd);
+ dev->dd->ipath_guid;
qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
/*
* Don't worry about sending to locally attached multicast
@@ -357,7 +434,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
- lid = ipath_layer_get_lid(dev->dd);
+ lid = dev->dd->ipath_lid;
if (lid) {
lid |= ah_attr->src_path_bits &
((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
@@ -368,7 +445,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
bth0 |= 1 << 23;
bth0 |= extra_bytes << 20;
bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
- ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+ ipath_get_pkey(dev->dd, qp->s_pkey_index);
ohdr->bth[0] = cpu_to_be32(bth0);
/*
* Use the multicast QP if the destination LID is a multicast LID.
@@ -433,13 +510,9 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
int opcode;
u32 hdrsize;
u32 pad;
- unsigned long flags;
struct ib_wc wc;
u32 qkey;
u32 src_qp;
- struct ipath_rq *rq;
- struct ipath_srq *srq;
- struct ipath_rwqe *wqe;
u16 dlid;
int header_in_data;
@@ -458,8 +531,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
* the eager header buffer size to 56 bytes so the last 12
* bytes of the IB header is in the data buffer.
*/
- header_in_data =
- ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
+ header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
if (header_in_data) {
qkey = be32_to_cpu(((__be32 *) data)[1]);
src_qp = be32_to_cpu(((__be32 *) data)[2]);
@@ -547,19 +619,10 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
/*
* Get the next work request entry to find where to put the data.
- * Note that it is safe to drop the lock after changing rq->tail
- * since ipath_post_receive() won't fill the empty slot.
*/
- if (qp->ibqp.srq) {
- srq = to_isrq(qp->ibqp.srq);
- rq = &srq->rq;
- } else {
- srq = NULL;
- rq = &qp->r_rq;
- }
- spin_lock_irqsave(&rq->lock, flags);
- if (rq->tail == rq->head) {
- spin_unlock_irqrestore(&rq->lock, flags);
+ if (qp->r_reuse_sge)
+ qp->r_reuse_sge = 0;
+ else if (!ipath_get_rwqe(qp, 0)) {
/*
* Count VL15 packets dropped due to no receive buffer.
* Otherwise, count them as buffer overruns since usually,
@@ -573,39 +636,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
goto bail;
}
/* Silently drop packets which are too big. */
- wqe = get_rwqe_ptr(rq, rq->tail);
- if (wc.byte_len > wqe->length) {
- spin_unlock_irqrestore(&rq->lock, flags);
+ if (wc.byte_len > qp->r_len) {
+ qp->r_reuse_sge = 1;
dev->n_pkt_drops++;
goto bail;
}
- wc.wr_id = wqe->wr_id;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->num_sge;
- if (++rq->tail >= rq->size)
- rq->tail = 0;
- if (srq && srq->ibsrq.event_handler) {
- u32 n;
-
- if (rq->head < rq->tail)
- n = rq->size + rq->head - rq->tail;
- else
- n = rq->head - rq->tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- srq->ibsrq.event_handler(&ev,
- srq->ibsrq.srq_context);
- } else
- spin_unlock_irqrestore(&rq->lock, flags);
- } else
- spin_unlock_irqrestore(&rq->lock, flags);
if (has_grh) {
ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
sizeof(struct ib_grh));
@@ -614,6 +649,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
ipath_copy_sge(&qp->r_sge, data,
wc.byte_len - sizeof(struct ib_grh));
+ wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
wc.vendor_err = 0;
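
The receive path above replaces the open-coded ring walk with ipath_get_rwqe(), and the r_reuse_sge flag lets a too-large packet leave the already-initialized SGE state in place for the next arrival instead of burning an RWQE. The control flow reduces to roughly the following sketch, not the literal function (qp_sketch and claim_recv_buffer are illustrative names):

	#include <stdint.h>

	struct qp_sketch {
		int r_reuse_sge;
		uint32_t r_len;		/* total length of the current RWQE */
	};

	static int get_rwqe(struct qp_sketch *qp);	/* stands in for ipath_get_rwqe() */

	/* Sketch of the reuse-on-drop receive flow in ipath_ud_rcv(). */
	static int claim_recv_buffer(struct qp_sketch *qp, uint32_t byte_len)
	{
		if (qp->r_reuse_sge)
			qp->r_reuse_sge = 0;	/* SGE state left over from a drop */
		else if (!get_rwqe(qp))
			return 0;		/* nothing posted: drop the packet */

		if (byte_len > qp->r_len) {
			qp->r_reuse_sge = 1;	/* too big: keep the RWQE for later */
			return 0;
		}
		return 1;
	}
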
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index d70a9b6b523..b8381c5e72b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -33,15 +33,13 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
+#include <linux/io.h>
#include <linux/utsname.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
-/* Not static, because we don't want the compiler removing it */
-const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
-
static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -52,10 +50,6 @@ module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
MODULE_PARM_DESC(lkey_table_size,
"LKEY table size in bits (2^n, 1 <= n <= 23)");
-unsigned int ib_ipath_debug; /* debug mask */
-module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "Verbs debug mask");
-
static unsigned int ib_ipath_max_pds = 0xFFFF;
module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_pds,
@@ -79,6 +73,10 @@ module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+unsigned int ib_ipath_max_qps = 16384;
+module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
+
unsigned int ib_ipath_max_sges = 0x60;
module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
@@ -109,9 +107,9 @@ module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@pathscale.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
+static unsigned int ib_ipath_disable_sma;
+module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(disable_sma, "Disable the SMA");
const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = 0,
@@ -125,6 +123,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
[IB_QPS_ERR] = 0,
};
+struct ipath_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct ipath_ucontext, ibucontext);
+}
+
/*
* Translate ib_wr_opcode into ib_wc_opcode.
*/
@@ -277,11 +285,12 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
struct ipath_qp *qp = to_iqp(ibqp);
+ struct ipath_rwq *wq = qp->r_rq.wq;
unsigned long flags;
int ret;
/* Check that state is OK to post receive. */
- if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) {
+ if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
*bad_wr = wr;
ret = -EINVAL;
goto bail;
@@ -290,59 +299,31 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
for (; wr; wr = wr->next) {
struct ipath_rwqe *wqe;
u32 next;
- int i, j;
+ int i;
- if (wr->num_sge > qp->r_rq.max_sge) {
+ if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
*bad_wr = wr;
ret = -ENOMEM;
goto bail;
}
spin_lock_irqsave(&qp->r_rq.lock, flags);
- next = qp->r_rq.head + 1;
+ next = wq->head + 1;
if (next >= qp->r_rq.size)
next = 0;
- if (next == qp->r_rq.tail) {
+ if (next == wq->tail) {
spin_unlock_irqrestore(&qp->r_rq.lock, flags);
*bad_wr = wr;
ret = -ENOMEM;
goto bail;
}
- wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head);
+ wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
wqe->wr_id = wr->wr_id;
- wqe->sg_list[0].mr = NULL;
- wqe->sg_list[0].vaddr = NULL;
- wqe->sg_list[0].length = 0;
- wqe->sg_list[0].sge_length = 0;
- wqe->length = 0;
- for (i = 0, j = 0; i < wr->num_sge; i++) {
- /* Check LKEY */
- if (to_ipd(qp->ibqp.pd)->user &&
- wr->sg_list[i].lkey == 0) {
- spin_unlock_irqrestore(&qp->r_rq.lock,
- flags);
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
- if (wr->sg_list[i].length == 0)
- continue;
- if (!ipath_lkey_ok(
- &to_idev(qp->ibqp.device)->lk_table,
- &wqe->sg_list[j], &wr->sg_list[i],
- IB_ACCESS_LOCAL_WRITE)) {
- spin_unlock_irqrestore(&qp->r_rq.lock,
- flags);
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
- wqe->length += wr->sg_list[i].length;
- j++;
- }
- wqe->num_sge = j;
- qp->r_rq.head = next;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ wq->head = next;
spin_unlock_irqrestore(&qp->r_rq.lock, flags);
}
ret = 0;
@@ -377,6 +358,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
switch (qp->ibqp.qp_type) {
case IB_QPT_SMI:
case IB_QPT_GSI:
+ if (ib_ipath_disable_sma)
+ break;
+ /* FALLTHROUGH */
case IB_QPT_UD:
ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
break;
@@ -395,7 +379,7 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
}
/**
- * ipath_ib_rcv - process and incoming packet
+ * ipath_ib_rcv - process an incoming packet
* @arg: the device pointer
* @rhdr: the header of the packet
* @data: the packet data
@@ -404,9 +388,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
* This is called from ipath_kreceive() to process an incoming packet at
* interrupt level. Tlen is the length of the header + data + CRC in bytes.
*/
-static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
+void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
+ u32 tlen)
{
- struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
struct ipath_ib_header *hdr = rhdr;
struct ipath_other_headers *ohdr;
struct ipath_qp *qp;
@@ -427,7 +411,7 @@ static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
lid = be16_to_cpu(hdr->lrh[1]);
if (lid < IPATH_MULTICAST_LID_BASE) {
lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
- if (unlikely(lid != ipath_layer_get_lid(dev->dd))) {
+ if (unlikely(lid != dev->dd->ipath_lid)) {
dev->rcv_errors++;
goto bail;
}
@@ -495,9 +479,8 @@ bail:;
* This is called from ipath_do_rcv_timer() at interrupt level to check for
* QPs which need retransmits and to collect performance numbers.
*/
-static void ipath_ib_timer(void *arg)
+void ipath_ib_timer(struct ipath_ibdev *dev)
{
- struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
struct ipath_qp *resend = NULL;
struct list_head *last;
struct ipath_qp *qp;
@@ -539,19 +522,19 @@ static void ipath_ib_timer(void *arg)
if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
--dev->pma_sample_start == 0) {
dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword,
- &dev->ipath_rword,
- &dev->ipath_spkts,
- &dev->ipath_rpkts,
- &dev->ipath_xmit_wait);
+ ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
+ &dev->ipath_rword,
+ &dev->ipath_spkts,
+ &dev->ipath_rpkts,
+ &dev->ipath_xmit_wait);
}
if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
if (dev->pma_sample_interval == 0) {
u64 ta, tb, tc, td, te;
dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- ipath_layer_snapshot_counters(dev->dd, &ta, &tb,
- &tc, &td, &te);
+ ipath_snapshot_counters(dev->dd, &ta, &tb,
+ &tc, &td, &te);
dev->ipath_sword = ta - dev->ipath_sword;
dev->ipath_rword = tb - dev->ipath_rword;
@@ -581,6 +564,362 @@ static void ipath_ib_timer(void *arg)
}
}
+static void update_sge(struct ipath_sge_state *ss, u32 length)
+{
+ struct ipath_sge *sge = &ss->sge;
+
+ sge->vaddr += length;
+ sge->length -= length;
+ sge->sge_length -= length;
+ if (sge->sge_length == 0) {
+ if (--ss->num_sge)
+ *sge = *ss->sg_list++;
+ } else if (sge->length == 0 && sge->mr != NULL) {
+ if (++sge->n >= IPATH_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ return;
+ sge->n = 0;
+ }
+ sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+}
+
+#ifdef __LITTLE_ENDIAN
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#else
+static inline u32 get_upper_bits(u32 data, u32 shift)
+{
+ return data << shift;
+}
+
+static inline u32 set_upper_bits(u32 data, u32 shift)
+{
+ return data >> shift;
+}
+
+static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
+{
+ data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
+ data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
+ return data;
+}
+#endif
+
+static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
+ u32 length)
+{
+ u32 extra = 0;
+ u32 data = 0;
+ u32 last;
+
+ while (1) {
+ u32 len = ss->sge.length;
+ u32 off;
+
+ BUG_ON(len == 0);
+ if (len > length)
+ len = length;
+ if (len > ss->sge.sge_length)
+ len = ss->sge.sge_length;
+ /* If the source address is not aligned, try to align it. */
+ off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
+ if (off) {
+ u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
+ ~(sizeof(u32) - 1));
+ u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
+ u32 y;
+
+ y = sizeof(u32) - off;
+ if (len > y)
+ len = y;
+ if (len + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, extra *
+ BITS_PER_BYTE);
+ len = sizeof(u32) - extra;
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, len, extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += len;
+ }
+ } else if (extra) {
+ /* Source address is aligned. */
+ u32 *addr = (u32 *) ss->sge.vaddr;
+ int shift = extra * BITS_PER_BYTE;
+ int ushift = 32 - shift;
+ u32 l = len;
+
+ while (l >= sizeof(u32)) {
+ u32 v = *addr;
+
+ data |= set_upper_bits(v, shift);
+ __raw_writel(data, piobuf);
+ data = get_upper_bits(v, ushift);
+ piobuf++;
+ addr++;
+ l -= sizeof(u32);
+ }
+ /*
+ * A few bytes (fewer than a dword) are still left over.
+ */
+ if (l) {
+ u32 v = *addr;
+
+ if (l + extra >= sizeof(u32)) {
+ data |= set_upper_bits(v, shift);
+ len -= l + extra - sizeof(u32);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ __raw_writel(data, piobuf);
+ piobuf++;
+ extra = 0;
+ data = 0;
+ } else {
+ /* Clear unused upper bytes */
+ data |= clear_upper_bytes(v, l,
+ extra);
+ if (len == length) {
+ last = data;
+ break;
+ }
+ extra += l;
+ }
+ } else if (len == length) {
+ last = data;
+ break;
+ }
+ } else if (len == length) {
+ u32 w;
+
+ /*
+ * Need to round up for the last dword in the
+ * packet.
+ */
+ w = (len + 3) >> 2;
+ __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+ piobuf += w - 1;
+ last = ((u32 *) ss->sge.vaddr)[w - 1];
+ break;
+ } else {
+ u32 w = len >> 2;
+
+ __iowrite32_copy(piobuf, ss->sge.vaddr, w);
+ piobuf += w;
+
+ extra = len & (sizeof(u32) - 1);
+ if (extra) {
+ u32 v = ((u32 *) ss->sge.vaddr)[w];
+
+ /* Clear unused upper bytes */
+ data = clear_upper_bytes(v, extra, 0);
+ }
+ }
+ update_sge(ss, len);
+ length -= len;
+ }
+ /* Update address before sending packet. */
+ update_sge(ss, length);
+ /* must flush everything before the trigger word */
+ ipath_flush_wc();
+ __raw_writel(last, piobuf);
+ /* be sure trigger word is written */
+ ipath_flush_wc();
+}
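
The flush discipline above is the chip's "trigger word" protocol: the
final dword must not reach the PIO buffer before everything that precedes
it, since its arrival starts transmission. A minimal sketch of the
pattern, with a hypothetical helper name:

static inline void pio_write_trailer(u32 __iomem *piobuf,
				     const u32 *buf, u32 dwords)
{
	__iowrite32_copy(piobuf, buf, dwords - 1);	/* body of the packet */
	ipath_flush_wc();		/* drain WC buffers before the trigger */
	__raw_writel(buf[dwords - 1], piobuf + dwords - 1); /* trigger word */
	ipath_flush_wc();		/* make sure the trigger itself is posted */
}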
+
+/**
+ * ipath_verbs_send - send a packet
+ * @dd: the infinipath device
+ * @hdrwords: the number of words in the header
+ * @hdr: the packet header
+ * @len: the length of the packet in bytes
+ * @ss: the SGE to send
+ */
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+ u32 *hdr, u32 len, struct ipath_sge_state *ss)
+{
+ u32 __iomem *piobuf;
+ u32 plen;
+ int ret;
+
+ /* +1 is for the qword padding of pbc */
+ plen = hdrwords + ((len + 3) >> 2) + 1;
+ if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
+ ipath_dbg("packet len 0x%x too long, failing\n", plen);
+ ret = -EINVAL;
+ goto bail;
+ }
+
+ /* Get a PIO buffer to use. */
+ piobuf = ipath_getpiobuf(dd, NULL);
+ if (unlikely(piobuf == NULL)) {
+ ret = -EBUSY;
+ goto bail;
+ }
+
+ /*
+ * Write len to control qword, no flags.
+ * We have to flush after the PBC for correctness on some cpus
+ * or WC buffer can be written out of order.
+ */
+ writeq(plen, piobuf);
+ ipath_flush_wc();
+ piobuf += 2;
+ if (len == 0) {
+ /*
+ * If there is just the header portion, must flush before
+ * writing last word of header for correctness, and after
+ * the last header word (trigger word).
+ */
+ __iowrite32_copy(piobuf, hdr, hdrwords - 1);
+ ipath_flush_wc();
+ __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
+ ipath_flush_wc();
+ ret = 0;
+ goto bail;
+ }
+
+ __iowrite32_copy(piobuf, hdr, hdrwords);
+ piobuf += hdrwords;
+
+ /* The common case is aligned and contained in one segment. */
+ if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
+ !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
+ u32 w;
+ u32 *addr = (u32 *) ss->sge.vaddr;
+
+ /* Update address before sending packet. */
+ update_sge(ss, len);
+ /* Need to round up for the last dword in the packet. */
+ w = (len + 3) >> 2;
+ __iowrite32_copy(piobuf, addr, w - 1);
+ /* must flush everything before the trigger word */
+ ipath_flush_wc();
+ __raw_writel(addr[w - 1], piobuf + w - 1);
+ /* be sure trigger word is written */
+ ipath_flush_wc();
+ ret = 0;
+ goto bail;
+ }
+ copy_io(piobuf, ss, len);
+ ret = 0;
+
+bail:
+ return ret;
+}
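
For the length check above, 'plen' is a dword count: header dwords, the
payload rounded up to dwords, plus one for the PBC qword padding noted in
the comment. Illustrative arithmetic: a 13-dword header with a 256-byte
payload gives plen = 13 + 64 + 1 = 78 dwords, and the check rejects any
packet whose byte length (plen << 2) exceeds ipath_ibmaxlen.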
+
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait)
+{
+ int ret;
+
+ if (!(dd->ipath_flags & IPATH_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+ ret = -EINVAL;
+ goto bail;
+ }
+ *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+ *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+ *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+ *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+ *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
+/**
+ * ipath_get_counters - get various chip counters
+ * @dd: the infinipath device
+ * @cntrs: counters are placed here
+ *
+ * Return the counters needed by recv_pma_get_portcounters().
+ */
+int ipath_get_counters(struct ipath_devdata *dd,
+ struct ipath_verbs_counters *cntrs)
+{
+ int ret;
+
+ if (!(dd->ipath_flags & IPATH_INITTED)) {
+ /* no hardware, freeze, etc. */
+ ipath_dbg("unit %u not usable\n", dd->ipath_unit);
+ ret = -EINVAL;
+ goto bail;
+ }
+ cntrs->symbol_error_counter =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
+ cntrs->link_error_recovery_counter =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
+ /*
+ * The link downed counter counts when the other side downs the
+ * connection. We add in the number of times we downed the link
+ * due to local link integrity errors to compensate.
+ */
+ cntrs->link_downed_counter =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
+ cntrs->port_rcv_errors =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
+ cntrs->port_rcv_remphys_errors =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
+ cntrs->port_xmit_discards =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
+ cntrs->port_xmit_data =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+ cntrs->port_rcv_data =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+ cntrs->port_xmit_packets =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
+ cntrs->port_rcv_packets =
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
+ cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
+ cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
+
+ ret = 0;
+
+bail:
+ return ret;
+}
+
/**
* ipath_ib_piobufavail - callback when a PIO buffer is available
* @arg: the device pointer
@@ -591,9 +930,8 @@ static void ipath_ib_timer(void *arg)
* QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
* return zero).
*/
-static int ipath_ib_piobufavail(void *arg)
+int ipath_ib_piobufavail(struct ipath_ibdev *dev)
{
- struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
struct ipath_qp *qp;
unsigned long flags;
@@ -624,14 +962,14 @@ static int ipath_query_device(struct ib_device *ibdev,
IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID;
props->page_size_cap = PAGE_SIZE;
- props->vendor_id = ipath_layer_get_vendorid(dev->dd);
- props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
- props->hw_ver = ipath_layer_get_pcirev(dev->dd);
+ props->vendor_id = dev->dd->ipath_vendorid;
+ props->vendor_part_id = dev->dd->ipath_deviceid;
+ props->hw_ver = dev->dd->ipath_pcirev;
props->sys_image_guid = dev->sys_image_guid;
props->max_mr_size = ~0ull;
- props->max_qp = dev->qp_table.max;
+ props->max_qp = ib_ipath_max_qps;
props->max_qp_wr = ib_ipath_max_qp_wrs;
props->max_sge = ib_ipath_max_sges;
props->max_cq = ib_ipath_max_cqs;
@@ -647,7 +985,7 @@ static int ipath_query_device(struct ib_device *ibdev,
props->max_srq_sge = ib_ipath_max_srq_sges;
/* props->local_ca_ack_delay */
props->atomic_cap = IB_ATOMIC_HCA;
- props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
+ props->max_pkeys = ipath_get_npkeys(dev->dd);
props->max_mcast_grp = ib_ipath_max_mcast_grps;
props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -672,12 +1010,17 @@ const u8 ipath_cvt_physportstate[16] = {
[INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
};
+u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
+{
+ return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
+}
+
static int ipath_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
struct ipath_ibdev *dev = to_idev(ibdev);
enum ib_mtu mtu;
- u16 lid = ipath_layer_get_lid(dev->dd);
+ u16 lid = dev->dd->ipath_lid;
u64 ibcstat;
memset(props, 0, sizeof(*props));
@@ -685,16 +1028,16 @@ static int ipath_query_port(struct ib_device *ibdev,
props->lmc = dev->mkeyprot_resv_lmc & 7;
props->sm_lid = dev->sm_lid;
props->sm_sl = dev->sm_sl;
- ibcstat = ipath_layer_get_lastibcstat(dev->dd);
+ ibcstat = dev->dd->ipath_lastibcstat;
props->state = ((ibcstat >> 4) & 0x3) + 1;
/* See phys_state_show() */
props->phys_state = ipath_cvt_physportstate[
- ipath_layer_get_lastibcstat(dev->dd) & 0xf];
+ dev->dd->ipath_lastibcstat & 0xf];
props->port_cap_flags = dev->port_cap_flags;
props->gid_tbl_len = 1;
props->max_msg_sz = 0x80000000;
- props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd);
- props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) -
+ props->pkey_tbl_len = ipath_get_npkeys(dev->dd);
+ props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) -
dev->z_pkey_violations;
props->qkey_viol_cntr = dev->qkey_violations;
props->active_width = IB_WIDTH_4X;
@@ -704,7 +1047,7 @@ static int ipath_query_port(struct ib_device *ibdev,
props->init_type_reply = 0;
props->max_mtu = IB_MTU_4096;
- switch (ipath_layer_get_ibmtu(dev->dd)) {
+ switch (dev->dd->ipath_ibmtu) {
case 4096:
mtu = IB_MTU_4096;
break;
@@ -763,7 +1106,7 @@ static int ipath_modify_port(struct ib_device *ibdev,
dev->port_cap_flags |= props->set_port_cap_mask;
dev->port_cap_flags &= ~props->clr_port_cap_mask;
if (port_modify_mask & IB_PORT_SHUTDOWN)
- ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
+ ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
dev->qkey_violations = 0;
return 0;
@@ -780,7 +1123,7 @@ static int ipath_query_gid(struct ib_device *ibdev, u8 port,
goto bail;
}
gid->global.subnet_prefix = dev->gid_prefix;
- gid->global.interface_id = ipath_layer_get_guid(dev->dd);
+ gid->global.interface_id = dev->dd->ipath_guid;
ret = 0;
@@ -803,18 +1146,22 @@ static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
* we allow allocations of more than we report for this value.
*/
- if (dev->n_pds_allocated == ib_ipath_max_pds) {
+ pd = kmalloc(sizeof *pd, GFP_KERNEL);
+ if (!pd) {
ret = ERR_PTR(-ENOMEM);
goto bail;
}
- pd = kmalloc(sizeof *pd, GFP_KERNEL);
- if (!pd) {
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == ib_ipath_max_pds) {
+ spin_unlock(&dev->n_pds_lock);
+ kfree(pd);
ret = ERR_PTR(-ENOMEM);
goto bail;
}
dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
/* ib_alloc_pd() will initialize pd->ibpd. */
pd->user = udata != NULL;
@@ -830,7 +1177,9 @@ static int ipath_dealloc_pd(struct ib_pd *ibpd)
struct ipath_pd *pd = to_ipd(ibpd);
struct ipath_ibdev *dev = to_idev(ibpd->device);
+ spin_lock(&dev->n_pds_lock);
dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
kfree(pd);
@@ -851,11 +1200,6 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
struct ib_ah *ret;
struct ipath_ibdev *dev = to_idev(pd->device);
- if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
/* A multicast address requires a GRH (see ch. 8.4.1). */
if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
ah_attr->dlid != IPATH_PERMISSIVE_LID &&
@@ -881,7 +1225,16 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
goto bail;
}
+ spin_lock(&dev->n_ahs_lock);
+ if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
+ spin_unlock(&dev->n_ahs_lock);
+ kfree(ah);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
dev->n_ahs_allocated++;
+ spin_unlock(&dev->n_ahs_lock);
/* ib_create_ah() will initialize ah->ibah. */
ah->attr = *ah_attr;
@@ -903,7 +1256,9 @@ static int ipath_destroy_ah(struct ib_ah *ibah)
struct ipath_ibdev *dev = to_idev(ibah->device);
struct ipath_ah *ah = to_iah(ibah);
+ spin_lock(&dev->n_ahs_lock);
dev->n_ahs_allocated--;
+ spin_unlock(&dev->n_ahs_lock);
kfree(ah);
@@ -919,25 +1274,50 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
return 0;
}
+/**
+ * ipath_get_npkeys - return the size of the PKEY table for port 0
+ * @dd: the infinipath device
+ */
+unsigned ipath_get_npkeys(struct ipath_devdata *dd)
+{
+ return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
+}
+
+/**
+ * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
+ * @dd: the infinipath device
+ * @index: the PKEY index
+ */
+unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
+{
+ unsigned ret;
+
+ if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
+ ret = 0;
+ else
+ ret = dd->ipath_pd[0]->port_pkeys[index];
+
+ return ret;
+}
+
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
struct ipath_ibdev *dev = to_idev(ibdev);
int ret;
- if (index >= ipath_layer_get_npkeys(dev->dd)) {
+ if (index >= ipath_get_npkeys(dev->dd)) {
ret = -EINVAL;
goto bail;
}
- *pkey = ipath_layer_get_pkey(dev->dd, index);
+ *pkey = ipath_get_pkey(dev->dd, index);
ret = 0;
bail:
return ret;
}
-
/**
* ipath_alloc_ucontext - allocate a ucontext
* @ibdev: the infiniband device
@@ -970,26 +1350,91 @@ static int ipath_dealloc_ucontext(struct ib_ucontext *context)
static int ipath_verbs_register_sysfs(struct ib_device *dev);
+static void __verbs_timer(unsigned long arg)
+{
+ struct ipath_devdata *dd = (struct ipath_devdata *) arg;
+
+ /*
+ * If port 0 receive packet interrupts are not available, or
+ * can be missed, poll the receive queue
+ */
+ if (dd->ipath_flags & IPATH_POLL_RX_INTR)
+ ipath_kreceive(dd);
+
+ /* Handle verbs layer timeouts. */
+ ipath_ib_timer(dd->verbs_dev);
+
+ mod_timer(&dd->verbs_timer, jiffies + 1);
+}
+
+static int enable_timer(struct ipath_devdata *dd)
+{
+ /*
+ * Early chips had a design flaw where the chip and kernel idea
+ * of the tail register don't always agree, and therefore we won't
+ * get an interrupt on the next packet received.
+ * If the board supports per packet receive interrupts, use it.
+ * Otherwise, the timer function periodically checks for packets
+ * to cover this case.
+ * Either way, the timer is needed for verbs layer related
+ * processing.
+ */
+ if (dd->ipath_flags & IPATH_GPIO_INTR) {
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
+ 0x2074076542310ULL);
+ /* Enable GPIO bit 2 interrupt */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+ (u64) (1 << 2));
+ }
+
+ init_timer(&dd->verbs_timer);
+ dd->verbs_timer.function = __verbs_timer;
+ dd->verbs_timer.data = (unsigned long)dd;
+ dd->verbs_timer.expires = jiffies + 1;
+ add_timer(&dd->verbs_timer);
+
+ return 0;
+}
+
+static int disable_timer(struct ipath_devdata *dd)
+{
+ /* Disable GPIO bit 2 interrupt */
+ if (dd->ipath_flags & IPATH_GPIO_INTR)
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
+
+ del_timer_sync(&dd->verbs_timer);
+
+ return 0;
+}
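
Since the timer re-arms itself with mod_timer(..., jiffies + 1), the
receive poll in __verbs_timer() runs once per jiffy: roughly every
millisecond at HZ=1000, every 4 ms at HZ=250 (HZ values illustrative).
The GPIO writes above apply only to boards with per-packet receive
interrupts (IPATH_GPIO_INTR); everything else relies on the polling
fallback gated by IPATH_POLL_RX_INTR in __verbs_timer().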
+
/**
* ipath_register_ib_device - register our device with the infiniband core
- * @unit: the device number to register
* @dd: the device data structure
* Return 0 on success, a negative errno on failure; the allocated
* ipath_ibdev is stored in dd->verbs_dev.
*/
-static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
+int ipath_register_ib_device(struct ipath_devdata *dd)
{
- struct ipath_layer_counters cntrs;
+ struct ipath_verbs_counters cntrs;
struct ipath_ibdev *idev;
struct ib_device *dev;
int ret;
idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
- if (idev == NULL)
+ if (idev == NULL) {
+ ret = -ENOMEM;
goto bail;
+ }
dev = &idev->ibdev;
/* Only need to initialize non-zero fields. */
+ spin_lock_init(&idev->n_pds_lock);
+ spin_lock_init(&idev->n_ahs_lock);
+ spin_lock_init(&idev->n_cqs_lock);
+ spin_lock_init(&idev->n_qps_lock);
+ spin_lock_init(&idev->n_srqs_lock);
+ spin_lock_init(&idev->n_mcast_grps_lock);
+
spin_lock_init(&idev->qp_table.lock);
spin_lock_init(&idev->lk_table.lock);
idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
@@ -1030,7 +1475,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
idev->link_width_enabled = 3; /* 1x or 4x */
/* Snapshot current HW counters to "clear" them. */
- ipath_layer_get_counters(dd, &cntrs);
+ ipath_get_counters(dd, &cntrs);
idev->z_symbol_error_counter = cntrs.symbol_error_counter;
idev->z_link_error_recovery_counter =
cntrs.link_error_recovery_counter;
@@ -1054,14 +1499,14 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
* device types in the system, we can't be sure this is unique.
*/
if (!sys_image_guid)
- sys_image_guid = ipath_layer_get_guid(dd);
+ sys_image_guid = dd->ipath_guid;
idev->sys_image_guid = sys_image_guid;
- idev->ib_unit = unit;
+ idev->ib_unit = dd->ipath_unit;
idev->dd = dd;
strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
dev->owner = THIS_MODULE;
- dev->node_guid = ipath_layer_get_guid(dd);
+ dev->node_guid = dd->ipath_guid;
dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
dev->uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1093,9 +1538,9 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
- dev->node_type = IB_NODE_CA;
+ dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
- dev->dma_device = ipath_layer_get_device(dd);
+ dev->dma_device = &dd->pcidev->dev;
dev->class_dev.dev = dev->dma_device;
dev->query_device = ipath_query_device;
dev->modify_device = ipath_modify_device;
@@ -1137,9 +1582,10 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
dev->attach_mcast = ipath_multicast_attach;
dev->detach_mcast = ipath_multicast_detach;
dev->process_mad = ipath_process_mad;
+ dev->mmap = ipath_mmap;
snprintf(dev->node_desc, sizeof(dev->node_desc),
- IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
+ IPATH_IDSTR " %s", system_utsname.nodename);
ret = ib_register_device(dev);
if (ret)
@@ -1148,7 +1594,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
if (ipath_verbs_register_sysfs(dev))
goto err_class;
- ipath_layer_enable_timer(dd);
+ enable_timer(dd);
goto bail;
@@ -1160,37 +1606,32 @@ err_lk:
kfree(idev->qp_table.table);
err_qp:
ib_dealloc_device(dev);
- _VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n",
- unit, -ret);
+ ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
idev = NULL;
bail:
- return idev;
+ dd->verbs_dev = idev;
+ return ret;
}
-static void ipath_unregister_ib_device(void *arg)
+void ipath_unregister_ib_device(struct ipath_ibdev *dev)
{
- struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
struct ib_device *ibdev = &dev->ibdev;
- ipath_layer_disable_timer(dev->dd);
+ disable_timer(dev->dd);
ib_unregister_device(ibdev);
if (!list_empty(&dev->pending[0]) ||
!list_empty(&dev->pending[1]) ||
!list_empty(&dev->pending[2]))
- _VERBS_ERROR("ipath%d pending list not empty!\n",
- dev->ib_unit);
+ ipath_dev_err(dev->dd, "pending list not empty!\n");
if (!list_empty(&dev->piowait))
- _VERBS_ERROR("ipath%d piowait list not empty!\n",
- dev->ib_unit);
+ ipath_dev_err(dev->dd, "piowait list not empty!\n");
if (!list_empty(&dev->rnrwait))
- _VERBS_ERROR("ipath%d rnrwait list not empty!\n",
- dev->ib_unit);
+ ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
if (!ipath_mcast_tree_empty())
- _VERBS_ERROR("ipath%d multicast table memory leak!\n",
- dev->ib_unit);
+ ipath_dev_err(dev->dd, "multicast table memory leak!\n");
/*
* Note that ipath_unregister_ib_device() can be called before all
* the QPs are destroyed!
@@ -1201,25 +1642,12 @@ static void ipath_unregister_ib_device(void *arg)
ib_dealloc_device(ibdev);
}
-static int __init ipath_verbs_init(void)
-{
- return ipath_verbs_register(ipath_register_ib_device,
- ipath_unregister_ib_device,
- ipath_ib_piobufavail, ipath_ib_rcv,
- ipath_ib_timer);
-}
-
-static void __exit ipath_verbs_cleanup(void)
-{
- ipath_verbs_unregister();
-}
-
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
struct ipath_ibdev *dev =
container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
- return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd));
+ return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
}
static ssize_t show_hca(struct class_device *cdev, char *buf)
@@ -1228,7 +1656,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
int ret;
- ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+ ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
if (ret < 0)
goto bail;
strcat(buf, "\n");
@@ -1305,6 +1733,3 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev)
bail:
return ret;
}
-
-module_init(ipath_verbs_init);
-module_exit(ipath_verbs_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 2df684727dc..09bbb3f9a21 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -38,10 +38,10 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include "ipath_layer.h"
-#include "verbs_debug.h"
#define QPN_MAX (1 << 24)
#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
@@ -50,7 +50,7 @@
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
-#define IPATH_UVERBS_ABI_VERSION 1
+#define IPATH_UVERBS_ABI_VERSION 2
/*
* Define an ib_cq_notify value that is not valid so we know when CQ
@@ -152,19 +152,6 @@ struct ipath_mcast {
int n_attached;
};
-/* Memory region */
-struct ipath_mr {
- struct ib_mr ibmr;
- struct ipath_mregion mr; /* must be last */
-};
-
-/* Fast memory region */
-struct ipath_fmr {
- struct ib_fmr ibfmr;
- u8 page_shift;
- struct ipath_mregion mr; /* must be last */
-};
-
/* Protection domain */
struct ipath_pd {
struct ib_pd ibpd;
@@ -178,58 +165,89 @@ struct ipath_ah {
};
/*
- * Quick description of our CQ/QP locking scheme:
- *
- * We have one global lock that protects dev->cq/qp_table. Each
- * struct ipath_cq/qp also has its own lock. An individual qp lock
- * may be taken inside of an individual cq lock. Both cqs attached to
- * a qp may be locked, with the send cq locked first. No other
- * nesting should be done.
- *
- * Each struct ipath_cq/qp also has an atomic_t ref count. The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
- *
- * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
- * destroy function to sleep on.
- *
- * This means that access from the consumer API requires nothing but
- * taking the struct's lock.
- *
- * Access because of a completion event should go as follows:
- * - lock cq/qp_table and look up struct
- * - increment ref count in struct
- * - drop cq/qp_table lock
- * - lock struct, do your thing, and unlock struct
- * - decrement ref count; if zero, wake up waiters
- *
- * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
- * - wait_event until ref count is zero
- *
- * It is the consumer's responsibilty to make sure that no QP
- * operations (WQE posting or state modification) are pending when the
- * QP is destroyed. Also, the consumer must make sure that calls to
- * qp_modify are serialized.
- *
- * Possible optimizations (wait for profile data to see if/where we
- * have locks bouncing between CPUs):
- * - split cq/qp table lock into n separate (cache-aligned) locks,
- * indexed (say) by the page in the table
+ * This structure is used by ipath_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct ipath_mmap_info {
+ struct ipath_mmap_info *next;
+ struct ib_ucontext *context;
+ void *obj;
+ struct kref ref;
+ unsigned size;
+ unsigned mmap_cnt;
+};
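
The kref lets the shared allocation outlive whichever side drops it last.
A minimal usage sketch (hypothetical vm_operations callback, not part of
this patch), pairing with ipath_release_mmap_info() declared later in
this header:

static void sketch_vma_close(struct vm_area_struct *vma)
{
	struct ipath_mmap_info *ip = vma->vm_private_data;

	/* Drop the mapping's reference; the release hook frees 'ip'
	 * once the owning CQ/QP/SRQ has also let go. */
	kref_put(&ip->ref, ipath_release_mmap_info);
}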
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
*/
+struct ipath_cq_wc {
+ u32 head; /* index of next entry to fill */
+ u32 tail; /* index of next ib_poll_cq() entry */
+ struct ib_wc queue[1]; /* this is actually size ibcq.cqe + 1 */
+};
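
Because 'queue' is declared with one element but really holds
ibcq.cqe + 1 entries, the size of the shared allocation (and hence what
ipath_mmap() must validate) works out as in this sketch of an assumed
helper:

static inline unsigned ipath_cq_wc_size(int cqe)
{
	/* queue[1] is already inside the struct, so cqe more entries */
	return sizeof(struct ipath_cq_wc) + cqe * sizeof(struct ib_wc);
}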
+/*
+ * The completion queue structure.
+ */
struct ipath_cq {
struct ib_cq ibcq;
struct tasklet_struct comptask;
spinlock_t lock;
u8 notify;
u8 triggered;
- u32 head; /* new records added to the head */
- u32 tail; /* poll_cq() reads from here. */
- struct ib_wc *queue; /* this is actually ibcq.cqe + 1 */
+ struct ipath_cq_wc *queue;
+ struct ipath_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct ipath_seg {
+ void *vaddr;
+ size_t length;
+};
+
+/* The number of ipath_segs that fit in a page. */
+#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
+
+struct ipath_segarray {
+ struct ipath_seg segs[IPATH_SEGSZ];
+};
+
+struct ipath_mregion {
+ u64 user_base; /* User's address for this region */
+ u64 iova; /* IB start address of this region */
+ size_t length;
+ u32 lkey;
+ u32 offset; /* offset (bytes) to start of region */
+ int access_flags;
+ u32 max_segs; /* number of ipath_segs in all the arrays */
+ u32 mapsz; /* size of the map array */
+ struct ipath_segarray *map[0]; /* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct ipath_sge {
+ struct ipath_mregion *mr;
+ void *vaddr; /* current pointer into the segment */
+ u32 sge_length; /* length of the SGE */
+ u32 length; /* remaining length of the segment */
+ u16 m; /* current index: mr->map[m] */
+ u16 n; /* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct ipath_mr {
+ struct ib_mr ibmr;
+ struct ipath_mregion mr; /* must be last */
};
/*
@@ -248,32 +266,50 @@ struct ipath_swqe {
/*
* Receive work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->r_max_sge.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
*/
struct ipath_rwqe {
u64 wr_id;
- u32 length; /* total length of data in sg_list */
u8 num_sge;
- struct ipath_sge sg_list[0];
+ struct ib_sge sg_list[0];
};
-struct ipath_rq {
- spinlock_t lock;
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct ipath_rwq {
u32 head; /* new work requests posted to the head */
u32 tail; /* receives pull requests from here. */
+ struct ipath_rwqe wq[0];
+};
+
+struct ipath_rq {
+ struct ipath_rwq *wq;
+ spinlock_t lock;
u32 size; /* size of RWQE array */
u8 max_sge;
- struct ipath_rwqe *wq; /* RWQE array */
};
struct ipath_srq {
struct ib_srq ibsrq;
struct ipath_rq rq;
+ struct ipath_mmap_info *ip;
/* send signal when number of RWQEs < limit */
u32 limit;
};
+struct ipath_sge_state {
+ struct ipath_sge *sg_list; /* next SGE to be used if any */
+ struct ipath_sge sge; /* progress state for the current SGE */
+ u8 num_sge;
+};
+
/*
* Variables prefixed with s_ are for the requester (sender).
* Variables prefixed with r_ are for the responder (receiver).
@@ -293,6 +329,7 @@ struct ipath_qp {
atomic_t refcount;
wait_queue_head_t wait;
struct tasklet_struct s_task;
+ struct ipath_mmap_info *ip;
struct ipath_sge_state *s_cur_sge;
struct ipath_sge_state s_sge; /* current send request data */
/* current RDMA read send data */
@@ -334,6 +371,7 @@ struct ipath_qp {
u8 s_retry; /* requester retry counter */
u8 s_rnr_retry; /* requester RNR retry counter */
u8 s_pkey_index; /* PKEY index to use */
+ u8 timeout; /* Timeout for this QP */
enum ib_mtu path_mtu;
u32 remote_qpn;
u32 qkey; /* QKEY for this QP (for UD or RD) */
@@ -345,7 +383,8 @@ struct ipath_qp {
u32 s_ssn; /* SSN of tail entry */
u32 s_lsn; /* limit sequence number (credit) */
struct ipath_swqe *s_wq; /* send work queue */
- struct ipath_rq r_rq; /* receive work queue */
+ struct ipath_rq r_rq; /* receive work queue */
+ struct ipath_sge r_sg_list[0]; /* verified SGEs */
};
/*
@@ -369,15 +408,15 @@ static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
/*
* Since struct ipath_rwqe is not a fixed size, we can't simply index into
- * struct ipath_rq.wq. This function does the array index computation.
+ * struct ipath_rwq.wq. This function does the array index computation.
*/
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
unsigned n)
{
return (struct ipath_rwqe *)
- ((char *) rq->wq +
+ ((char *) rq->wq->wq +
(sizeof(struct ipath_rwqe) +
- rq->max_sge * sizeof(struct ipath_sge)) * n);
+ rq->max_sge * sizeof(struct ib_sge)) * n);
}
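
As a worked example (64-bit kernel assumed): with max_sge = 4 the
per-entry stride is sizeof(struct ipath_rwqe) + 4 * sizeof(struct ib_sge)
= 16 + 64 = 80 bytes, so entry n starts at byte offset 80 * n into
rq->wq->wq.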
/*
@@ -417,6 +456,7 @@ struct ipath_ibdev {
struct ib_device ibdev;
struct list_head dev_list;
struct ipath_devdata *dd;
+ struct ipath_mmap_info *pending_mmaps;
int ib_unit; /* This is the device number */
u16 sm_lid; /* in host order */
u8 sm_sl;
@@ -435,11 +475,20 @@ struct ipath_ibdev {
__be64 sys_image_guid; /* in network order */
__be64 gid_prefix; /* in network order */
__be64 mkey;
+
u32 n_pds_allocated; /* number of PDs allocated for device */
+ spinlock_t n_pds_lock;
u32 n_ahs_allocated; /* number of AHs allocated for device */
+ spinlock_t n_ahs_lock;
u32 n_cqs_allocated; /* number of CQs allocated for device */
+ spinlock_t n_cqs_lock;
+ u32 n_qps_allocated; /* number of QPs allocated for device */
+ spinlock_t n_qps_lock;
u32 n_srqs_allocated; /* number of SRQs allocated for device */
+ spinlock_t n_srqs_lock;
u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
+ spinlock_t n_mcast_grps_lock;
+
u64 ipath_sword; /* total dwords sent (sample result) */
u64 ipath_rword; /* total dwords received (sample result) */
u64 ipath_spkts; /* total packets sent (sample result) */
@@ -494,8 +543,19 @@ struct ipath_ibdev {
struct ipath_opcode_stats opstats[128];
};
-struct ipath_ucontext {
- struct ib_ucontext ibucontext;
+struct ipath_verbs_counters {
+ u64 symbol_error_counter;
+ u64 link_error_recovery_counter;
+ u64 link_downed_counter;
+ u64 port_rcv_errors;
+ u64 port_rcv_remphys_errors;
+ u64 port_xmit_discards;
+ u64 port_xmit_data;
+ u64 port_rcv_data;
+ u64 port_xmit_packets;
+ u64 port_rcv_packets;
+ u32 local_link_integrity_errors;
+ u32 excessive_buffer_overrun_errors;
};
static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
@@ -503,11 +563,6 @@ static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
return container_of(ibmr, struct ipath_mr, ibmr);
}
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct ipath_pd, ibpd);
@@ -545,12 +600,6 @@ int ipath_process_mad(struct ib_device *ibdev,
struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad);
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
-{
- return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
/*
* Compare the lower 24 bits of the two values.
* Returns an integer <, ==, or > than zero.
@@ -562,6 +611,13 @@ static inline int ipath_cmp24(u32 a, u32 b)
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
+int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
+ u64 *rwords, u64 *spkts, u64 *rpkts,
+ u64 *xmit_wait);
+
+int ipath_get_counters(struct ipath_devdata *dd,
+ struct ipath_verbs_counters *cntrs);
+
int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
@@ -579,7 +635,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
int ipath_destroy_qp(struct ib_qp *ibqp);
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask);
+ int attr_mask, struct ib_udata *udata);
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr);
@@ -592,6 +648,9 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
+int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
+ u32 *hdr, u32 len, struct ipath_sge_state *ss);
+
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
@@ -638,7 +697,8 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
struct ib_udata *udata);
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask);
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
@@ -680,6 +740,10 @@ int ipath_unmap_fmr(struct list_head *fmr_list);
int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
+void ipath_release_mmap_info(struct kref *ref);
+
+int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
void ipath_insert_rnr_queue(struct ipath_qp *qp);
@@ -700,6 +764,22 @@ int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
u32 pmtu, u32 *bth0p, u32 *bth2p);
+int ipath_register_ib_device(struct ipath_devdata *);
+
+void ipath_unregister_ib_device(struct ipath_ibdev *);
+
+void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
+
+int ipath_ib_piobufavail(struct ipath_ibdev *);
+
+void ipath_ib_timer(struct ipath_ibdev *);
+
+unsigned ipath_get_npkeys(struct ipath_devdata *);
+
+u32 ipath_get_cr_errpkey(struct ipath_devdata *);
+
+unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
+
extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
extern const u8 ipath_cvt_physportstate[];
@@ -714,6 +794,8 @@ extern unsigned int ib_ipath_max_cqs;
extern unsigned int ib_ipath_max_qp_wrs;
+extern unsigned int ib_ipath_max_qps;
+
extern unsigned int ib_ipath_max_sges;
extern unsigned int ib_ipath_max_mcast_grps;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index ee0e1d96d72..085e28b939e 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -207,12 +207,17 @@ static int ipath_mcast_add(struct ipath_ibdev *dev,
goto bail;
}
+ spin_lock(&dev->n_mcast_grps_lock);
if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
+ spin_unlock(&dev->n_mcast_grps_lock);
ret = ENOMEM;
goto bail;
}
dev->n_mcast_grps_allocated++;
+ spin_unlock(&dev->n_mcast_grps_lock);
+
+ mcast->n_attached++;
list_add_tail_rcu(&mqp->list, &mcast->qp_list);
@@ -343,7 +348,9 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
atomic_dec(&mcast->refcount);
wait_event(mcast->wait, !atomic_read(&mcast->refcount));
ipath_mcast_free(mcast);
+ spin_lock(&dev->n_mcast_grps_lock);
dev->n_mcast_grps_allocated--;
+ spin_unlock(&dev->n_mcast_grps_lock);
}
ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
new file mode 100644
index 00000000000..036fde662aa
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * This file is conditionally built on PowerPC only. Otherwise weak symbol
+ * versions of the functions exported from here are used.
+ */
+
+#include "ipath_kernel.h"
+
+/**
+ * ipath_unordered_wc - indicate whether write combining is ordered
+ *
+ * PowerPC systems (at least those in the 970 processor family)
+ * write partially filled store buffers in address order, but will write
+ * completely filled store buffers in "random" order, and therefore must
+ * have serialization for correctness with current InfiniPath chips.
+ *
+ */
+int ipath_unordered_wc(void)
+{
+ return 1;
+}
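
The generic build presumably supplies the weak counterpart that this
PowerPC object overrides at link time; a sketch of what that default
would look like (assumed, not shown in this diff):

int __attribute__((weak)) ipath_unordered_wc(void)
{
	return 0;	/* write combining is ordered; no extra flushes */
}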
diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h
deleted file mode 100644
index 6186676f2a1..00000000000
--- a/drivers/infiniband/hw/ipath/verbs_debug.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _VERBS_DEBUG_H
-#define _VERBS_DEBUG_H
-
-/*
- * This file contains tracing code for the ib_ipath kernel module.
- */
-#ifndef _VERBS_DEBUGGING /* tracing enabled or not */
-#define _VERBS_DEBUGGING 1
-#endif
-
-extern unsigned ib_ipath_debug;
-
-#define _VERBS_ERROR(fmt,...) \
- do { \
- printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
- } while(0)
-
-#define _VERBS_UNIT_ERROR(unit,fmt,...) \
- do { \
- printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
- } while(0)
-
-#if _VERBS_DEBUGGING
-
-/*
- * Mask values for debugging. The scheme allows us to compile out any
- * of the debug tracing stuff, and if compiled in, to enable or
- * disable dynamically.
- * This can be set at modprobe time also:
- * modprobe ib_path ib_ipath_debug=3
- */
-
-#define __VERBS_INFO 0x1 /* generic low verbosity stuff */
-#define __VERBS_DBG 0x2 /* generic debug */
-#define __VERBS_VDBG 0x4 /* verbose debug */
-#define __VERBS_SMADBG 0x8000 /* sma packet debug */
-
-#define _VERBS_INFO(fmt,...) \
- do { \
- if (unlikely(ib_ipath_debug&__VERBS_INFO)) \
- printk(KERN_INFO "%s: " fmt,"ib_ipath", \
- ##__VA_ARGS__); \
- } while(0)
-
-#define _VERBS_DBG(fmt,...) \
- do { \
- if (unlikely(ib_ipath_debug&__VERBS_DBG)) \
- printk(KERN_DEBUG "%s: " fmt, __func__, \
- ##__VA_ARGS__); \
- } while(0)
-
-#define _VERBS_VDBG(fmt,...) \
- do { \
- if (unlikely(ib_ipath_debug&__VERBS_VDBG)) \
- printk(KERN_DEBUG "%s: " fmt, __func__, \
- ##__VA_ARGS__); \
- } while(0)
-
-#define _VERBS_SMADBG(fmt,...) \
- do { \
- if (unlikely(ib_ipath_debug&__VERBS_SMADBG)) \
- printk(KERN_DEBUG "%s: " fmt, __func__, \
- ##__VA_ARGS__); \
- } while(0)
-
-#else /* ! _VERBS_DEBUGGING */
-
-#define _VERBS_INFO(fmt,...)
-#define _VERBS_DBG(fmt,...)
-#define _VERBS_VDBG(fmt,...)
-#define _VERBS_SMADBG(fmt,...)
-
-#endif /* _VERBS_DEBUGGING */
-
-#endif /* _VERBS_DEBUG_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index 25157f57a6d..f930e55b58f 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -41,9 +41,11 @@
/* Trivial bitmap-based allocator */
u32 mthca_alloc(struct mthca_alloc *alloc)
{
+ unsigned long flags;
u32 obj;
- spin_lock(&alloc->lock);
+ spin_lock_irqsave(&alloc->lock, flags);
+
obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
if (obj >= alloc->max) {
alloc->top = (alloc->top + alloc->max) & alloc->mask;
@@ -56,19 +58,24 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
} else
obj = -1;
- spin_unlock(&alloc->lock);
+ spin_unlock_irqrestore(&alloc->lock, flags);
return obj;
}
void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
+ unsigned long flags;
+
obj &= alloc->max - 1;
- spin_lock(&alloc->lock);
+
+ spin_lock_irqsave(&alloc->lock, flags);
+
clear_bit(obj, alloc->table);
alloc->last = min(alloc->last, obj);
alloc->top = (alloc->top + alloc->max) & alloc->mask;
- spin_unlock(&alloc->lock);
+
+ spin_unlock_irqrestore(&alloc->lock, flags);
}
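
The move to spin_lock_irqsave()/spin_unlock_irqrestore() makes the bitmap
allocator safe to use from contexts that run with interrupts disabled,
such as completion and error paths: a plain spin_lock() could deadlock if
the same lock were ever taken from an interrupt handler.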
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index e215041b2db..69599455aca 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -90,7 +90,7 @@ static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS;
case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS;
- default: return port_rate;
+ default: return mult_to_ib_rate(port_rate);
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index c3bec7490f5..cd044ea2dfa 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -34,6 +34,7 @@
#include <linux/jiffies.h>
#include <linux/timer.h>
+#include <linux/workqueue.h>
#include "mthca_dev.h"
@@ -48,9 +49,41 @@ enum {
static DEFINE_SPINLOCK(catas_lock);
+static LIST_HEAD(catas_list);
+static struct workqueue_struct *catas_wq;
+static struct work_struct catas_work;
+
+static int catas_reset_disable;
+module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
+MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
+
+static void catas_reset(void *work_ptr)
+{
+ struct mthca_dev *dev, *tmpdev;
+ LIST_HEAD(tlist);
+ int ret;
+
+ mutex_lock(&mthca_device_mutex);
+
+ spin_lock_irq(&catas_lock);
+ list_splice_init(&catas_list, &tlist);
+ spin_unlock_irq(&catas_lock);
+
+ list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) {
+ ret = __mthca_restart_one(dev->pdev);
+ if (ret)
+ mthca_err(dev, "Reset failed (%d)\n", ret);
+ else
+ mthca_dbg(dev, "Reset succeeded\n");
+ }
+
+ mutex_unlock(&mthca_device_mutex);
+}
+
static void handle_catas(struct mthca_dev *dev)
{
struct ib_event event;
+ unsigned long flags;
const char *type;
int i;
@@ -82,6 +115,14 @@ static void handle_catas(struct mthca_dev *dev)
for (i = 0; i < dev->catas_err.size; ++i)
mthca_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(dev->catas_err.map + i)));
+
+ if (catas_reset_disable)
+ return;
+
+ spin_lock_irqsave(&catas_lock, flags);
+ list_add(&dev->catas_err.list, &catas_list);
+ queue_work(catas_wq, &catas_work);
+ spin_unlock_irqrestore(&catas_lock, flags);
}
static void poll_catas(unsigned long dev_ptr)
@@ -135,6 +176,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
dev->catas_err.timer.data = (unsigned long) dev;
dev->catas_err.timer.function = poll_catas;
dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
+ INIT_LIST_HEAD(&dev->catas_err.list);
add_timer(&dev->catas_err.timer);
}
@@ -153,4 +195,24 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
dev->catas_err.addr),
dev->catas_err.size * 4);
}
+
+ spin_lock_irq(&catas_lock);
+ list_del(&dev->catas_err.list);
+ spin_unlock_irq(&catas_lock);
+}
+
+int __init mthca_catas_init(void)
+{
+ INIT_WORK(&catas_work, catas_reset, NULL);
+
+ catas_wq = create_singlethread_workqueue("mthca_catas");
+ if (!catas_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mthca_catas_cleanup(void)
+{
+ destroy_workqueue(catas_wq);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index deabc14b4ea..99a94d71093 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -34,7 +34,7 @@
* $Id: mthca_cmd.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <linux/sched.h>
+#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/io.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 3e27a084257..e393681ba7d 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -544,11 +544,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
wq = &(*cur_qp)->rq;
wqe = be32_to_cpu(cqe->wqe);
wqe_index = wqe >> wq->wqe_shift;
- /*
- * WQE addr == base - 1 might be reported in receive completion
- * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
- * Arbel FW 5.1.400. This bug should be fixed in later FW revs.
- */
+ /*
+ * WQE addr == base - 1 might be reported in receive completion
+ * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
+ * Arbel FW 5.1.400. This bug should be fixed in later FW revs.
+ */
if (unlikely(wqe_index < 0))
wqe_index = wq->max - 1;
entry->wr_id = (*cur_qp)->wrid[wqe_index];
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index f8160b8de09..fe5cecf70fe 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -45,6 +45,7 @@
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/mutex.h>
+#include <linux/list.h>
#include <asm/semaphore.h>
@@ -283,8 +284,11 @@ struct mthca_catas_err {
unsigned long stop;
u32 size;
struct timer_list timer;
+ struct list_head list;
};
+extern struct mutex mthca_device_mutex;
+
struct mthca_dev {
struct ib_device ib_dev;
struct pci_dev *pdev;
@@ -450,6 +454,9 @@ void mthca_unregister_device(struct mthca_dev *dev);
void mthca_start_catas_poll(struct mthca_dev *dev);
void mthca_stop_catas_poll(struct mthca_dev *dev);
+int __mthca_restart_one(struct pci_dev *pdev);
+int mthca_catas_init(void);
+void mthca_catas_cleanup(void);
int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar);
void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
@@ -506,7 +513,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
struct ib_srq_attr *attr, struct mthca_srq *srq);
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask);
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mthca_max_srq_sge(struct mthca_dev *dev);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
@@ -521,7 +528,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr);
-int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata);
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index d9bc030bccc..45e106f1480 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -119,7 +119,7 @@ static void smp_snoop(struct ib_device *ibdev,
mthca_update_rate(to_mdev(ibdev), port_num);
update_sm_ah(to_mdev(ibdev), port_num,
- be16_to_cpu(pinfo->lid),
+ be16_to_cpu(pinfo->sm_lid),
pinfo->neighbormtu_mastersmsl & 0xf);
event.device = ibdev;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 7b82c1907f0..47ea0214836 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -80,6 +80,8 @@ static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
+struct mutex mthca_device_mutex;
+
static const char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -978,28 +980,15 @@ static struct {
MTHCA_FLAG_SINAI_OPT }
};
-static int __devinit mthca_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
{
- static int mthca_version_printed = 0;
int ddr_hidden = 0;
int err;
struct mthca_dev *mdev;
- if (!mthca_version_printed) {
- printk(KERN_INFO "%s", mthca_version);
- ++mthca_version_printed;
- }
-
printk(KERN_INFO PFX "Initializing %s\n",
pci_name(pdev));
- if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
- printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
- pci_name(pdev), id->driver_data);
- return -ENODEV;
- }
-
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, "
@@ -1065,7 +1054,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
mdev->pdev = pdev;
- mdev->mthca_flags = mthca_hca_table[id->driver_data].flags;
+ mdev->mthca_flags = mthca_hca_table[hca_type].flags;
if (ddr_hidden)
mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
@@ -1099,13 +1088,13 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
if (err)
goto err_cmd;
- if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
+ if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
(int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
(int) (mdev->fw_ver & 0xffff),
- (int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
- (int) (mthca_hca_table[id->driver_data].latest_fw >> 16) & 0xffff,
- (int) (mthca_hca_table[id->driver_data].latest_fw & 0xffff));
+ (int) (mthca_hca_table[hca_type].latest_fw >> 32),
+ (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
+ (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
}
@@ -1122,6 +1111,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
goto err_unregister;
pci_set_drvdata(pdev, mdev);
+ mdev->hca_type = hca_type;
return 0;
@@ -1166,7 +1156,7 @@ err_disable_pdev:
return err;
}
-static void __devexit mthca_remove_one(struct pci_dev *pdev)
+static void __mthca_remove_one(struct pci_dev *pdev)
{
struct mthca_dev *mdev = pci_get_drvdata(pdev);
u8 status;
@@ -1211,6 +1201,51 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
}
}
+int __mthca_restart_one(struct pci_dev *pdev)
+{
+ struct mthca_dev *mdev;
+
+ mdev = pci_get_drvdata(pdev);
+ if (!mdev)
+ return -ENODEV;
+ __mthca_remove_one(pdev);
+ return __mthca_init_one(pdev, mdev->hca_type);
+}
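
__mthca_restart_one() is the hook used by catas_reset() above: it tears
the device down and probes it again with the remembered hca_type. It
relies on the caller already holding mthca_device_mutex, which is why
mthca_init_one()/mthca_remove_one() below become locked wrappers around
the __-prefixed versions.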
+
+static int __devinit mthca_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static int mthca_version_printed = 0;
+ int ret;
+
+ mutex_lock(&mthca_device_mutex);
+
+ if (!mthca_version_printed) {
+ printk(KERN_INFO "%s", mthca_version);
+ ++mthca_version_printed;
+ }
+
+ if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
+ printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
+ pci_name(pdev), id->driver_data);
+ mutex_unlock(&mthca_device_mutex);
+ return -ENODEV;
+ }
+
+ ret = __mthca_init_one(pdev, id->driver_data);
+
+ mutex_unlock(&mthca_device_mutex);
+
+ return ret;
+}
+
+static void __devexit mthca_remove_one(struct pci_dev *pdev)
+{
+ mutex_lock(&mthca_device_mutex);
+ __mthca_remove_one(pdev);
+ mutex_unlock(&mthca_device_mutex);
+}
+
static struct pci_device_id mthca_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
.driver_data = TAVOR },
@@ -1248,13 +1283,24 @@ static int __init mthca_init(void)
{
int ret;
+ mutex_init(&mthca_device_mutex);
+ ret = mthca_catas_init();
+ if (ret)
+ return ret;
+
ret = pci_register_driver(&mthca_driver);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ mthca_catas_cleanup();
+ return ret;
+ }
+
+ return 0;
}
static void __exit mthca_cleanup(void)
{
pci_unregister_driver(&mthca_driver);
+ mthca_catas_cleanup();
}
module_init(mthca_init);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 265b1d1c4a6..981fe2eebdf 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1288,7 +1288,7 @@ int mthca_register_device(struct mthca_dev *dev)
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
- dev->ib_dev.node_type = IB_NODE_CA;
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.dma_device = &dev->pdev->dev;
dev->ib_dev.class_dev.dev = &dev->pdev->dev;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 2e8f6f36e0a..5e5c58b9920 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -408,7 +408,7 @@ static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
ib_ah_attr->static_rate = mthca_rate_to_ib(dev,
- path->static_rate & 0x7,
+ path->static_rate & 0xf,
ib_ah_attr->port_num);
ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
if (ib_ah_attr->ah_flags) {
@@ -472,10 +472,14 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
if (qp->transport == RC || qp->transport == UC) {
to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+ qp_attr->alt_pkey_index =
+ be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+ qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
- qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
- qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+ qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
+ qp_attr->port_num =
+ (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
@@ -486,11 +490,9 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
qp_attr->min_rnr_timer =
(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
- qp_attr->port_num = qp_attr->ah_attr.port_num;
qp_attr->timeout = context->pri_path.ackto >> 3;
qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
- qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
qp_attr->alt_timeout = context->alt_path.ackto >> 3;
qp_init_attr->cap = qp_attr->cap;
@@ -527,7 +529,8 @@ static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
return 0;
}
-int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
@@ -842,11 +845,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
- mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
- mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
- qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
mthca_wq_reset(&qp->sq);
qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index b60a9d79ae5..0f316c87bf6 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -358,7 +358,7 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
}
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask)
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
diff --git a/drivers/infiniband/hw/mthca/mthca_uar.c b/drivers/infiniband/hw/mthca/mthca_uar.c
index 8e9219842be..8b728486410 100644
--- a/drivers/infiniband/hw/mthca/mthca_uar.c
+++ b/drivers/infiniband/hw/mthca/mthca_uar.c
@@ -60,7 +60,7 @@ int mthca_init_uar_table(struct mthca_dev *dev)
ret = mthca_alloc_init(&dev->uar_table.alloc,
dev->limits.num_uars,
dev->limits.num_uars - 1,
- dev->limits.reserved_uars);
+ dev->limits.reserved_uars + 1);
if (ret)
return ret;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 474aa214ab5..0b8a79d53a0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,8 @@ static inline void ipoib_unregister_debugfs(void) { }
extern int ipoib_sendq_size;
extern int ipoib_recvq_size;
+extern struct ib_sa_client ipoib_sa_client;
+
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
extern int ipoib_debug_level;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5033666b148..f426a69d9a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -169,117 +169,129 @@ static int ipoib_ib_post_receives(struct net_device *dev)
return 0;
}
-static void ipoib_ib_handle_wc(struct net_device *dev,
- struct ib_wc *wc)
+static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- unsigned int wr_id = wc->wr_id;
+ unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
+ struct sk_buff *skb;
+ dma_addr_t addr;
- ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
+ ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status);
- if (wr_id & IPOIB_OP_RECV) {
- wr_id &= ~IPOIB_OP_RECV;
-
- if (wr_id < ipoib_recvq_size) {
- struct sk_buff *skb = priv->rx_ring[wr_id].skb;
- dma_addr_t addr = priv->rx_ring[wr_id].mapping;
-
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- ipoib_warn(priv, "failed recv event "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- priv->rx_ring[wr_id].skb = NULL;
- return;
- }
+ if (unlikely(wr_id >= ipoib_recvq_size)) {
+ ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_recvq_size);
+ return;
+ }
- /*
- * If we can't allocate a new RX buffer, dump
- * this packet and reuse the old buffer.
- */
- if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
- ++priv->stats.rx_dropped;
- goto repost;
- }
+ skb = priv->rx_ring[wr_id].skb;
+ addr = priv->rx_ring[wr_id].mapping;
- ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
- wc->byte_len, wc->slid);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ ipoib_warn(priv, "failed recv event "
+ "(status=%d, wrid=%d vend_err %x)\n",
+ wc->status, wr_id, wc->vendor_err);
+ dma_unmap_single(priv->ca->dma_device, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_ring[wr_id].skb = NULL;
+ return;
+ }
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ /*
+ * If we can't allocate a new RX buffer, dump
+ * this packet and reuse the old buffer.
+ */
+ if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ ++priv->stats.rx_dropped;
+ goto repost;
+ }
- skb_put(skb, wc->byte_len);
- skb_pull(skb, IB_GRH_BYTES);
+ ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+ wc->byte_len, wc->slid);
- if (wc->slid != priv->local_lid ||
- wc->src_qp != priv->qp->qp_num) {
- skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb->mac.raw = skb->data;
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ dma_unmap_single(priv->ca->dma_device, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
- dev->last_rx = jiffies;
- ++priv->stats.rx_packets;
- priv->stats.rx_bytes += skb->len;
+ skb_put(skb, wc->byte_len);
+ skb_pull(skb, IB_GRH_BYTES);
- skb->dev = dev;
- /* XXX get correct PACKET_ type here */
- skb->pkt_type = PACKET_HOST;
- netif_rx_ni(skb);
- } else {
- ipoib_dbg_data(priv, "dropping loopback packet\n");
- dev_kfree_skb_any(skb);
- }
+ if (wc->slid != priv->local_lid ||
+ wc->src_qp != priv->qp->qp_num) {
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+ skb->mac.raw = skb->data;
+ skb_pull(skb, IPOIB_ENCAP_LEN);
- repost:
- if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
- ipoib_warn(priv, "ipoib_ib_post_receive failed "
- "for buf %d\n", wr_id);
- } else
- ipoib_warn(priv, "completion event with wrid %d\n",
- wr_id);
+ dev->last_rx = jiffies;
+ ++priv->stats.rx_packets;
+ priv->stats.rx_bytes += skb->len;
+ skb->dev = dev;
+ /* XXX get correct PACKET_ type here */
+ skb->pkt_type = PACKET_HOST;
+ netif_rx_ni(skb);
} else {
- struct ipoib_tx_buf *tx_req;
- unsigned long flags;
+ ipoib_dbg_data(priv, "dropping loopback packet\n");
+ dev_kfree_skb_any(skb);
+ }
- if (wr_id >= ipoib_sendq_size) {
- ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
- wr_id, ipoib_sendq_size);
- return;
- }
+repost:
+ if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
+ ipoib_warn(priv, "ipoib_ib_post_receive failed "
+ "for buf %d\n", wr_id);
+}
- ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);
+static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ unsigned int wr_id = wc->wr_id;
+ struct ipoib_tx_buf *tx_req;
+ unsigned long flags;
- tx_req = &priv->tx_ring[wr_id];
+ ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
+ wr_id, wc->opcode, wc->status);
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(tx_req, mapping),
- tx_req->skb->len,
- DMA_TO_DEVICE);
+ if (unlikely(wr_id >= ipoib_sendq_size)) {
+ ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
+ wr_id, ipoib_sendq_size);
+ return;
+ }
- ++priv->stats.tx_packets;
- priv->stats.tx_bytes += tx_req->skb->len;
+ tx_req = &priv->tx_ring[wr_id];
- dev_kfree_skb_any(tx_req->skb);
+ dma_unmap_single(priv->ca->dma_device,
+ pci_unmap_addr(tx_req, mapping),
+ tx_req->skb->len,
+ DMA_TO_DEVICE);
- spin_lock_irqsave(&priv->tx_lock, flags);
- ++priv->tx_tail;
- if (netif_queue_stopped(dev) &&
- test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
- priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
- netif_wake_queue(dev);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ ++priv->stats.tx_packets;
+ priv->stats.tx_bytes += tx_req->skb->len;
- if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR)
- ipoib_warn(priv, "failed send event "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
- }
+ dev_kfree_skb_any(tx_req->skb);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ ++priv->tx_tail;
+ if (netif_queue_stopped(dev) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
+ priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)
+ ipoib_warn(priv, "failed send event "
+ "(status=%d, wrid=%d vend_err %x)\n",
+ wc->status, wr_id, wc->vendor_err);
+}
+
+static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
+{
+ if (wc->wr_id & IPOIB_OP_RECV)
+ ipoib_ib_handle_rx_wc(dev, wc);
+ else
+ ipoib_ib_handle_tx_wc(dev, wc);
}
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
@@ -320,7 +332,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
dma_addr_t addr;
- if (skb->len > dev->mtu + INFINIBAND_ALEN) {
+ if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
skb->len, dev->mtu + INFINIBAND_ALEN);
++priv->stats.tx_dropped;
@@ -619,8 +631,10 @@ void ipoib_ib_dev_flush(void *_dev)
* The device could have been brought down between the start and when
* we get here, don't bring it back up if it's not configured up
*/
- if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
ipoib_ib_dev_up(dev);
+ ipoib_mcast_restart_task(dev);
+ }
mutex_lock(&priv->vlan_mutex);
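
The rework above splits the single completion handler along the bit IPoIB folds into every receive work-request ID; the new dispatcher only tests the bit, and the rx handler strips it before indexing the ring. A condensed sketch of the tagging convention (hypothetical names; IPoIB's IPOIB_OP_RECV plays the OP_RECV role):

    #define OP_RECV (1UL << 31)             /* high bit marks a receive WR */

    /* posting side: fold the ring slot index and the tag into wr_id */
    recv_wr.wr_id = (u64) slot | OP_RECV;

    /* completion side: the tag selects the handler, the low bits are the slot */
    if (wc->wr_id & OP_RECV)
            handle_rx(dev, wc->wr_id & ~OP_RECV);
    else
            handle_tx(dev, wc->wr_id);
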
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cf71d2a5515..1eaf00e9862 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -40,7 +40,6 @@
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/if_arp.h> /* For ARPHRD_xxx */
@@ -82,6 +81,8 @@ static const u8 ipv4_bcast_addr[] = {
struct workqueue_struct *ipoib_workqueue;
+struct ib_sa_client ipoib_sa_client;
+
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
@@ -336,7 +337,8 @@ void ipoib_flush_paths(struct net_device *dev)
struct ipoib_path *path, *tp;
LIST_HEAD(remove_list);
- spin_lock_irq(&priv->lock);
+ spin_lock_irq(&priv->tx_lock);
+ spin_lock(&priv->lock);
list_splice(&priv->path_list, &remove_list);
INIT_LIST_HEAD(&priv->path_list);
@@ -347,12 +349,15 @@ void ipoib_flush_paths(struct net_device *dev)
list_for_each_entry_safe(path, tp, &remove_list, list) {
if (path->query)
ib_sa_cancel_query(path->query_id, path->query);
- spin_unlock_irq(&priv->lock);
+ spin_unlock(&priv->lock);
+ spin_unlock_irq(&priv->tx_lock);
wait_for_completion(&path->done);
path_free(dev, path);
- spin_lock_irq(&priv->lock);
+ spin_lock_irq(&priv->tx_lock);
+ spin_lock(&priv->lock);
}
- spin_unlock_irq(&priv->lock);
+ spin_unlock(&priv->lock);
+ spin_unlock_irq(&priv->tx_lock);
}
static void path_rec_completion(int status,
@@ -459,7 +464,7 @@ static int path_rec_start(struct net_device *dev,
init_completion(&path->done);
path->query_id =
- ib_sa_path_rec_get(priv->ca, priv->port,
+ ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
&path->pathrec,
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
@@ -615,7 +620,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipoib_neigh *neigh;
unsigned long flags;
- if (!spin_trylock_irqsave(&priv->tx_lock, flags))
+ if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
return NETDEV_TX_LOCKED;
/*
@@ -628,7 +633,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (skb->dst && skb->dst->neighbour) {
+ if (likely(skb->dst && skb->dst->neighbour)) {
if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
ipoib_path_lookup(skb, dev);
goto out;
@@ -1107,13 +1112,16 @@ static void ipoib_add_one(struct ib_device *device)
struct ipoib_dev_priv *priv;
int s, e, p;
+ if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ return;
+
dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
if (!dev_list)
return;
INIT_LIST_HEAD(dev_list);
- if (device->node_type == IB_NODE_SWITCH) {
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
s = 0;
e = 0;
} else {
@@ -1137,6 +1145,9 @@ static void ipoib_remove_one(struct ib_device *device)
struct ipoib_dev_priv *priv, *tmp;
struct list_head *dev_list;
+ if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
+ return;
+
dev_list = ib_get_client_data(device, &ipoib_client);
list_for_each_entry_safe(priv, tmp, dev_list, list) {
@@ -1181,13 +1192,16 @@ static int __init ipoib_init_module(void)
goto err_fs;
}
+ ib_sa_register_client(&ipoib_sa_client);
+
ret = ib_register_client(&ipoib_client);
if (ret)
- goto err_wq;
+ goto err_sa;
return 0;
-err_wq:
+err_sa:
+ ib_sa_unregister_client(&ipoib_sa_client);
destroy_workqueue(ipoib_workqueue);
err_fs:
@@ -1199,6 +1213,7 @@ err_fs:
static void __exit ipoib_cleanup_module(void)
{
ib_unregister_client(&ipoib_client);
+ ib_sa_unregister_client(&ipoib_sa_client);
ipoib_unregister_debugfs();
destroy_workqueue(ipoib_workqueue);
}
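
ipoib_flush_paths now nests priv->lock inside the IRQ-disabling priv->tx_lock and drops them in reverse, matching the order used on the transmit path so the two can never deadlock. The discipline in miniature (generic lock names):

    spin_lock_irq(&outer);          /* outermost lock, IRQs off */
    spin_lock(&inner);              /* nested lock; plain variant suffices */

    /* ... state guarded by both locks ... */

    spin_unlock(&inner);            /* always release in reverse order */
    spin_unlock_irq(&outer);
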
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b5e6a7be603..3faa1820f0e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -326,6 +326,7 @@ ipoib_mcast_sendonly_join_complete(int status,
/* Clear the busy flag so we try again */
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ mcast->query = NULL;
}
complete(&mcast->done);
@@ -360,7 +361,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
init_completion(&mcast->done);
- ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
+ ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY |
@@ -471,22 +472,32 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (create) {
comp_mask |=
- IB_SA_MCMEMBER_REC_QKEY |
- IB_SA_MCMEMBER_REC_SL |
- IB_SA_MCMEMBER_REC_FLOW_LABEL |
- IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
+ IB_SA_MCMEMBER_REC_QKEY |
+ IB_SA_MCMEMBER_REC_MTU_SELECTOR |
+ IB_SA_MCMEMBER_REC_MTU |
+ IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
+ IB_SA_MCMEMBER_REC_RATE_SELECTOR |
+ IB_SA_MCMEMBER_REC_RATE |
+ IB_SA_MCMEMBER_REC_SL |
+ IB_SA_MCMEMBER_REC_FLOW_LABEL |
+ IB_SA_MCMEMBER_REC_HOP_LIMIT;
rec.qkey = priv->broadcast->mcmember.qkey;
+ rec.mtu_selector = IB_SA_EQ;
+ rec.mtu = priv->broadcast->mcmember.mtu;
+ rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ rec.rate_selector = IB_SA_EQ;
+ rec.rate = priv->broadcast->mcmember.rate;
rec.sl = priv->broadcast->mcmember.sl;
rec.flow_label = priv->broadcast->mcmember.flow_label;
- rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
init_completion(&mcast->done);
- ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
- mcast->backoff * 1000, GFP_ATOMIC,
- ipoib_mcast_join_complete,
+ ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port,
+ &rec, comp_mask, mcast->backoff * 1000,
+ GFP_ATOMIC, ipoib_mcast_join_complete,
mcast, &mcast->query);
if (ret < 0) {
@@ -527,7 +538,7 @@ void ipoib_mcast_join_task(void *dev_ptr)
priv->local_rate = attr.active_speed *
ib_width_enum_to_int(attr.active_width);
} else
- ipoib_warn(priv, "ib_query_port failed\n");
+ ipoib_warn(priv, "ib_query_port failed\n");
}
if (!priv->broadcast) {
@@ -680,7 +691,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
* Just make one shot at leaving and don't wait for a reply;
* if we fail, too bad.
*/
- ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
+ ret = ib_sa_mcmember_rec_delete(&ipoib_sa_client, priv->ca, priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY |
@@ -794,7 +805,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
if (priv->broadcast) {
- rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+ rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
list_add_tail(&priv->broadcast->list, &remove_list);
priv->broadcast = NULL;
}
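
The join path above grows its comp_mask so the SA is told exactly which mcmember fields the request fills in, and the IB_SA_EQ selectors demand an exact match against the broadcast group's MTU and rate. A trimmed sketch of the pattern (abbreviated field set, bcast standing in for priv->broadcast):

    comp_mask = IB_SA_MCMEMBER_REC_MGID |
                IB_SA_MCMEMBER_REC_QKEY |
                IB_SA_MCMEMBER_REC_MTU_SELECTOR |
                IB_SA_MCMEMBER_REC_MTU;

    rec.qkey         = bcast->mcmember.qkey;
    rec.mtu_selector = IB_SA_EQ;        /* MTU must equal rec.mtu exactly */
    rec.mtu          = bcast->mcmember.mtu;
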
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index fead87d1eff..365a1b5f19e 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_ISER
tristate "ISCSI RDMA Protocol"
- depends on INFINIBAND && SCSI
+ depends on INFINIBAND && SCSI && INET
select SCSI_ISCSI_ATTRS
---help---
Support for the ISCSI RDMA Protocol over InfiniBand. This
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 101e407eaa4..2a14fe2e322 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -545,6 +545,7 @@ static struct scsi_host_template iscsi_iser_sht = {
.queuecommand = iscsi_queuecommand,
.can_queue = ISCSI_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_host_reset_handler = iscsi_eh_host_reset,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 7c3d0c96d88..2cf9ae0def1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -82,8 +82,12 @@
__func__ , ## arg); \
} while (0)
+#define SHIFT_4K 12
+#define SIZE_4K (1UL << SHIFT_4K)
+#define MASK_4K (~(SIZE_4K-1))
+
/* support up to 512KB in one RDMA */
-#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> PAGE_SHIFT)
+#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
#define ISCSI_ISER_MAX_LUN 256
#define ISCSI_ISER_MAX_CMD_LEN 16
@@ -171,6 +175,7 @@ struct iser_mem_reg {
u64 va;
u64 len;
void *mem_h;
+ int is_fmr;
};
struct iser_regd_buf {
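
The new SHIFT_4K/SIZE_4K/MASK_4K constants pin iSER's RDMA page size to 4KB independent of the CPU's PAGE_SIZE, which also makes ISCSI_ISER_SG_TABLESIZE a stable 0x80000 >> 12 = 128 entries on every architecture. A short worked sketch of the mask arithmetic:

    #define SHIFT_4K 12
    #define SIZE_4K  (1UL << SHIFT_4K)          /* 0x1000 */
    #define MASK_4K  (~(SIZE_4K - 1))           /* ...fffff000 */

    u64 addr = 0x12345678;
    u64 page = addr & MASK_4K;                  /* 0x12345000: page start   */
    u64 off  = addr & ~MASK_4K;                 /* 0x00000678: in-page byte */
    int ok   = (addr & ~MASK_4K) == 0;          /* 0 here: not 4K aligned   */
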
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 31950a522a1..d0b03f42658 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -42,6 +42,7 @@
#include "iscsi_iser.h"
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+
/**
* Decrements the reference count for the
* registered buffer & releases it
@@ -55,7 +56,7 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
if ((atomic_read(&regd_buf->ref_count) == 0) ||
atomic_dec_and_test(&regd_buf->ref_count)) {
/* if we used the dma mr, unreg is just NOP */
- if (regd_buf->reg.rkey != 0)
+ if (regd_buf->reg.is_fmr)
iser_unreg_mem(&regd_buf->reg);
if (regd_buf->dma_addr) {
@@ -90,9 +91,9 @@ void iser_reg_single(struct iser_device *device,
BUG_ON(dma_mapping_error(dma_addr));
regd_buf->reg.lkey = device->mr->lkey;
- regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */
regd_buf->reg.len = regd_buf->data_size;
regd_buf->reg.va = dma_addr;
+ regd_buf->reg.is_fmr = 0;
regd_buf->dma_addr = dma_addr;
regd_buf->direction = direction;
@@ -239,7 +240,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
int i;
/* compute the offset of first element */
- page_vec->offset = (u64) sg[0].offset;
+ page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
for (i = 0; i < data->dma_nents; i++) {
total_sz += sg_dma_len(&sg[i]);
@@ -247,21 +248,30 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
first_addr = sg_dma_address(&sg[i]);
last_addr = first_addr + sg_dma_len(&sg[i]);
- start_aligned = !(first_addr & ~PAGE_MASK);
- end_aligned = !(last_addr & ~PAGE_MASK);
+ start_aligned = !(first_addr & ~MASK_4K);
+ end_aligned = !(last_addr & ~MASK_4K);
/* continue to collect page fragments till aligned or SG ends */
while (!end_aligned && (i + 1 < data->dma_nents)) {
i++;
total_sz += sg_dma_len(&sg[i]);
last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
- end_aligned = !(last_addr & ~PAGE_MASK);
+ end_aligned = !(last_addr & ~MASK_4K);
}
- first_addr = first_addr & PAGE_MASK;
-
- for (page = first_addr; page < last_addr; page += PAGE_SIZE)
- page_vec->pages[cur_page++] = page;
+ /* handle the 1st page in the 1st DMA element */
+ if (cur_page == 0) {
+ page = first_addr & MASK_4K;
+ page_vec->pages[cur_page] = page;
+ cur_page++;
+ page += SIZE_4K;
+ } else
+ page = first_addr;
+
+ for (; page < last_addr; page += SIZE_4K) {
+ page_vec->pages[cur_page] = page;
+ cur_page++;
+ }
}
page_vec->data_size = total_sz;
@@ -269,8 +279,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
return cur_page;
}
-#define MASK_4K ((1UL << 12) - 1) /* 0xFFF */
-#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & MASK_4K) == 0)
+#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
/**
* iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -320,9 +329,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
struct scatterlist *sg = (struct scatterlist *)data->buf;
int i;
- for (i = 0; i < data->size; i++)
+ for (i = 0; i < data->dma_nents; i++)
iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
- "off:%d sz:%d dma_len:%d\n",
+ "off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)sg_dma_address(&sg[i]),
sg[i].page, sg[i].offset,
sg[i].length, sg_dma_len(&sg[i]));
@@ -352,7 +361,7 @@ static void iser_page_vec_build(struct iser_data_buf *data,
page_vec->length = page_vec_len;
- if (page_vec_len * PAGE_SIZE < page_vec->data_size) {
+ if (page_vec_len * SIZE_4K < page_vec->data_size) {
iser_err("page_vec too short to hold this SG\n");
iser_data_buf_dump(data);
iser_dump_page_vec(page_vec);
@@ -370,15 +379,18 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir cmd_dir)
{
struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
struct iser_regd_buf *regd_buf;
int aligned_len;
int err;
+ int i;
+ struct scatterlist *sg;
regd_buf = &iser_ctask->rdma_regd[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem);
- if (aligned_len != mem->size) {
+ if (aligned_len != mem->dma_nents) {
iser_err("rdma alignment violation %d/%d aligned\n",
aligned_len, mem->size);
iser_data_buf_dump(mem);
@@ -389,10 +401,38 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
mem = &iser_ctask->data_copy[cmd_dir];
}
- iser_page_vec_build(mem, ib_conn->page_vec);
- err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
- if (err)
- return err;
+ /* if there is a single dma entry, FMR is not needed */
+ if (mem->dma_nents == 1) {
+ sg = (struct scatterlist *)mem->buf;
+
+ regd_buf->reg.lkey = device->mr->lkey;
+ regd_buf->reg.rkey = device->mr->rkey;
+ regd_buf->reg.len = sg_dma_len(&sg[0]);
+ regd_buf->reg.va = sg_dma_address(&sg[0]);
+ regd_buf->reg.is_fmr = 0;
+
+ iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
+ "va: 0x%08lX sz: %ld]\n",
+ (unsigned int)regd_buf->reg.lkey,
+ (unsigned int)regd_buf->reg.rkey,
+ (unsigned long)regd_buf->reg.va,
+ (unsigned long)regd_buf->reg.len);
+ } else { /* use FMR for multiple dma entries */
+ iser_page_vec_build(mem, ib_conn->page_vec);
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem);
+ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
+ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+ for (i = 0; i < ib_conn->page_vec->length; i++)
+ iser_err("page_vec[%d] = 0x%llx\n", i,
+ (unsigned long long) ib_conn->page_vec->pages[i]);
+ return err;
+ }
+ }
/* take a reference on this regd buf such that it will not be released *
* (eg in send dto completion) before we get the scsi response */
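
The hunk above adds a fast path: when the mapped data collapses to a single DMA element, the device's global DMA MR (granted remote read/write access later in this patch, in iser_verbs.c) describes the buffer directly and no FMR is consumed; only multi-element layouts go through the page-vector/FMR machinery, with verbose diagnostics on failure. The decision, condensed (build_page_vec/map_fmr are hypothetical stand-ins for the driver's helpers):

    if (mem->dma_nents == 1) {
            reg->lkey   = device->mr->lkey;     /* global DMA MR covers it */
            reg->rkey   = device->mr->rkey;
            reg->va     = sg_dma_address(sg);
            reg->len    = sg_dma_len(sg);
            reg->is_fmr = 0;                    /* nothing to unmap later  */
    } else {
            build_page_vec(mem, page_vec);      /* 4K-aligned page list    */
            err = map_fmr(conn, page_vec, reg); /* sets reg->is_fmr = 1    */
    }
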
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 72febf1f8ff..ecdca7fc1e4 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -88,8 +88,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
iser_cq_tasklet_fn,
(unsigned long)device);
- device->mr = ib_get_dma_mr(device->pd,
- IB_ACCESS_LOCAL_WRITE);
+ device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
if (IS_ERR(device->mr))
goto dma_mr_err;
@@ -150,7 +151,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
}
ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
- params.page_shift = PAGE_SHIFT;
+ params.page_shift = SHIFT_4K;
/* when the first/last SG elements are not start/end *
* page aligned, the map would be of N+1 pages */
params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
@@ -604,8 +605,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
mem_reg->lkey = mem->fmr->lkey;
mem_reg->rkey = mem->fmr->rkey;
- mem_reg->len = page_vec->length * PAGE_SIZE;
+ mem_reg->len = page_vec->length * SIZE_4K;
mem_reg->va = io_addr;
+ mem_reg->is_fmr = 1;
mem_reg->mem_h = (void *)mem;
mem_reg->va += page_vec->offset;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8257d5a2c8f..44b9e5be668 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -96,6 +96,8 @@ static struct ib_client srp_client = {
.remove = srp_remove_one
};
+static struct ib_sa_client srp_sa_client;
+
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
return (struct srp_target_port *) host->hostdata;
@@ -267,7 +269,8 @@ static int srp_lookup_path(struct srp_target_port *target)
init_completion(&target->done);
- target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
+ target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ target->srp_host->dev->dev,
target->srp_host->port,
&target->path,
IB_SA_PATH_REC_DGID |
@@ -330,7 +333,7 @@ static int srp_send_req(struct srp_target_port *target)
req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
/*
- * In the published SRP specification (draft rev. 16a), the
+ * In the published SRP specification (draft rev. 16a), the
* port identifier format is 8 bytes of ID extension followed
* by 8 bytes of GUID. Older drafts put the two halves in the
* opposite order, so that the GUID comes first.
@@ -799,13 +802,6 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
-static void srp_reconnect_work(void *target_ptr)
-{
- struct srp_target_port *target = target_ptr;
-
- srp_reconnect_target(target);
-}
-
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
struct srp_iu *iu;
@@ -858,7 +854,6 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
struct ib_wc wc;
- unsigned long flags;
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(cq, 1, &wc) > 0) {
@@ -866,10 +861,6 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
printk(KERN_ERR PFX "failed %s status %d\n",
wc.wr_id & SRP_OP_RECV ? "receive" : "send",
wc.status);
- spin_lock_irqsave(target->scsi_host->host_lock, flags);
- if (target->state == SRP_TARGET_LIVE)
- schedule_work(&target->work);
- spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
break;
}
@@ -1461,12 +1452,28 @@ static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
return sprintf(buf, "%d\n", target->zero_req_lim);
}
-static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
-static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
-static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
-static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
-static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
-static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
+static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+ return sprintf(buf, "%d\n", target->srp_host->port);
+}
+
+static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
+{
+ struct srp_target_port *target = host_to_target(class_to_shost(cdev));
+
+ return sprintf(buf, "%s\n", target->srp_host->dev->dev->name);
+}
+
+static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
+static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
+static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
+static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
+static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
+static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
+static CLASS_DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
+static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static struct class_device_attribute *srp_host_attrs[] = {
&class_device_attr_id_ext,
@@ -1475,6 +1482,8 @@ static struct class_device_attribute *srp_host_attrs[] = {
&class_device_attr_pkey,
&class_device_attr_dgid,
&class_device_attr_zero_req_lim,
+ &class_device_attr_local_ib_port,
+ &class_device_attr_local_ib_device,
NULL
};
@@ -1705,8 +1714,6 @@ static ssize_t srp_create_target(struct class_device *class_dev,
target->scsi_host = target_host;
target->srp_host = host;
- INIT_WORK(&target->work, srp_reconnect_work, target);
-
INIT_LIST_HEAD(&target->free_reqs);
INIT_LIST_HEAD(&target->req_queue);
for (i = 0; i < SRP_SQ_SIZE; ++i) {
@@ -1895,7 +1902,7 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->fmr_pool))
srp_dev->fmr_pool = NULL;
- if (device->node_type == IB_NODE_SWITCH) {
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
s = 0;
e = 0;
} else {
@@ -1994,9 +2001,12 @@ static int __init srp_init_module(void)
return ret;
}
+ ib_sa_register_client(&srp_sa_client);
+
ret = ib_register_client(&srp_client);
if (ret) {
printk(KERN_ERR PFX "couldn't register IB client\n");
+ ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
return ret;
}
@@ -2007,6 +2017,7 @@ static int __init srp_init_module(void)
static void __exit srp_cleanup_module(void)
{
ib_unregister_client(&srp_client);
+ ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
}
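
SRP now mirrors IPoIB: a module-wide struct ib_sa_client is registered before the ib_client and unregistered after it, so SA queries issued from client callbacks always run against a live SA context. The ordering, sketched with generic names:

    static struct ib_sa_client my_sa_client;

    static int __init my_init(void)
    {
            int ret;

            ib_sa_register_client(&my_sa_client);           /* cannot fail */

            ret = ib_register_client(&my_ib_client);
            if (ret)
                    ib_sa_unregister_client(&my_sa_client); /* unwind */
            return ret;
    }

    static void __exit my_exit(void)
    {
            ib_unregister_client(&my_ib_client);    /* stop SA users first */
            ib_sa_unregister_client(&my_sa_client);
    }
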
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index c69d23bb255..efd51e01c06 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -45,8 +45,8 @@
#include <linux/pmu.h>
#include <asm/machdep.h>
-#include <asm/backlight.h>
#ifdef CONFIG_PPC_PMAC
+#include <asm/backlight.h>
#include <asm/pmac_feature.h>
#endif
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 82657bc86d1..d5621606754 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -139,7 +139,9 @@ static int macio_uevent(struct device *dev, char **envp, int num_envp,
{
struct macio_dev * macio_dev;
struct of_device * of;
- char *scratch, *compat, *compat2;
+ char *scratch;
+ const char *compat, *compat2;
+
int i = 0;
int length, cplen, cplen2, seen = 0;
@@ -173,7 +175,7 @@ static int macio_uevent(struct device *dev, char **envp, int num_envp,
* it's not really legal to split it out with commas. We split it
* up using a number of environment variables instead. */
- compat = (char *) get_property(of->node, "compatible", &cplen);
+ compat = get_property(of->node, "compatible", &cplen);
compat2 = compat;
cplen2= cplen;
while (compat && cplen > 0) {
@@ -454,7 +456,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
struct resource *parent_res)
{
struct macio_dev *dev;
- u32 *reg;
+ const u32 *reg;
if (np == NULL)
return NULL;
@@ -489,7 +491,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
#endif
MAX_NODE_NAME_SIZE, np->name);
} else {
- reg = (u32 *)get_property(np, "reg", NULL);
+ reg = get_property(np, "reg", NULL);
sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s",
chip->lbus.index,
reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index cae24a13526..8566bdfdd4b 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -16,12 +16,12 @@ static ssize_t
compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct of_device *of;
- char *compat;
+ const char *compat;
int cplen;
int length = 0;
of = &to_macio_device (dev)->ofdev;
- compat = (char *) get_property(of->node, "compatible", &cplen);
+ compat = get_property(of->node, "compatible", &cplen);
if (!compat) {
*buf = '\0';
return 0;
@@ -42,12 +42,12 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
struct of_device *of;
- char *compat;
+ const char *compat;
int cplen;
int length;
of = &to_macio_device (dev)->ofdev;
- compat = (char *) get_property (of->node, "compatible", &cplen);
+ compat = get_property(of->node, "compatible", &cplen);
if (!compat) compat = "", cplen = 1;
length = sprintf (buf, "of:N%sT%s", of->node->name, of->node->type);
buf += length;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 00ef4689814..090e40fc501 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -454,7 +454,7 @@ EXPORT_SYMBOL(smu_present);
int __init smu_init (void)
{
struct device_node *np;
- u32 *data;
+ const u32 *data;
np = of_find_node_by_type(NULL, "smu");
if (np == NULL)
@@ -490,7 +490,7 @@ int __init smu_init (void)
printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n");
goto fail;
}
- data = (u32 *)get_property(smu->db_node, "reg", NULL);
+ data = get_property(smu->db_node, "reg", NULL);
if (data == NULL) {
of_node_put(smu->db_node);
smu->db_node = NULL;
@@ -511,7 +511,7 @@ int __init smu_init (void)
smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt");
if (smu->msg_node == NULL)
break;
- data = (u32 *)get_property(smu->msg_node, "reg", NULL);
+ data = get_property(smu->msg_node, "reg", NULL);
if (data == NULL) {
of_node_put(smu->msg_node);
smu->msg_node = NULL;
@@ -982,11 +982,11 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
/* Note: Only allowed to return error code in pointers (using ERR_PTR)
* when interruptible is 1
*/
-struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
- int interruptible)
+const struct smu_sdbp_header *__smu_get_sdb_partition(int id,
+ unsigned int *size, int interruptible)
{
char pname[32];
- struct smu_sdbp_header *part;
+ const struct smu_sdbp_header *part;
if (!smu)
return NULL;
@@ -1003,8 +1003,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
} else
mutex_lock(&smu_part_access);
- part = (struct smu_sdbp_header *)get_property(smu->of_node,
- pname, size);
+ part = get_property(smu->of_node, pname, size);
if (part == NULL) {
DPRINTK("trying to extract from SMU ...\n");
part = smu_create_sdb_partition(id);
@@ -1015,7 +1014,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
return part;
}
-struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size)
+const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size)
{
return __smu_get_sdb_partition(id, size, 0);
}
@@ -1094,7 +1093,7 @@ static ssize_t smu_write(struct file *file, const char __user *buf,
pp->mode = smu_file_events;
return 0;
} else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) {
- struct smu_sdbp_header *part;
+ const struct smu_sdbp_header *part;
part = __smu_get_sdb_partition(hdr.cmd, NULL, 1);
if (part == NULL)
return -EINVAL;
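
The changes through drivers/macintosh drop the (u32 *)/(char *) casts on get_property() now that it hands back const data; callers keep const pointers and copy values out instead of ever writing through them. The resulting idiom, in miniature:

    const u32 *reg;

    reg = get_property(np, "reg", NULL);    /* const result, no cast */
    if (reg == NULL)
            return -ENODEV;
    addr = *reg;                            /* copy out; never write *reg */
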
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 7f86478bdd3..a0f30d0853e 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -47,7 +47,7 @@ static u8 FAN_SPD_SET[2] = {0x30, 0x31};
static u8 default_limits_local[3] = {70, 50, 70}; /* local, sensor1, sensor2 */
static u8 default_limits_chip[3] = {80, 65, 80}; /* local, sensor1, sensor2 */
-static char *sensor_location[3] = {NULL, NULL, NULL};
+static const char *sensor_location[3] = {NULL, NULL, NULL};
static int limit_adjust = 0;
static int fan_speed = -1;
@@ -553,7 +553,7 @@ static int __init
thermostat_init(void)
{
struct device_node* np;
- u32 *prop;
+ const u32 *prop;
int i = 0, offset = 0;
np = of_find_node_by_name(NULL, "fan");
@@ -566,13 +566,13 @@ thermostat_init(void)
else
return -ENODEV;
- prop = (u32 *)get_property(np, "hwsensor-params-version", NULL);
+ prop = get_property(np, "hwsensor-params-version", NULL);
printk(KERN_INFO "adt746x: version %d (%ssupported)\n", *prop,
(*prop == 1)?"":"un");
if (*prop != 1)
return -ENODEV;
- prop = (u32 *)get_property(np, "reg", NULL);
+ prop = get_property(np, "reg", NULL);
if (!prop)
return -ENODEV;
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 20bf67244e2..d00c0c37a12 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -660,7 +660,7 @@ static int read_eeprom(int cpu, struct mpu_data *out)
{
struct device_node *np;
char nodename[64];
- u8 *data;
+ const u8 *data;
int len;
/* prom.c routine for finding a node by path is a bit brain dead
@@ -673,7 +673,7 @@ static int read_eeprom(int cpu, struct mpu_data *out)
printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
return -ENODEV;
}
- data = (u8 *)get_property(np, "cpuid", &len);
+ data = get_property(np, "cpuid", &len);
if (data == NULL) {
printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
of_node_put(np);
@@ -1336,7 +1336,7 @@ static int init_backside_state(struct backside_pid_state *state)
*/
u3 = of_find_node_by_path("/u3@0,f8000000");
if (u3 != NULL) {
- u32 *vers = (u32 *)get_property(u3, "device-rev", NULL);
+ const u32 *vers = get_property(u3, "device-rev", NULL);
if (vers)
if (((*vers) & 0x3f) < 0x34)
u3h = 0;
@@ -2111,8 +2111,8 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
while ((np = of_get_next_child(fcu_node, np)) != NULL) {
int type = -1;
- char *loc;
- u32 *reg;
+ const char *loc;
+ const u32 *reg;
DBG(" control: %s, type: %s\n", np->name, np->type);
@@ -2128,8 +2128,8 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
continue;
/* Lookup for a matching location */
- loc = (char *)get_property(np, "location", NULL);
- reg = (u32 *)get_property(np, "reg", NULL);
+ loc = get_property(np, "location", NULL);
+ reg = get_property(np, "reg", NULL);
if (loc == NULL || reg == NULL)
continue;
DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg);
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index c7d1c290cb0..738faab1b22 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -484,14 +484,14 @@ struct apple_thermal_info {
static int __init
g4fan_init( void )
{
- struct apple_thermal_info *info;
+ const struct apple_thermal_info *info;
struct device_node *np;
init_MUTEX( &x.lock );
if( !(np=of_find_node_by_name(NULL, "power-mgt")) )
return -ENODEV;
- info = (struct apple_thermal_info*)get_property(np, "thermal-info", NULL);
+ info = get_property(np, "thermal-info", NULL);
of_node_put(np);
if( !info || !machine_is_compatible("PowerMac3,6") )
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 69d5452fd22..7512d1c1520 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -123,7 +123,7 @@ int __init find_via_cuda(void)
{
struct adb_request req;
phys_addr_t taddr;
- u32 *reg;
+ const u32 *reg;
int err;
if (vias != 0)
@@ -132,7 +132,7 @@ int __init find_via_cuda(void)
if (vias == 0)
return 0;
- reg = (u32 *)get_property(vias, "reg", NULL);
+ reg = get_property(vias, "reg", NULL);
if (reg == NULL) {
printk(KERN_ERR "via-cuda: No \"reg\" property !\n");
goto fail;
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index d3f8d75bcbb..a82f313d9dc 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -18,17 +18,48 @@
static struct backlight_properties pmu_backlight_data;
static spinlock_t pmu_backlight_lock;
static int sleeping;
+static u8 bl_curve[FB_BACKLIGHT_LEVELS];
-static int pmu_backlight_get_level_brightness(struct fb_info *info,
- int level)
+static void pmu_backlight_init_curve(u8 off, u8 min, u8 max)
+{
+ unsigned int i, flat, count, range = (max - min);
+
+ bl_curve[0] = off;
+
+ for (flat = 1; flat < (FB_BACKLIGHT_LEVELS / 16); ++flat)
+ bl_curve[flat] = min;
+
+ count = FB_BACKLIGHT_LEVELS * 15 / 16;
+ for (i = 0; i < count; ++i)
+ bl_curve[flat + i] = min + (range * (i + 1) / count);
+}
+
+static int pmu_backlight_curve_lookup(int value)
+{
+ int level = (FB_BACKLIGHT_LEVELS - 1);
+ int i, max = 0;
+
+ /* Look for biggest value */
+ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++)
+ max = max((int)bl_curve[i], max);
+
+ /* Look for nearest value */
+ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) {
+ int diff = abs(bl_curve[i] - value);
+ if (diff < max) {
+ max = diff;
+ level = i;
+ }
+ }
+ return level;
+}
+
+static int pmu_backlight_get_level_brightness(int level)
{
int pmulevel;
/* Get and convert the value */
- mutex_lock(&info->bl_mutex);
- pmulevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_PMU_LEVEL;
- mutex_unlock(&info->bl_mutex);
-
+ pmulevel = bl_curve[level] * FB_BACKLIGHT_MAX / MAX_PMU_LEVEL;
if (pmulevel < 0)
pmulevel = 0;
else if (pmulevel > MAX_PMU_LEVEL)
@@ -39,7 +70,6 @@ static int pmu_backlight_get_level_brightness(struct fb_info *info,
static int pmu_backlight_update_status(struct backlight_device *bd)
{
- struct fb_info *info = class_get_devdata(&bd->class_dev);
struct adb_request req;
unsigned long flags;
int level = bd->props->brightness;
@@ -55,7 +85,7 @@ static int pmu_backlight_update_status(struct backlight_device *bd)
level = 0;
if (level > 0) {
- int pmulevel = pmu_backlight_get_level_brightness(info, level);
+ int pmulevel = pmu_backlight_get_level_brightness(level);
pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel);
pmu_wait_complete(&req);
@@ -88,35 +118,19 @@ static struct backlight_properties pmu_backlight_data = {
};
#ifdef CONFIG_PM
-static int pmu_backlight_sleep_call(struct pmu_sleep_notifier *self, int when)
+void pmu_backlight_set_sleep(int sleep)
{
unsigned long flags;
spin_lock_irqsave(&pmu_backlight_lock, flags);
-
- switch (when) {
- case PBOOK_SLEEP_REQUEST:
- sleeping = 1;
- break;
- case PBOOK_WAKE:
- sleeping = 0;
- break;
- }
-
+ sleeping = sleep;
spin_unlock_irqrestore(&pmu_backlight_lock, flags);
-
- return PBOOK_SLEEP_OK;
}
-
-static struct pmu_sleep_notifier pmu_backlight_sleep_notif = {
- .notifier_call = pmu_backlight_sleep_call,
-};
-#endif
+#endif /* CONFIG_PM */
void __init pmu_backlight_init()
{
struct backlight_device *bd;
- struct fb_info *info;
char name[10];
int level, autosave;
@@ -131,27 +145,14 @@ void __init pmu_backlight_init()
!machine_is_compatible("PowerBook1,1"))
return;
- /* Actually, this is a hack, but I don't know of a better way
- * to get the first framebuffer device.
- */
- info = registered_fb[0];
- if (!info) {
- printk("pmubl: No framebuffer found\n");
- goto error;
- }
-
- snprintf(name, sizeof(name), "pmubl%d", info->node);
+ snprintf(name, sizeof(name), "pmubl");
- bd = backlight_device_register(name, info, &pmu_backlight_data);
+ bd = backlight_device_register(name, NULL, &pmu_backlight_data);
if (IS_ERR(bd)) {
printk("pmubl: Backlight registration failed\n");
goto error;
}
-
- mutex_lock(&info->bl_mutex);
- info->bl_dev = bd;
- fb_bl_default_curve(info, 0x7F, 0x46, 0x0E);
- mutex_unlock(&info->bl_mutex);
+ pmu_backlight_init_curve(0x7F, 0x46, 0x0E);
level = pmu_backlight_data.max_brightness;
@@ -161,28 +162,22 @@ void __init pmu_backlight_init()
pmu_request(&req, NULL, 2, 0xd9, 0);
pmu_wait_complete(&req);
- mutex_lock(&info->bl_mutex);
- level = pmac_backlight_curve_lookup(info,
+ level = pmu_backlight_curve_lookup(
(req.reply[0] >> 4) *
pmu_backlight_data.max_brightness / 15);
- mutex_unlock(&info->bl_mutex);
}
- up(&bd->sem);
+ down(&bd->sem);
bd->props->brightness = level;
bd->props->power = FB_BLANK_UNBLANK;
bd->props->update_status(bd);
- down(&bd->sem);
+ up(&bd->sem);
mutex_lock(&pmac_backlight_mutex);
if (!pmac_backlight)
pmac_backlight = bd;
mutex_unlock(&pmac_backlight_mutex);
-#ifdef CONFIG_PM
- pmu_register_sleep_notifier(&pmu_backlight_sleep_notif);
-#endif
-
printk("pmubl: Backlight initialized (%s)\n", name);
return;
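
With the framebuffer dependency gone, the driver keeps its own bl_curve: slot 0 holds the off value, the first sixteenth of the slots sit flat at the minimum, and the remaining slots ramp linearly to the maximum; pmu_backlight_curve_lookup then inverts the curve by nearest value. A simplified version of that inverse scan (the driver seeds its search bound with the curve's largest entry rather than a constant):

    #define LEVELS 128                      /* FB_BACKLIGHT_LEVELS */
    static u8 curve[LEVELS];                /* filled by the init routine */

    static int curve_lookup(int value)
    {
            int level = LEVELS - 1, best = 256, i;  /* 256 > any u8 delta */

            for (i = 0; i < LEVELS; i++) {
                    int diff = abs(curve[i] - value);
                    if (diff < best) {      /* remember the closest slot */
                            best = diff;
                            level = i;
                    }
            }
            return level;
    }
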
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index 5189d5454b1..179af10105d 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -120,7 +120,7 @@ static int __init via_pmu_led_init(void)
dt = of_find_node_by_path("/");
if (dt == NULL)
return -ENODEV;
- model = (const char *)get_property(dt, "model", NULL);
+ model = get_property(dt, "model", NULL);
if (model == NULL)
return -ENODEV;
if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 &&
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index ea386801e21..dda03985dcf 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -280,7 +280,7 @@ static char *pbook_type[] = {
int __init find_via_pmu(void)
{
u64 taddr;
- u32 *reg;
+ const u32 *reg;
if (via != 0)
return 1;
@@ -288,7 +288,7 @@ int __init find_via_pmu(void)
if (vias == NULL)
return 0;
- reg = (u32 *)get_property(vias, "reg", NULL);
+ reg = get_property(vias, "reg", NULL);
if (reg == NULL) {
printk(KERN_ERR "via-pmu: No \"reg\" property !\n");
goto fail;
@@ -330,7 +330,7 @@ int __init find_via_pmu(void)
gpiop = of_find_node_by_name(NULL, "gpio");
if (gpiop) {
- reg = (u32 *)get_property(gpiop, "reg", NULL);
+ reg = get_property(gpiop, "reg", NULL);
if (reg)
gaddr = of_translate_address(gpiop, reg);
if (gaddr != OF_BAD_ADDR)
@@ -479,9 +479,9 @@ static int __init via_pmu_dev_init(void)
pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
} else {
struct device_node* prim = find_devices("power-mgt");
- u32 *prim_info = NULL;
+ const u32 *prim_info = NULL;
if (prim)
- prim_info = (u32 *)get_property(prim, "prim-info", NULL);
+ prim_info = get_property(prim, "prim-info", NULL);
if (prim_info) {
/* Other stuffs here yet unknown */
pmu_battery_count = (prim_info[6] >> 16) & 0xff;
@@ -1995,6 +1995,8 @@ restore_via_state(void)
out_8(&via[IER], IER_SET | SR_INT | CB1_INT);
}
+extern void pmu_backlight_set_sleep(int sleep);
+
static int
pmac_suspend_devices(void)
{
@@ -2032,6 +2034,11 @@ pmac_suspend_devices(void)
return -EBUSY;
}
+#ifdef CONFIG_PMAC_BACKLIGHT
+ /* Tell backlight code not to muck around with the chip anymore */
+ pmu_backlight_set_sleep(1);
+#endif
+
/* Call platform functions marked "on sleep" */
pmac_pfunc_i2c_suspend();
pmac_pfunc_base_suspend();
@@ -2090,6 +2097,11 @@ pmac_wakeup_devices(void)
{
mdelay(100);
+#ifdef CONFIG_PMAC_BACKLIGHT
+ /* Tell backlight code it can use the chip again */
+ pmu_backlight_set_sleep(0);
+#endif
+
/* Power back up system devices (including the PIC) */
device_power_up();
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index f1df6efcbe6..2ff546e4c92 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -396,7 +396,7 @@ static void wf_smu_sys_fans_tick(struct wf_smu_sys_fans_state *st)
static void wf_smu_create_cpu_fans(void)
{
struct wf_cpu_pid_param pid_param;
- struct smu_sdbp_header *hdr;
+ const struct smu_sdbp_header *hdr;
struct smu_sdbp_cpupiddata *piddata;
struct smu_sdbp_fvt *fvt;
s32 tmax, tdelta, maxpow, powadj;
@@ -702,7 +702,7 @@ static struct notifier_block wf_smu_events = {
static int wf_init_pm(void)
{
- struct smu_sdbp_header *hdr;
+ const struct smu_sdbp_header *hdr;
hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL);
if (hdr != 0) {
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 0d6372e96d3..59e9ffe37c3 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -144,7 +144,7 @@ static struct wf_smu_slots_fans_state *wf_smu_slots_fans;
static void wf_smu_create_cpu_fans(void)
{
struct wf_cpu_pid_param pid_param;
- struct smu_sdbp_header *hdr;
+ const struct smu_sdbp_header *hdr;
struct smu_sdbp_cpupiddata *piddata;
struct smu_sdbp_fvt *fvt;
s32 tmax, tdelta, maxpow, powadj;
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index a9e88edc0c7..bff1f372f18 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -159,14 +159,15 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
int pwm_fan)
{
struct smu_fan_control *fct;
- s32 *v; u32 *reg;
- char *l;
+ const s32 *v;
+ const u32 *reg;
+ const char *l;
fct = kmalloc(sizeof(struct smu_fan_control), GFP_KERNEL);
if (fct == NULL)
return NULL;
fct->ctrl.ops = &smu_fan_ops;
- l = (char *)get_property(node, "location", NULL);
+ l = get_property(node, "location", NULL);
if (l == NULL)
goto fail;
@@ -223,17 +224,17 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
goto fail;
/* Get min & max values*/
- v = (s32 *)get_property(node, "min-value", NULL);
+ v = get_property(node, "min-value", NULL);
if (v == NULL)
goto fail;
fct->min = *v;
- v = (s32 *)get_property(node, "max-value", NULL);
+ v = get_property(node, "max-value", NULL);
if (v == NULL)
goto fail;
fct->max = *v;
/* Get "reg" value */
- reg = (u32 *)get_property(node, "reg", NULL);
+ reg = get_property(node, "reg", NULL);
if (reg == NULL)
goto fail;
fct->reg = *reg;
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index e295a07a1eb..aceb61d9fbc 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -233,15 +233,15 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
{
struct wf_sat *sat;
struct wf_sat_sensor *sens;
- u32 *reg;
- char *loc, *type;
+ const u32 *reg;
+ const char *loc, *type;
u8 addr, chip, core;
struct device_node *child;
int shift, cpu, index;
char *name;
int vsens[2], isens[2];
- reg = (u32 *) get_property(dev, "reg", NULL);
+ reg = get_property(dev, "reg", NULL);
if (reg == NULL)
return;
addr = *reg;
@@ -268,7 +268,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
isens[0] = isens[1] = -1;
child = NULL;
while ((child = of_get_next_child(dev, child)) != NULL) {
- reg = (u32 *) get_property(child, "reg", NULL);
+ reg = get_property(child, "reg", NULL);
type = get_property(child, "device_type", NULL);
loc = get_property(child, "location", NULL);
if (reg == NULL || loc == NULL)
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index bed25dcf8a1..defe9922ebd 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -198,14 +198,14 @@ static struct wf_sensor_ops smu_slotspow_ops = {
static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
{
struct smu_ad_sensor *ads;
- char *c, *l;
- u32 *v;
+ const char *c, *l;
+ const u32 *v;
ads = kmalloc(sizeof(struct smu_ad_sensor), GFP_KERNEL);
if (ads == NULL)
return NULL;
- c = (char *)get_property(node, "device_type", NULL);
- l = (char *)get_property(node, "location", NULL);
+ c = get_property(node, "device_type", NULL);
+ l = get_property(node, "location", NULL);
if (c == NULL || l == NULL)
goto fail;
@@ -255,7 +255,7 @@ static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
} else
goto fail;
- v = (u32 *)get_property(node, "reg", NULL);
+ v = get_property(node, "reg", NULL);
if (v == NULL)
goto fail;
ads->reg = *v;
@@ -382,7 +382,7 @@ smu_cpu_power_create(struct wf_sensor *volts, struct wf_sensor *amps)
static void smu_fetch_param_partitions(void)
{
- struct smu_sdbp_header *hdr;
+ const struct smu_sdbp_header *hdr;
/* Get CPU voltage/current/power calibration data */
hdr = smu_get_sdb_partition(SMU_SDB_CPUVCP_ID, NULL);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6022ed12a79..bdbd34993a8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -5,6 +5,7 @@
* This file is released under the GPL.
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -78,11 +79,13 @@ struct crypt_config {
*/
struct crypt_iv_operations *iv_gen_ops;
char *iv_mode;
- void *iv_gen_private;
+ struct crypto_cipher *iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
- struct crypto_tfm *tfm;
+ char cipher[CRYPTO_MAX_ALG_NAME];
+ char chainmode[CRYPTO_MAX_ALG_NAME];
+ struct crypto_blkcipher *tfm;
unsigned int key_size;
u8 key[0];
};
@@ -96,12 +99,12 @@ static kmem_cache_t *_crypt_io_pool;
/*
* Different IV generation algorithms:
*
- * plain: the initial vector is the 32-bit low-endian version of the sector
+ * plain: the initial vector is the 32-bit little-endian version of the sector
* number, padded with zeros if necessary.
*
- * ess_iv: "encrypted sector|salt initial vector", the sector number is
- * encrypted with the bulk cipher using a salt as key. The salt
- * should be derived from the bulk cipher's key via hashing.
+ * essiv: "encrypted sector|salt initial vector", the sector number is
+ * encrypted with the bulk cipher using a salt as key. The salt
+ * should be derived from the bulk cipher's key via hashing.
*
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
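
For reference, the "plain" mode described above amounts to zeroing the IV and storing the low 32 bits of the sector number little-endian; a minimal sketch consistent with the comment (essiv then additionally encrypts the sector number under the hashed key, as the constructor below sets up):

    /* Sketch of the plain IV: 32-bit LE sector number, zero padded. */
    static int plain_iv(u8 *iv, unsigned int iv_size, sector_t sector)
    {
            memset(iv, 0, iv_size);                 /* pad with zeros */
            *(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
            return 0;
    }
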
@@ -118,11 +121,13 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- struct crypto_tfm *essiv_tfm;
- struct crypto_tfm *hash_tfm;
+ struct crypto_cipher *essiv_tfm;
+ struct crypto_hash *hash_tfm;
+ struct hash_desc desc;
struct scatterlist sg;
unsigned int saltsize;
u8 *salt;
+ int err;
if (opts == NULL) {
ti->error = "Digest algorithm missing for ESSIV mode";
@@ -130,76 +135,70 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
/* Hash the cipher key with the given hash algorithm */
- hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
- if (hash_tfm == NULL) {
+ hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
- return -EINVAL;
+ return PTR_ERR(hash_tfm);
}
- if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
- ti->error = "Expected digest algorithm for ESSIV hash";
- crypto_free_tfm(hash_tfm);
- return -EINVAL;
- }
-
- saltsize = crypto_tfm_alg_digestsize(hash_tfm);
+ saltsize = crypto_hash_digestsize(hash_tfm);
salt = kmalloc(saltsize, GFP_KERNEL);
if (salt == NULL) {
ti->error = "Error kmallocing salt storage in ESSIV";
- crypto_free_tfm(hash_tfm);
+ crypto_free_hash(hash_tfm);
return -ENOMEM;
}
sg_set_buf(&sg, cc->key, cc->key_size);
- crypto_digest_digest(hash_tfm, &sg, 1, salt);
- crypto_free_tfm(hash_tfm);
+ desc.tfm = hash_tfm;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
+ crypto_free_hash(hash_tfm);
+
+ if (err) {
+ ti->error = "Error calculating hash in ESSIV";
+ return err;
+ }
/* Setup the essiv_tfm with the given salt */
- essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
- CRYPTO_TFM_MODE_ECB |
- CRYPTO_TFM_REQ_MAY_SLEEP);
- if (essiv_tfm == NULL) {
+ essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(essiv_tfm)) {
ti->error = "Error allocating crypto tfm for ESSIV";
kfree(salt);
- return -EINVAL;
+ return PTR_ERR(essiv_tfm);
}
- if (crypto_tfm_alg_blocksize(essiv_tfm)
- != crypto_tfm_alg_ivsize(cc->tfm)) {
+ if (crypto_cipher_blocksize(essiv_tfm) !=
+ crypto_blkcipher_ivsize(cc->tfm)) {
ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
- crypto_free_tfm(essiv_tfm);
+ crypto_free_cipher(essiv_tfm);
kfree(salt);
return -EINVAL;
}
- if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
+ err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+ if (err) {
ti->error = "Failed to set key for ESSIV cipher";
- crypto_free_tfm(essiv_tfm);
+ crypto_free_cipher(essiv_tfm);
kfree(salt);
- return -EINVAL;
+ return err;
}
kfree(salt);
- cc->iv_gen_private = (void *)essiv_tfm;
+ cc->iv_gen_private = essiv_tfm;
return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
- crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private);
+ crypto_free_cipher(cc->iv_gen_private);
cc->iv_gen_private = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
- struct scatterlist sg;
-
memset(iv, 0, cc->iv_size);
*(u64 *)iv = cpu_to_le64(sector);
-
- sg_set_buf(&sg, iv, cc->iv_size);
- crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private,
- &sg, &sg, cc->iv_size);
-
+ crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
return 0;
}
@@ -220,6 +219,11 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
int write, sector_t sector)
{
u8 iv[cc->iv_size];
+ struct blkcipher_desc desc = {
+ .tfm = cc->tfm,
+ .info = iv,
+ .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+ };
int r;
if (cc->iv_gen_ops) {
@@ -228,14 +232,14 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
return r;
if (write)
- r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
+ r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
else
- r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
+ r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
} else {
if (write)
- r = crypto_cipher_encrypt(cc->tfm, out, in, length);
+ r = crypto_blkcipher_encrypt(&desc, out, in, length);
else
- r = crypto_cipher_decrypt(cc->tfm, out, in, length);
+ r = crypto_blkcipher_decrypt(&desc, out, in, length);
}
return r;
@@ -510,13 +514,12 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- struct crypto_tfm *tfm;
+ struct crypto_blkcipher *tfm;
char *tmp;
char *cipher;
char *chainmode;
char *ivmode;
char *ivopts;
- unsigned int crypto_flags;
unsigned int key_size;
unsigned long long tmpll;
@@ -556,31 +559,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ivmode = "plain";
}
- /* Choose crypto_flags according to chainmode */
- if (strcmp(chainmode, "cbc") == 0)
- crypto_flags = CRYPTO_TFM_MODE_CBC;
- else if (strcmp(chainmode, "ecb") == 0)
- crypto_flags = CRYPTO_TFM_MODE_ECB;
- else {
- ti->error = "Unknown chaining mode";
+ if (strcmp(chainmode, "ecb") && !ivmode) {
+ ti->error = "This chaining mode requires an IV mechanism";
goto bad1;
}
- if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
- ti->error = "This chaining mode requires an IV mechanism";
+ if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
+ cipher) >= CRYPTO_MAX_ALG_NAME) {
+ ti->error = "Chain mode + cipher name is too long";
goto bad1;
}
- tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
- if (!tfm) {
+ tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
ti->error = "Error allocating crypto tfm";
goto bad1;
}
- if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
- ti->error = "Expected cipher algorithm";
- goto bad2;
- }
+ strcpy(cc->cipher, cipher);
+ strcpy(cc->chainmode, chainmode);
cc->tfm = tfm;
/*
@@ -603,12 +600,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
goto bad2;
- if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv)
+ cc->iv_size = crypto_blkcipher_ivsize(tfm);
+ if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
- cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
+ cc->iv_size = max(cc->iv_size,
(unsigned int)(sizeof(u64) / sizeof(u8)));
else {
- cc->iv_size = 0;
if (cc->iv_gen_ops) {
DMWARN("Selected cipher does not support IVs");
if (cc->iv_gen_ops->dtr)
@@ -629,7 +626,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad4;
}
- if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
+ if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
ti->error = "Error setting key";
goto bad5;
}
@@ -675,7 +672,7 @@ bad3:
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
bad2:
- crypto_free_tfm(tfm);
+ crypto_free_blkcipher(tfm);
bad1:
/* Must zero key material before freeing */
memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
@@ -693,7 +690,7 @@ static void crypt_dtr(struct dm_target *ti)
kfree(cc->iv_mode);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
- crypto_free_tfm(cc->tfm);
+ crypto_free_blkcipher(cc->tfm);
dm_put_device(ti, cc->dev);
/* Must zero key material before freeing */
@@ -858,18 +855,9 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- cipher = crypto_tfm_alg_name(cc->tfm);
+ cipher = crypto_blkcipher_name(cc->tfm);
- switch(cc->tfm->crt_cipher.cit_mode) {
- case CRYPTO_TFM_MODE_CBC:
- chainmode = "cbc";
- break;
- case CRYPTO_TFM_MODE_ECB:
- chainmode = "ecb";
- break;
- default:
- BUG();
- }
+ chainmode = cc->chainmode;
if (cc->iv_mode)
DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 87bfe9e7d8c..3b4d69c0562 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -930,10 +930,13 @@ static void status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
conf->working_disks);
- for (i = 0; i < conf->raid_disks; i++)
+ rcu_read_lock();
+ for (i = 0; i < conf->raid_disks; i++) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
seq_printf(seq, "%s",
- conf->mirrors[i].rdev &&
- test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
+ rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+ }
+ rcu_read_unlock();
seq_printf(seq, "]");
}
@@ -975,7 +978,6 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
static void print_conf(conf_t *conf)
{
int i;
- mirror_info_t *tmp;
printk("RAID1 conf printout:\n");
if (!conf) {
@@ -985,14 +987,17 @@ static void print_conf(conf_t *conf)
printk(" --- wd:%d rd:%d\n", conf->working_disks,
conf->raid_disks);
+ rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
char b[BDEVNAME_SIZE];
- tmp = conf->mirrors + i;
- if (tmp->rdev)
+ mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (rdev)
printk(" disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
+ rcu_read_unlock();
}
static void close_sync(conf_t *conf)
@@ -1008,20 +1013,20 @@ static int raid1_spare_active(mddev_t *mddev)
{
int i;
conf_t *conf = mddev->private;
- mirror_info_t *tmp;
/*
* Find all failed disks within the RAID1 configuration
- * and mark them readable
+ * and mark them readable.
+ * Called under mddev lock, so rcu protection not needed.
*/
for (i = 0; i < conf->raid_disks; i++) {
- tmp = conf->mirrors + i;
- if (tmp->rdev
- && !test_bit(Faulty, &tmp->rdev->flags)
- && !test_bit(In_sync, &tmp->rdev->flags)) {
+ mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)) {
conf->working_disks++;
mddev->degraded--;
- set_bit(In_sync, &tmp->rdev->flags);
+ set_bit(In_sync, &rdev->flags);
}
}
@@ -1237,7 +1242,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
/* ouch - failed to read all of that.
* Try some synchronous reads of other devices to get
* good data, much like with normal read errors. Only
- * read into the pages we already have so they we don't
+ * read into the pages we already have so we don't
* need to re-issue the read request.
* We don't need to freeze the array, because being in an
* active sync request, there is no normal IO, and
@@ -1257,6 +1262,10 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
s = PAGE_SIZE >> 9;
do {
if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
+ /* No rcu protection needed here: devices
+ * can only be removed when no resync is
+ * active, and resync is currently active
+ */
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev->bdev,
sect + rdev->data_offset,
@@ -1463,6 +1472,11 @@ static void raid1d(mddev_t *mddev)
s = PAGE_SIZE >> 9;
do {
+ /* Note: no rcu protection needed here
+ * as this is synchronous in the raid1d thread
+ * which is the thread that might remove
+ * a device. If raid1d ever becomes multi-threaded....
+ */
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
@@ -1486,7 +1500,6 @@ static void raid1d(mddev_t *mddev)
d = conf->raid_disks;
d--;
rdev = conf->mirrors[d].rdev;
- atomic_add(s, &rdev->corrected_errors);
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
if (sync_page_io(rdev->bdev,
@@ -1509,9 +1522,11 @@ static void raid1d(mddev_t *mddev)
s<<9, conf->tmppage, READ) == 0)
/* Well, this device is dead */
md_error(mddev, rdev);
- else
+ else {
+ atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO "raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
mdname(mddev), s, (unsigned long long)(sect + rdev->data_offset), bdevname(rdev->bdev, b));
+ }
}
}
} else {
@@ -1787,19 +1802,17 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
for (i=0; i<conf->raid_disks; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
- md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
+ md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
}
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
- md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
- nr_sectors);
+ md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
-
return nr_sectors;
}
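The raid1 hunks above take one rcu_dereference() snapshot of each conf->mirrors[i].rdev inside rcu_read_lock() instead of re-reading a pointer that a writer may clear concurrently. A minimal sketch of that read-side pattern, with struct slot_obj and count_present() as hypothetical stand-ins for the rdev array:

#include <linux/rcupdate.h>

struct slot_obj;	/* hypothetical; writers retire entries via synchronize_rcu() */

static int count_present(struct slot_obj **slots, int n)
{
	int i, present = 0;

	rcu_read_lock();
	for (i = 0; i < n; i++) {
		/* snapshot once; slots[i] may be set to NULL at any time */
		struct slot_obj *obj = rcu_dereference(slots[i]);

		if (obj)
			present++;	/* obj stays valid until rcu_read_unlock() */
	}
	rcu_read_unlock();
	return present;
}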
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index ef52e6da01e..ed4aa4e7912 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -53,7 +53,7 @@ config VIDEO_V4L1_COMPAT
If you are unsure as to whether this is required, answer Y.
config VIDEO_V4L2
- tristate
+ bool
default y
source "drivers/media/video/Kconfig"
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 8393d472d3b..7e0cedc557d 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1190,6 +1190,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
}
return err;
}
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
case VIDIOCGMBUF:
{
struct video_mbuf *mbuf = arg;
@@ -1218,6 +1219,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
mutex_unlock(&q->lock);
return 0;
}
+#endif
default:
return v4l_compat_translate_ioctl(inode,file,cmd,arg,
saa7146_video_do_ioctl);
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig
index d7f1fd5b7b0..49a06fc54c5 100644
--- a/drivers/media/dvb/b2c2/Kconfig
+++ b/drivers/media/dvb/b2c2/Kconfig
@@ -1,6 +1,7 @@
config DVB_B2C2_FLEXCOP
tristate "Technisat/B2C2 FlexCopII(b) and FlexCopIII adapters"
depends on DVB_CORE && I2C
+ select DVB_PLL
select DVB_STV0299
select DVB_MT352
select DVB_MT312
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index f394002118f..7d0ee1ab290 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -1,6 +1,7 @@
config DVB_BT8XX
tristate "BT8xx based PCI cards"
depends on DVB_CORE && PCI && I2C && VIDEO_BT848
+ select DVB_PLL
select DVB_MT352
select DVB_SP887X
select DVB_NXT6000
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 3bc6722a644..75824b77198 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -2,6 +2,7 @@ config DVB_USB
tristate "Support for various USB DVB devices"
depends on DVB_CORE && USB && I2C
select FW_LOADER
+ select DVB_PLL
help
By enabling this you will be able to choose the various supported
USB1.1 and USB2.0 DVB devices.
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 0ef361f0309..db978555b1e 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -6,43 +6,43 @@ comment "DVB-S (satellite) frontends"
config DVB_STV0299
tristate "ST STV0299 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_CX24110
tristate "Conexant CX24110 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_CX24123
tristate "Conexant CX24123 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_TDA8083
tristate "Philips TDA8083 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_MT312
tristate "Zarlink VP310/MT312 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_VES1X93
tristate "VLSI VES1893 or VES1993 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_S5H1420
tristate "Samsung S5H1420 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-S tuner module. Say Y when you want to support this frontend.
@@ -51,7 +51,7 @@ comment "DVB-T (terrestrial) frontends"
config DVB_SP8870
tristate "Spase sp8870 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select FW_LOADER
help
A DVB-T tuner module. Say Y when you want to support this frontend.
@@ -63,7 +63,7 @@ config DVB_SP8870
config DVB_SP887X
tristate "Spase sp887x based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select FW_LOADER
help
A DVB-T tuner module. Say Y when you want to support this frontend.
@@ -75,25 +75,25 @@ config DVB_SP887X
config DVB_CX22700
tristate "Conexant CX22700 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_CX22702
tristate "Conexant cx22702 demodulator (OFDM)"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_L64781
tristate "LSI L64781"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_TDA1004X
tristate "Philips TDA10045H/TDA10046H based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select FW_LOADER
help
A DVB-T tuner module. Say Y when you want to support this frontend.
@@ -106,32 +106,32 @@ config DVB_TDA1004X
config DVB_NXT6000
tristate "NxtWave Communications NXT6000 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_MT352
tristate "Zarlink MT352 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_ZL10353
tristate "Zarlink ZL10353 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_DIB3000MB
tristate "DiBcom 3000M-B"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Designed for mobile usage. Say Y when you want
to support this frontend.
config DVB_DIB3000MC
tristate "DiBcom 3000P/M-C"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-T tuner module. Designed for mobile usage. Say Y when you want
to support this frontend.
@@ -141,19 +141,19 @@ comment "DVB-C (cable) frontends"
config DVB_VES1820
tristate "VLSI VES1820 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-C tuner module. Say Y when you want to support this frontend.
config DVB_TDA10021
tristate "Philips TDA10021 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-C tuner module. Say Y when you want to support this frontend.
config DVB_STV0297
tristate "ST STV0297 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
A DVB-C tuner module. Say Y when you want to support this frontend.
@@ -162,7 +162,7 @@ comment "ATSC (North American/Korean Terrestrial/Cable DTV) frontends"
config DVB_NXT200X
tristate "NxtWave Communications NXT2002/NXT2004 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select FW_LOADER
help
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
@@ -176,7 +176,7 @@ config DVB_NXT200X
config DVB_OR51211
tristate "Oren OR51211 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select FW_LOADER
help
An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
@@ -188,7 +188,7 @@ config DVB_OR51211
config DVB_OR51132
tristate "Oren OR51132 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select FW_LOADER
help
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
@@ -203,7 +203,7 @@ config DVB_OR51132
config DVB_BCM3510
tristate "Broadcom BCM3510"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select FW_LOADER
help
An ATSC 8VSB/16VSB and QAM64/256 tuner module. Say Y when you want to
@@ -211,7 +211,7 @@ config DVB_BCM3510
config DVB_LGDT330X
tristate "LG Electronics LGDT3302/LGDT3303 based"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
@@ -220,15 +220,19 @@ config DVB_LGDT330X
comment "Miscellaneous devices"
depends on DVB_CORE
+config DVB_PLL
+ tristate
+ depends on DVB_CORE && I2C
+
config DVB_LNBP21
tristate "LNBP21 SEC controller"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
An SEC control chip.
config DVB_ISL6421
tristate "ISL6421 SEC controller"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
help
An SEC control chip.
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 5222245c7f5..0e4880b6db1 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -4,7 +4,7 @@
EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/
-obj-$(CONFIG_DVB_CORE) += dvb-pll.o
+obj-$(CONFIG_DVB_PLL) += dvb-pll.o
obj-$(CONFIG_DVB_STV0299) += stv0299.o
obj-$(CONFIG_DVB_SP8870) += sp8870.o
obj-$(CONFIG_DVB_CX22700) += cx22700.o
diff --git a/drivers/media/dvb/pluto2/Kconfig b/drivers/media/dvb/pluto2/Kconfig
index 7d8e6e87bdb..9b84b1bdc31 100644
--- a/drivers/media/dvb/pluto2/Kconfig
+++ b/drivers/media/dvb/pluto2/Kconfig
@@ -2,6 +2,7 @@ config DVB_PLUTO2
tristate "Pluto2 cards"
depends on DVB_CORE && PCI && I2C
select I2C_ALGOBIT
+ select DVB_PLL
select DVB_TDA1004X
help
Support for PCI cards based on the Pluto2 FPGA like the Satelco
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 987881fa988..5fb097595cf 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -3,6 +3,7 @@ config DVB_AV7110
depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
select FW_LOADER
select VIDEO_SAA7146_VV
+ select DVB_PLL
select DVB_VES1820
select DVB_VES1X93
select DVB_STV0299
@@ -61,6 +62,7 @@ config DVB_BUDGET
tristate "Budget cards"
depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
select VIDEO_SAA7146
+ select DVB_PLL
select DVB_STV0299
select DVB_VES1X93
select DVB_VES1820
@@ -83,6 +85,7 @@ config DVB_BUDGET_CI
tristate "Budget cards with onboard CI connector"
depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
select VIDEO_SAA7146
+ select DVB_PLL
select DVB_STV0297
select DVB_STV0299
select DVB_TDA1004X
@@ -104,6 +107,7 @@ config DVB_BUDGET_AV
tristate "Budget cards with analog video inputs"
depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
select VIDEO_SAA7146_VV
+ select DVB_PLL
select DVB_STV0299
select DVB_TDA1004X
select DVB_TDA10021
@@ -122,6 +126,7 @@ config DVB_BUDGET_PATCH
tristate "AV7110 cards with Budget Patch"
depends on DVB_CORE && DVB_BUDGET && VIDEO_V4L1
select DVB_AV7110
+ select DVB_PLL
select DVB_STV0299
select DVB_VES1X93
select DVB_TDA8083
diff --git a/drivers/media/dvb/ttusb-budget/Kconfig b/drivers/media/dvb/ttusb-budget/Kconfig
index 92c7cdcf898..46a6a60d2ab 100644
--- a/drivers/media/dvb/ttusb-budget/Kconfig
+++ b/drivers/media/dvb/ttusb-budget/Kconfig
@@ -1,6 +1,7 @@
config DVB_TTUSB_BUDGET
tristate "Technotrend/Hauppauge Nova-USB devices"
- depends on DVB_CORE && USB
+ depends on DVB_CORE && USB && I2C
+ select DVB_PLL
select DVB_CX22700
select DVB_TDA1004X
select DVB_VES1820
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 732bf1e7c32..94d078b77ba 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -260,7 +260,7 @@ source "drivers/media/video/saa7134/Kconfig"
config VIDEO_MXB
tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
- depends on PCI && VIDEO_V4L1
+ depends on PCI && VIDEO_V4L1 && I2C
select VIDEO_SAA7146_VV
select VIDEO_TUNER
---help---
@@ -272,7 +272,7 @@ config VIDEO_MXB
config VIDEO_DPC
tristate "Philips-Semiconductors 'dpc7146 demonstration board'"
- depends on PCI && VIDEO_V4L1
+ depends on PCI && VIDEO_V4L1 && I2C
select VIDEO_SAA7146_VV
select VIDEO_V4L2
---help---
@@ -287,7 +287,7 @@ config VIDEO_DPC
config VIDEO_HEXIUM_ORION
tristate "Hexium HV-PCI6 and Orion frame grabber"
- depends on PCI && VIDEO_V4L1
+ depends on PCI && VIDEO_V4L1 && I2C
select VIDEO_SAA7146_VV
select VIDEO_V4L2
---help---
@@ -299,7 +299,7 @@ config VIDEO_HEXIUM_ORION
config VIDEO_HEXIUM_GEMINI
tristate "Hexium Gemini frame grabber"
- depends on PCI && VIDEO_V4L1
+ depends on PCI && VIDEO_V4L1 && I2C
select VIDEO_SAA7146_VV
select VIDEO_V4L2
---help---
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index b41f81d2372..933d6db09ac 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -303,6 +303,7 @@ int bttv_input_init(struct bttv *btv)
ir->mask_keyup = 0x010000;
ir->polling = 50; // ms
break;
+ case BTTV_BOARD_PV_M4900:
case BTTV_BOARD_PV_BT878P_9B:
case BTTV_BOARD_PV_BT878P_PLUS:
ir_codes = ir_codes_pixelview;
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 80e23ee9801..7a94e6a1192 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -51,6 +51,7 @@ config VIDEO_CX88_DVB
tristate "DVB/ATSC Support for cx2388x based TV cards"
depends on VIDEO_CX88 && DVB_CORE
select VIDEO_BUF_DVB
+ select DVB_PLL
---help---
This adds support for DVB/ATSC cards based on the
Conexant 2388x chip.
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index e1c1805df1f..f5543166d19 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -40,6 +40,7 @@ config VIDEO_SAA7134_DVB
depends on VIDEO_SAA7134 && DVB_CORE
select VIDEO_BUF_DVB
select FW_LOADER
+ select DVB_PLL
---help---
This adds support for DVB cards based on the
Philips saa7134 chip.
diff --git a/drivers/media/video/tuner-types.c b/drivers/media/video/tuner-types.c
index d7eadc2c298..8b542599ed4 100644
--- a/drivers/media/video/tuner-types.c
+++ b/drivers/media/video/tuner-types.c
@@ -926,11 +926,17 @@ static struct tuner_params tuner_lg_tdvs_h06xf_params[] = {
/* ------------ TUNER_YMEC_TVF66T5_B_DFF - Philips PAL ------------ */
+static struct tuner_range tuner_ymec_tvf66t5_b_dff_pal_ranges[] = {
+ { 16 * 160.25 /*MHz*/, 0x8e, 0x01, },
+ { 16 * 464.25 /*MHz*/, 0x8e, 0x02, },
+ { 16 * 999.99, 0x8e, 0x08, },
+};
+
static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
- .ranges = tuner_tena_9533_di_pal_ranges,
- .count = ARRAY_SIZE(tuner_tena_9533_di_pal_ranges),
+ .ranges = tuner_ymec_tvf66t5_b_dff_pal_ranges,
+ .count = ARRAY_SIZE(tuner_ymec_tvf66t5_b_dff_pal_ranges),
},
};
diff --git a/drivers/media/video/zoran.h b/drivers/media/video/zoran.h
index ffcda95ed9d..8fb4a3414e0 100644
--- a/drivers/media/video/zoran.h
+++ b/drivers/media/video/zoran.h
@@ -267,7 +267,7 @@ struct zoran_v4l_settings {
};
/* whoops, this one is undeclared if !v4l2 */
-#ifndef HAVE_V4L2
+#ifndef CONFIG_VIDEO_V4L2
struct v4l2_jpegcompression {
int quality;
int APPn;
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index d9a5876eb38..5f90db27892 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -86,7 +86,7 @@
#include "zoran_device.h"
#include "zoran_card.h"
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
/* we declare some card type definitions here, they mean
* the same as the v4l1 ZORAN_VID_TYPE above, except it's v4l2 */
#define ZORAN_V4L2_VID_FLAGS ( \
@@ -103,7 +103,7 @@ const struct zoran_format zoran_formats[] = {
{
.name = "15-bit RGB",
.palette = VIDEO_PALETTE_RGB555,
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
#ifdef __LITTLE_ENDIAN
.fourcc = V4L2_PIX_FMT_RGB555,
#else
@@ -117,7 +117,7 @@ const struct zoran_format zoran_formats[] = {
}, {
.name = "16-bit RGB",
.palette = VIDEO_PALETTE_RGB565,
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
#ifdef __LITTLE_ENDIAN
.fourcc = V4L2_PIX_FMT_RGB565,
#else
@@ -131,7 +131,7 @@ const struct zoran_format zoran_formats[] = {
}, {
.name = "24-bit RGB",
.palette = VIDEO_PALETTE_RGB24,
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
#ifdef __LITTLE_ENDIAN
.fourcc = V4L2_PIX_FMT_BGR24,
#else
@@ -145,7 +145,7 @@ const struct zoran_format zoran_formats[] = {
}, {
.name = "32-bit RGB",
.palette = VIDEO_PALETTE_RGB32,
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
#ifdef __LITTLE_ENDIAN
.fourcc = V4L2_PIX_FMT_BGR32,
#else
@@ -159,7 +159,7 @@ const struct zoran_format zoran_formats[] = {
}, {
.name = "4:2:2, packed, YUYV",
.palette = VIDEO_PALETTE_YUV422,
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
.fourcc = V4L2_PIX_FMT_YUYV,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
#endif
@@ -169,7 +169,7 @@ const struct zoran_format zoran_formats[] = {
}, {
.name = "Hardware-encoded Motion-JPEG",
.palette = -1,
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
.fourcc = V4L2_PIX_FMT_MJPEG,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
#endif
@@ -210,7 +210,7 @@ static int lock_norm = 0; /* 1=Don't change TV standard (norm) */
module_param(lock_norm, int, 0);
MODULE_PARM_DESC(lock_norm, "Users can't change norm");
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
/* small helper function for calculating buffersizes for v4l2
* we calculate the nearest higher power-of-two, which
* will be the recommended buffersize */
@@ -1761,7 +1761,7 @@ setup_overlay (struct file *file,
return wait_grab_pending(zr);
}
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
/* get the status of a buffer in the clients buffer queue */
static int
zoran_v4l2_buffer_status (struct file *file,
@@ -2676,7 +2676,7 @@ zoran_do_ioctl (struct inode *inode,
}
break;
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
/* The new video4linux2 capture interface - much nicer than video4linux1, since
* it allows for integrating the JPEG capturing calls inside standard v4l2
@@ -4689,7 +4689,7 @@ static struct file_operations zoran_fops = {
struct video_device zoran_template __devinitdata = {
.name = ZORAN_NAME,
.type = ZORAN_VID_TYPE,
-#ifdef HAVE_V4L2
+#ifdef CONFIG_VIDEO_V4L2
.type2 = ZORAN_V4L2_VID_FLAGS,
#endif
.hardware = ZORAN_HARDWARE,
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 7ca9e95bdf8..fb6565b98f3 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -91,6 +91,8 @@ struct imxmci_host {
int dma_allocated;
unsigned char actual_bus_width;
+
+ int prev_cmd_code;
};
#define IMXMCI_PEND_IRQ_b 0
@@ -248,16 +250,14 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
* partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
* This is required for SCR read at least.
*/
- if (datasz < 64) {
+ if (datasz < 512) {
host->dma_size = datasz;
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
/* Hack to enable read SCR */
- if(datasz < 16) {
- MMC_NOB = 1;
- MMC_BLK_LEN = 16;
- }
+ MMC_NOB = 1;
+ MMC_BLK_LEN = 512;
} else {
host->dma_dir = DMA_TO_DEVICE;
}
@@ -409,6 +409,9 @@ static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *
spin_unlock_irqrestore(&host->lock, flags);
+ if(req && req->cmd)
+ host->prev_cmd_code = req->cmd->opcode;
+
host->req = NULL;
host->cmd = NULL;
host->data = NULL;
@@ -553,7 +556,6 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
int i;
int burst_len;
- int flush_len;
int trans_done = 0;
unsigned int stat = *pstat;
@@ -566,44 +568,43 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
stat);
+ udelay(20); /* required for clocks < 8MHz*/
+
if(host->dma_dir == DMA_FROM_DEVICE) {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE,
- 20, "imxmci_cpu_driven_data read");
+ 50, "imxmci_cpu_driven_data read");
while((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
- (host->data_cnt < host->dma_size)) {
- if(burst_len >= host->dma_size - host->data_cnt) {
- flush_len = burst_len;
- burst_len = host->dma_size - host->data_cnt;
- flush_len -= burst_len;
- host->data_cnt = host->dma_size;
- trans_done = 1;
- } else {
- flush_len = 0;
- host->data_cnt += burst_len;
- }
+ (host->data_cnt < 512)) {
+
+ udelay(20); /* required for clocks < 8MHz*/
for(i = burst_len; i>=2 ; i-=2) {
- *(host->data_ptr++) = MMC_BUFFER_ACCESS;
- udelay(20); /* required for clocks < 8MHz*/
+ u16 data;
+ data = MMC_BUFFER_ACCESS;
+ udelay(10); /* required for clocks < 8MHz*/
+ if(host->data_cnt+2 <= host->dma_size) {
+ *(host->data_ptr++) = data;
+ } else {
+ if(host->data_cnt < host->dma_size)
+ *(u8*)(host->data_ptr) = data;
+ }
+ host->data_cnt += 2;
}
- if(i == 1)
- *(u8*)(host->data_ptr) = MMC_BUFFER_ACCESS;
-
stat = MMC_STATUS;
- /* Flush extra bytes from FIFO */
- while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
- i = MMC_BUFFER_ACCESS;
- stat = MMC_STATUS;
- stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
- }
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read burst %d STATUS = 0x%x\n",
- burst_len, stat);
+ dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
+ host->data_cnt, burst_len, stat);
}
+
+ if((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
+ trans_done = 1;
+
+ if(host->dma_size & 0x1ff)
+ stat &= ~STATUS_CRC_READ_ERR;
+
} else {
imxmci_busy_wait_for_status(host, &stat,
STATUS_APPL_BUFF_FE,
@@ -692,8 +693,8 @@ static void imxmci_tasklet_fnc(unsigned long data)
what, stat, MMC_INT_MASK);
dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
- dev_err(mmc_dev(host->mmc), "CMD%d, bus %d-bit, dma_size = 0x%x\n",
- host->cmd?host->cmd->opcode:0, 1<<host->actual_bus_width, host->dma_size);
+ dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
+ host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
}
if(!host->present || timeout)
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 33525bdf2ab..74eaaee66de 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -247,6 +247,55 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
EXPORT_SYMBOL(mmc_wait_for_app_cmd);
+/**
+ * mmc_set_data_timeout - set the timeout for a data command
+ * @data: data phase for command
+ * @card: the MMC card associated with the data transfer
+ * @write: flag to differentiate reads from writes
+ */
+void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
+ int write)
+{
+ unsigned int mult;
+
+ /*
+ * SD cards use a 100 multiplier rather than 10
+ */
+ mult = mmc_card_sd(card) ? 100 : 10;
+
+ /*
+ * Scale up the multiplier (and therefore the timeout) by
+ * the r2w factor for writes.
+ */
+ if (write)
+ mult <<= card->csd.r2w_factor;
+
+ data->timeout_ns = card->csd.tacc_ns * mult;
+ data->timeout_clks = card->csd.tacc_clks * mult;
+
+ /*
+ * SD cards also have an upper limit on the timeout.
+ */
+ if (mmc_card_sd(card)) {
+ unsigned int timeout_us, limit_us;
+
+ timeout_us = data->timeout_ns / 1000;
+ timeout_us += data->timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (write)
+ limit_us = 250000;
+ else
+ limit_us = 100000;
+
+ if (timeout_us > limit_us) {
+ data->timeout_ns = limit_us * 1000;
+ data->timeout_clks = 0;
+ }
+ }
+}
+EXPORT_SYMBOL(mmc_set_data_timeout);
+
static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
/**
@@ -908,11 +957,9 @@ static void mmc_read_scrs(struct mmc_host *host)
{
int err;
struct mmc_card *card;
-
struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_data data;
-
struct scatterlist sg;
list_for_each_entry(card, &host->cards, node) {
@@ -947,8 +994,8 @@ static void mmc_read_scrs(struct mmc_host *host)
memset(&data, 0, sizeof(struct mmc_data));
- data.timeout_ns = card->csd.tacc_ns * 10;
- data.timeout_clks = card->csd.tacc_clks * 10;
+ mmc_set_data_timeout(&data, card, 0);
+
data.blksz_bits = 3;
data.blksz = 1 << 3;
data.blocks = 1;
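mmc_set_data_timeout() above centralizes the timeout arithmetic that mmc_read_scrs() and mmc_block previously open-coded. A hedged usage sketch with a worked example in the comments; the header choices are era assumptions and the numbers are illustrative, not taken from any specific card:

#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>	/* assumed 2.6.18-era home of struct mmc_data */

static void fill_data_timeout(struct mmc_data *data,
			      const struct mmc_card *card, int write)
{
	/*
	 * Worked example for an SD card with csd.tacc_ns = 1500000
	 * (1.5 ms) and csd.tacc_clks = 0:
	 *   read:  mult = 100, timeout_ns = 150 ms, which exceeds the
	 *          100 ms SD read cap, so it is clamped to 100 ms.
	 *   write: mult = 100 << csd.r2w_factor (400 for a factor of
	 *          2), giving 600 ms, clamped to the 250 ms write cap.
	 */
	mmc_set_data_timeout(data, card, write);
}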
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 115cc21094b..a0e0dad1b41 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -30,6 +30,7 @@
#include <linux/mutex.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
#include <asm/system.h>
@@ -171,8 +172,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.arg = req->sector << 9;
brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.timeout_ns = card->csd.tacc_ns * 10;
- brq.data.timeout_clks = card->csd.tacc_clks * 10;
brq.data.blksz_bits = md->block_bits;
brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
@@ -180,6 +179,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
+
if (rq_data_dir(req) == READ) {
brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
brq.data.flags |= MMC_DATA_READ;
@@ -187,12 +188,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.opcode = MMC_WRITE_BLOCK;
brq.data.flags |= MMC_DATA_WRITE;
brq.data.blocks = 1;
-
- /*
- * Scale up the timeout by the r2w factor
- */
- brq.data.timeout_ns <<= card->csd.r2w_factor;
- brq.data.timeout_clks <<= card->csd.r2w_factor;
}
if (brq.data.blocks > 1) {
@@ -324,52 +319,11 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->read_only = mmc_blk_readonly(card);
/*
- * Figure out a workable block size. MMC cards have:
- * - two block sizes, one for read and one for write.
- * - may support partial reads and/or writes
- * (allows block sizes smaller than specified)
- */
- md->block_bits = card->csd.read_blkbits;
- if (card->csd.write_blkbits != card->csd.read_blkbits) {
- if (card->csd.write_blkbits < card->csd.read_blkbits &&
- card->csd.read_partial) {
- /*
- * write block size is smaller than read block
- * size, but we support partial reads, so choose
- * the smaller write block size.
- */
- md->block_bits = card->csd.write_blkbits;
- } else if (card->csd.write_blkbits > card->csd.read_blkbits &&
- card->csd.write_partial) {
- /*
- * read block size is smaller than write block
- * size, but we support partial writes. Use read
- * block size.
- */
- } else {
- /*
- * We don't support this configuration for writes.
- */
- printk(KERN_ERR "%s: unable to select block size for "
- "writing (rb%u wb%u rp%u wp%u)\n",
- mmc_card_id(card),
- 1 << card->csd.read_blkbits,
- 1 << card->csd.write_blkbits,
- card->csd.read_partial,
- card->csd.write_partial);
- md->read_only = 1;
- }
- }
-
- /*
- * Refuse to allow block sizes smaller than 512 bytes.
+ * Both SD and MMC specifications state (although a bit
+ * unclearly in the MMC case) that a block size of 512
+ * bytes must always be supported by the card.
*/
- if (md->block_bits < 9) {
- printk(KERN_ERR "%s: unable to support block size %u\n",
- mmc_card_id(card), 1 << md->block_bits);
- ret = -EINVAL;
- goto err_kfree;
- }
+ md->block_bits = 9;
md->disk = alloc_disk(1 << MMC_SHIFT);
if (md->disk == NULL) {
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 1344ad7a4b1..717e90448fc 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -263,6 +263,14 @@ config RFD_FTL
http://www.gensw.com/pages/prod/bios/rfd.htm
+config SSFDC
+ bool "NAND SSFDC (SmartMedia) read only translation layer"
+ depends on MTD
+ default n
+ help
+ This enables read-only access to SmartMedia-formatted NAND
+ flash. You can mount it with the FAT file system.
+
source "drivers/mtd/chips/Kconfig"
source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index fc9374407c2..1e36b9aed98 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FTL) += ftl.o mtd_blkdevs.o
obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs.o
obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs.o
obj-$(CONFIG_RFD_FTL) += rfd_ftl.o mtd_blkdevs.o
+obj-$(CONFIG_SSFDC) += ssfdc.o mtd_blkdevs.o
nftl-objs := nftlcore.o nftlmount.o
inftl-objs := inftlcore.o inftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 39edb8250fb..7ea49a0d5ec 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -908,7 +908,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
static int __xipram xip_wait_for_operation(
struct map_info *map, struct flchip *chip,
- unsigned long adr, int *chip_op_time )
+ unsigned long adr, unsigned int chip_op_time )
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
@@ -917,7 +917,7 @@ static int __xipram xip_wait_for_operation(
flstate_t oldstate, newstate;
start = xip_currtime();
- usec = *chip_op_time * 8;
+ usec = chip_op_time * 8;
if (usec == 0)
usec = 500000;
done = 0;
@@ -1027,8 +1027,8 @@ static int __xipram xip_wait_for_operation(
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
-#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
- xip_wait_for_operation(map, chip, cmd_adr, p_usec)
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
+ xip_wait_for_operation(map, chip, cmd_adr, usec)
#else
@@ -1040,64 +1040,64 @@ static int __xipram xip_wait_for_operation(
static int inval_cache_and_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
- int *chip_op_time )
+ unsigned int chip_op_time)
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80);
- int z, chip_state = chip->state;
- unsigned long timeo;
+ int chip_state = chip->state;
+ unsigned int timeo, sleep_time;
spin_unlock(chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
- if (*chip_op_time)
- cfi_udelay(*chip_op_time);
spin_lock(chip->mutex);
- timeo = *chip_op_time * 8 * HZ / 1000000;
- if (timeo < HZ/2)
- timeo = HZ/2;
- timeo += jiffies;
+ /* set our timeout to 8 times the expected delay */
+ timeo = chip_op_time * 8;
+ if (!timeo)
+ timeo = 500000;
+ sleep_time = chip_op_time / 2;
- z = 0;
for (;;) {
- if (chip->state != chip_state) {
- /* Someone's suspended the operation: sleep */
- DECLARE_WAITQUEUE(wait, current);
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock(chip->mutex);
- continue;
- }
-
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
+ if (!timeo) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
return -ETIME;
}
- /* Latency issues. Drop the lock, wait a while and retry */
- z++;
+ /* OK Still waiting. Drop the lock, wait a while and retry. */
spin_unlock(chip->mutex);
- cfi_udelay(1);
+ if (sleep_time >= 1000000/HZ) {
+ /*
+ * At least one jiffy of the expected delay
+ * remains, so it can be spent in a sleeping
+ * delay instead of busy waiting.
+ */
+ msleep(sleep_time/1000);
+ timeo -= sleep_time;
+ sleep_time = 1000000/HZ;
+ } else {
+ udelay(1);
+ cond_resched();
+ timeo--;
+ }
spin_lock(chip->mutex);
- }
- if (!z) {
- if (!--(*chip_op_time))
- *chip_op_time = 1;
- } else if (z > 1)
- ++(*chip_op_time);
+ if (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ }
+ }
/* Done and happy. */
chip->state = FL_STATUS;
@@ -1107,8 +1107,7 @@ static int inval_cache_and_wait_for_operation(
#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
- ({ int __udelay = (udelay); \
- INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
+ INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay)
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
@@ -1332,7 +1331,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, map_bankwidth(map),
- &chip->word_write_time);
+ chip->word_write_time);
if (ret) {
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
@@ -1569,7 +1568,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
adr, len,
- &chip->buffer_write_time);
+ chip->buffer_write_time);
if (ret) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
@@ -1704,7 +1703,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
- &chip->erase_time);
+ chip->erase_time);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
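The rewritten wait path above polls in two phases: it sleeps while at least one jiffy of the expected delay remains, then falls back to 1 us busy waits with cond_resched() near completion, giving the operation an 8x margin before -ETIME. A self-contained sketch of that strategy; wait_op() and poll_done() are illustrative names, not kernel API:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int wait_op(unsigned int expected_us,
		   int (*poll_done)(void *ctx), void *ctx)
{
	unsigned int timeo = expected_us * 8;	/* generous upper bound */
	unsigned int sleep_time = expected_us / 2;

	if (!timeo)
		timeo = 500000;			/* fallback: 0.5 s */

	for (;;) {
		if (poll_done(ctx))
			return 0;		/* device reports done */
		if (!timeo)
			return -ETIME;		/* margin exhausted */
		if (sleep_time >= 1000000 / HZ) {
			/* at least a jiffy left: sleep, don't spin */
			msleep(sleep_time / 1000);
			timeo -= sleep_time;
			sleep_time = 1000000 / HZ;
		} else {
			/* close to completion: short polite busy wait */
			udelay(1);
			cond_resched();
			timeo--;
		}
	}
}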
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9885726a16e..702ae4cd869 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -45,9 +45,11 @@
#define MAX_WORD_RETRIES 3
#define MANUFACTURER_AMD 0x0001
+#define MANUFACTURER_ATMEL 0x001F
#define MANUFACTURER_SST 0x00BF
#define SST49LF004B 0x0060
#define SST49LF008A 0x005a
+#define AT49BV6416 0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -68,6 +70,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
@@ -161,6 +166,26 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
}
}
+/* Atmel chips don't use the same PRI format as AMD chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ struct cfi_pri_atmel atmel_pri;
+
+ memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+ memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+ if (atmel_pri.Features & 0x02)
+ extp->EraseSuspend = 2;
+
+ if (atmel_pri.BottomBoot)
+ extp->TopBottom = 2;
+ else
+ extp->TopBottom = 3;
+}
+
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
/* Setup for chips with a secsi area */
@@ -179,6 +204,17 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
}
+/*
+ * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
+ * locked by default.
+ */
+static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
+{
+ mtd->lock = cfi_atmel_lock;
+ mtd->unlock = cfi_atmel_unlock;
+ mtd->flags |= MTD_STUPID_LOCK;
+}
+
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
@@ -192,6 +228,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
@@ -207,6 +244,7 @@ static struct cfi_fixup fixup_table[] = {
* we know that is the case.
*/
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
+ { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
{ 0, 0, NULL, NULL }
};
@@ -1607,6 +1645,80 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
return 0;
}
+static int do_atmel_lock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret)
+ goto out_unlock;
+ chip->state = FL_LOCKING;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
+ __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ map_write(map, CMD(0x40), chip->start + adr);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ ret = 0;
+
+out_unlock:
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
+ if (ret)
+ goto out_unlock;
+ chip->state = FL_UNLOCKING;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
+ __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ map_write(map, CMD(0x70), adr);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ ret = 0;
+
+out_unlock:
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
+}
+
+static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
+}
+
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
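With the fixup above in place, an AT49BV6416 comes up with every sector locked and MTD_STUPID_LOCK set, so upper layers know a blanket unlock is safe. A hypothetical caller's view of the new hooks; prepare_region() is illustrative, not kernel API:

#include <linux/mtd/mtd.h>

static int prepare_region(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	/*
	 * Chips flagged MTD_STUPID_LOCK power up locked; unlock the
	 * target range (routed to cfi_atmel_unlock() by the fixup)
	 * before erasing or writing it.
	 */
	if ((mtd->flags & MTD_STUPID_LOCK) && mtd->unlock)
		return mtd->unlock(mtd, ofs, len);
	return 0;
}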
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 8f39d0a3143..1154dac715a 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -111,6 +111,7 @@
#define MX29LV040C 0x004F
#define MX29LV160T 0x22C4
#define MX29LV160B 0x2249
+#define MX29F040 0x00A4
#define MX29F016 0x00AD
#define MX29F002T 0x00B0
#define MX29F004T 0x0045
@@ -1172,6 +1173,19 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
.mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29F040,
+ .name = "Macronix MX29F040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
.uaddr = {
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ede3561be87..401c6a294ba 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -18,6 +18,7 @@
#include <linux/mtd/mtd.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
+#include <linux/mount.h>
#define VERSION "$Revision: 1.30 $"
@@ -236,6 +237,8 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
}
return 0;
}
+
+
static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
@@ -299,6 +302,19 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Get a handle on the device */
bdev = open_bdev_excl(devname, O_RDWR, NULL);
+#ifndef MODULE
+ if (IS_ERR(bdev)) {
+
+ /* We might not have rootfs mounted at this point. Try
+ to resolve the device name by other means. */
+
+ dev_t dev = name_to_dev_t(devname);
+ if (dev != 0) {
+ bdev = open_by_devnum(dev, FMODE_WRITE | FMODE_READ);
+ }
+ }
+#endif
+
if (IS_ERR(bdev)) {
ERROR("error: cannot open device %s", devname);
goto devinit_err;
@@ -393,26 +409,6 @@ static int parse_num(size_t *num, const char *token)
}
-static int parse_name(char **pname, const char *token, size_t limit)
-{
- size_t len;
- char *name;
-
- len = strlen(token) + 1;
- if (len > limit)
- return -ENOSPC;
-
- name = kmalloc(len, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
-
- strcpy(name, token);
-
- *pname = name;
- return 0;
-}
-
-
static inline void kill_final_newline(char *str)
{
char *newline = strrchr(str, '\n');
@@ -426,9 +422,15 @@ static inline void kill_final_newline(char *str)
return 0; \
} while (0)
-static int block2mtd_setup(const char *val, struct kernel_param *kp)
+#ifndef MODULE
+static int block2mtd_init_called = 0;
+static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
+#endif
+
+
+static int block2mtd_setup2(const char *val)
{
- char buf[80+12]; /* 80 for device, 12 for erase size */
+ char buf[80 + 12]; /* 80 for device, 12 for erase size */
char *str = buf;
char *token[2];
char *name;
@@ -450,13 +452,9 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
if (!token[0])
parse_err("no argument");
- ret = parse_name(&name, token[0], 80);
- if (ret == -ENOMEM)
- parse_err("out of memory");
- if (ret == -ENOSPC)
- parse_err("name too long");
- if (ret)
- return 0;
+ name = token[0];
+ if (strlen(name) + 1 > 80)
+ parse_err("device name too long");
if (token[1]) {
ret = parse_num(&erase_size, token[1]);
@@ -472,13 +470,48 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
}
+static int block2mtd_setup(const char *val, struct kernel_param *kp)
+{
+#ifdef MODULE
+ return block2mtd_setup2(val);
+#else
+ /* If more parameters are later passed in via
+ /sys/module/block2mtd/parameters/block2mtd
+ and block2mtd_init() has already been called,
+ we can parse the argument now. */
+
+ if (block2mtd_init_called)
+ return block2mtd_setup2(val);
+
+ /* During the early boot stage we only save the parameters
+ here; we must parse them later. If the param was passed
+ on the kernel boot command line, block2mtd_setup() is
+ called so early that it is not possible to resolve
+ the device (even kmalloc() fails). Defer that work to
+ block2mtd_setup2(). */
+
+ strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
+
+ return 0;
+#endif
+}
+
+
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
static int __init block2mtd_init(void)
{
+ int ret = 0;
INFO("version " VERSION);
- return 0;
+
+#ifndef MODULE
+ if (strlen(block2mtd_paramline))
+ ret = block2mtd_setup2(block2mtd_paramline);
+ block2mtd_init_called = 1;
+#endif
+
+ return ret;
}
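With this deferral, the documented block2mtd=<dev>[,<erasesize>] syntax behaves the same in both places the comments mention: passed on the kernel command line (for example block2mtd=/dev/sda2,65536, device name purely illustrative) it is saved in block2mtd_paramline and parsed once block2mtd_init() runs; written after boot to /sys/module/block2mtd/parameters/block2mtd it is parsed immediately by block2mtd_setup2().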
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index a8466141e91..ef4a731ca5c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -406,13 +406,13 @@ struct flash_info {
static struct flash_info __devinitdata m25p_data [] = {
/* REVISIT: fill in JEDEC ids, for parts that have them */
- { "m25p05", 0x05, 0x0000, 32 * 1024, 2 },
- { "m25p10", 0x10, 0x0000, 32 * 1024, 4 },
- { "m25p20", 0x11, 0x0000, 64 * 1024, 4 },
- { "m25p40", 0x12, 0x0000, 64 * 1024, 8 },
+ { "m25p05", 0x05, 0x2010, 32 * 1024, 2 },
+ { "m25p10", 0x10, 0x2011, 32 * 1024, 4 },
+ { "m25p20", 0x11, 0x2012, 64 * 1024, 4 },
+ { "m25p40", 0x12, 0x2013, 64 * 1024, 8 },
{ "m25p80", 0x13, 0x0000, 64 * 1024, 16 },
- { "m25p16", 0x14, 0x0000, 64 * 1024, 32 },
- { "m25p32", 0x15, 0x0000, 64 * 1024, 64 },
+ { "m25p16", 0x14, 0x2015, 64 * 1024, 32 },
+ { "m25p32", 0x15, 0x2016, 64 * 1024, 64 },
{ "m25p64", 0x16, 0x2017, 64 * 1024, 128 },
};
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 6f9bbf6fee4..354e1657cc2 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -4,82 +4,82 @@
* PMC551 PCI Mezzanine Ram Device
*
* Author:
- * Mark Ferrell <mferrell@mvista.com>
- * Copyright 1999,2000 Nortel Networks
+ * Mark Ferrell <mferrell@mvista.com>
+ * Copyright 1999,2000 Nortel Networks
*
* License:
- * As part of this driver was derived from the slram.c driver it
- * falls under the same license, which is GNU General Public
- * License v2
+ * As part of this driver was derived from the slram.c driver it
+ * falls under the same license, which is GNU General Public
+ * License v2
*
* Description:
- * This driver is intended to support the PMC551 PCI Ram device
- * from Ramix Inc. The PMC551 is a PMC Mezzanine module for
- * cPCI embedded systems. The device contains a single SROM
- * that initially programs the V370PDC chipset onboard the
- * device, and various banks of DRAM/SDRAM onboard. This driver
- * implements this PCI Ram device as an MTD (Memory Technology
- * Device) so that it can be used to hold a file system, or for
- * added swap space in embedded systems. Since the memory on
- * this board isn't as fast as main memory we do not try to hook
- * it into main memory as that would simply reduce performance
- * on the system. Using it as a block device allows us to use
- * it as high speed swap or for a high speed disk device of some
- * sort. Which becomes very useful on diskless systems in the
- * embedded market I might add.
+ * This driver is intended to support the PMC551 PCI Ram device
+ * from Ramix Inc. The PMC551 is a PMC Mezzanine module for
+ * cPCI embedded systems. The device contains a single SROM
+ * that initially programs the V370PDC chipset onboard the
+ * device, and various banks of DRAM/SDRAM onboard. This driver
+ * implements this PCI Ram device as an MTD (Memory Technology
+ * Device) so that it can be used to hold a file system, or for
+ * added swap space in embedded systems. Since the memory on
+ * this board isn't as fast as main memory, we do not try to hook
+ * it into main memory as that would simply reduce performance
+ * on the system. Using it as a block device allows us to use
+ * it as high speed swap or for a high speed disk device of some
+ * sort, which becomes very useful on diskless systems in the
+ * embedded market I might add.
*
* Notes:
- * Due to what I assume is more buggy SROM, the 64M PMC551 I
- * have available claims that all 4 of it's DRAM banks have 64M
- * of ram configured (making a grand total of 256M onboard).
- * This is slightly annoying since the BAR0 size reflects the
- * aperture size, not the dram size, and the V370PDC supplies no
- * other method for memory size discovery. This problem is
- * mostly only relevant when compiled as a module, as the
- * unloading of the module with an aperture size smaller then
- * the ram will cause the driver to detect the onboard memory
- * size to be equal to the aperture size when the module is
- * reloaded. Soooo, to help, the module supports an msize
- * option to allow the specification of the onboard memory, and
- * an asize option, to allow the specification of the aperture
- * size. The aperture must be equal to or less then the memory
- * size, the driver will correct this if you screw it up. This
- * problem is not relevant for compiled in drivers as compiled
- * in drivers only init once.
+ * Due to what I assume is a buggy SROM, the 64M PMC551 I
+ * have available claims that all 4 of its DRAM banks have 64M
+ * of ram configured (making a grand total of 256M onboard).
+ * This is slightly annoying since the BAR0 size reflects the
+ * aperture size, not the dram size, and the V370PDC supplies no
+ * other method for memory size discovery. This problem is
+ * mostly only relevant when compiled as a module, as the
+ * unloading of the module with an aperture size smaller than
+ * the ram will cause the driver to detect the onboard memory
+ * size to be equal to the aperture size when the module is
+ * reloaded. Soooo, to help, the module supports an msize
+ * option to allow the specification of the onboard memory, and
+ * an asize option, to allow the specification of the aperture
+ * size. The aperture must be equal to or less than the memory
+ * size, the driver will correct this if you screw it up. This
+ * problem is not relevant for compiled in drivers as compiled
+ * in drivers only init once.
*
* Credits:
- * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the
- * initial example code of how to initialize this device and for
- * help with questions I had concerning operation of the device.
+ * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the
+ * initial example code of how to initialize this device and for
+ * help with questions I had concerning operation of the device.
*
- * Most of the MTD code for this driver was originally written
- * for the slram.o module in the MTD drivers package which
- * allows the mapping of system memory into an MTD device.
- * Since the PMC551 memory module is accessed in the same
- * fashion as system memory, the slram.c code became a very nice
- * fit to the needs of this driver. All we added was PCI
- * detection/initialization to the driver and automatically figure
- * out the size via the PCI detection.o, later changes by Corey
- * Minyard set up the card to utilize a 1M sliding apature.
+ * Most of the MTD code for this driver was originally written
+ * for the slram.o module in the MTD drivers package which
+ * allows the mapping of system memory into an MTD device.
+ * Since the PMC551 memory module is accessed in the same
+ * fashion as system memory, the slram.c code became a very nice
+ * fit to the needs of this driver. All we added was PCI
+ * detection/initialization to the driver along with automatic
+ * size discovery via that PCI probing; later changes by Corey
+ * Minyard set up the card to utilize a 1M sliding aperture.
*
- * Corey Minyard <minyard@nortelnetworks.com>
- * * Modified driver to utilize a sliding aperture instead of
- * mapping all memory into kernel space which turned out to
- * be very wasteful.
- * * Located a bug in the SROM's initialization sequence that
- * made the memory unusable, added a fix to code to touch up
- * the DRAM some.
+ * Corey Minyard <minyard@nortelnetworks.com>
+ * * Modified driver to utilize a sliding aperture instead of
+ * mapping all memory into kernel space which turned out to
+ * be very wasteful.
+ * * Located a bug in the SROM's initialization sequence that
+ * made the memory unusable, added a fix to code to touch up
+ * the DRAM some.
*
* Bugs/FIXME's:
- * * MUST fix the init function to not spin on a register
- * waiting for it to set .. this does not safely handle busted
- * devices that never reset the register correctly which will
- * cause the system to hang w/ a reboot being the only chance at
- * recover. [sort of fixed, could be better]
- * * Add I2C handling of the SROM so we can read the SROM's information
- * about the aperture size. This should always accurately reflect the
- * onboard memory size.
- * * Comb the init routine. It's still a bit cludgy on a few things.
+ * * MUST fix the init function to not spin on a register
+ * waiting for it to be set; this does not safely handle busted
+ * devices that never reset the register correctly which will
+ * cause the system to hang w/ a reboot being the only chance at
+ * recovery. [sort of fixed, could be better]
+ * * Add I2C handling of the SROM so we can read the SROM's information
+ * about the aperture size. This should always accurately reflect the
+ * onboard memory size.
+ * * Comb the init routine. It's still a bit kludgy on a few things.
*/
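
The msize/asize interplay described in the Notes block boils down to a simple clamping rule. A minimal standalone sketch of that reconciliation, mirroring the checks init_pmc551() performs further down (the function name here is hypothetical, not driver code):

/*
 * Hedged sketch only: the aperture may not exceed the memory size,
 * and the driver silently corrects it, as the Notes block says.
 */
static int reconcile_sizes(int msize, int asize)
{
        if (asize == 0 || asize == msize)
                return msize;   /* default: aperture covers all memory */
        if (asize > msize)
                return msize;   /* oversized aperture is clamped */
        return asize;           /* smaller aperture slides over memory */
}
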
#include <linux/kernel.h>
@@ -99,84 +99,83 @@
#include <asm/system.h>
#include <linux/pci.h>
-#ifndef CONFIG_PCI
-#error Enable PCI in your kernel config
-#endif
-
#include <linux/mtd/mtd.h>
#include <linux/mtd/pmc551.h>
#include <linux/mtd/compatmac.h>
static struct mtd_info *pmc551list;
-static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr)
+static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- struct mypriv *priv = mtd->priv;
- u32 soff_hi, soff_lo; /* start address offset hi/lo */
- u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
- unsigned long end;
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
u_char *ptr;
size_t retlen;
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len);
+ printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr,
+ (long)instr->len);
#endif
- end = instr->addr + instr->len - 1;
+ end = instr->addr + instr->len - 1;
- /* Is it past the end? */
- if ( end > mtd->size ) {
+ /* Is it past the end? */
+ if (end > mtd->size) {
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", (long)end, (long)mtd->size);
+ printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
+ (long)end, (long)mtd->size);
#endif
- return -EINVAL;
- }
-
- eoff_hi = end & ~(priv->asize - 1);
- soff_hi = instr->addr & ~(priv->asize - 1);
- eoff_lo = end & (priv->asize - 1);
- soff_lo = instr->addr & (priv->asize - 1);
-
- pmc551_point (mtd, instr->addr, instr->len, &retlen, &ptr);
-
- if ( soff_hi == eoff_hi || mtd->size == priv->asize) {
- /* The whole thing fits within one access, so just one shot
- will do it. */
- memset(ptr, 0xff, instr->len);
- } else {
- /* We have to do multiple writes to get all the data
- written. */
- while (soff_hi != eoff_hi) {
+ return -EINVAL;
+ }
+
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_hi = instr->addr & ~(priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+ soff_lo = instr->addr & (priv->asize - 1);
+
+ pmc551_point(mtd, instr->addr, instr->len, &retlen, &ptr);
+
+ if (soff_hi == eoff_hi || mtd->size == priv->asize) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memset(ptr, 0xff, instr->len);
+ } else {
+ /* We have to do multiple writes to get all the data
+ written. */
+ while (soff_hi != eoff_hi) {
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk( KERN_DEBUG "pmc551_erase() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+ printk(KERN_DEBUG "pmc551_erase() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
#endif
- memset(ptr, 0xff, priv->asize);
- if (soff_hi + priv->asize >= mtd->size) {
- goto out;
- }
- soff_hi += priv->asize;
- pmc551_point (mtd,(priv->base_map0|soff_hi),
- priv->asize, &retlen, &ptr);
- }
- memset (ptr, 0xff, eoff_lo);
- }
-
-out:
+ memset(ptr, 0xff, priv->asize);
+ if (soff_hi + priv->asize >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, (priv->base_map0 | soff_hi),
+ priv->asize, &retlen, &ptr);
+ }
+ memset(ptr, 0xff, eoff_lo);
+ }
+
+ out:
instr->state = MTD_ERASE_DONE;
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_erase() done\n");
#endif
- mtd_erase_callback(instr);
- return 0;
+ mtd_erase_callback(instr);
+ return 0;
}
-
-static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
+static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char **mtdbuf)
{
- struct mypriv *priv = mtd->priv;
- u32 soff_hi;
- u32 soff_lo;
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi;
+ u32 soff_lo;
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
@@ -184,18 +183,19 @@ static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *
if (from + len > mtd->size) {
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", (long)from+len, (long)mtd->size);
+ printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
+ (long)from + len, (long)mtd->size);
#endif
return -EINVAL;
}
- soff_hi = from & ~(priv->asize - 1);
- soff_lo = from & (priv->asize - 1);
+ soff_hi = from & ~(priv->asize - 1);
+ soff_lo = from & (priv->asize - 1);
/* Cheap hack optimization */
- if( priv->curr_map0 != from ) {
- pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0,
- (priv->base_map0 | soff_hi) );
+ if (priv->curr_map0 != from) {
+ pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
+ (priv->base_map0 | soff_hi));
priv->curr_map0 = soff_hi;
}
@@ -204,137 +204,144 @@ static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *
return 0;
}
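
The point routine above carries the heart of the sliding-aperture scheme: because the aperture size is a power of two, one mask splits any device offset into the window base programmed into PMC551_PCI_MEM_MAP0 and the offset inside the ioremap()ed window. A standalone userspace sketch of that arithmetic (names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: split an absolute device offset into the window
 * base that would go into the map register and the offset within the
 * mapped aperture. Assumes asize is a power of two, which the driver
 * guarantees by rounding with ffs().
 */
static void split_offset(uint32_t from, uint32_t asize,
                         uint32_t *win_base, uint32_t *win_off)
{
        *win_base = from & ~(asize - 1);        /* aperture-aligned base */
        *win_off  = from & (asize - 1);         /* offset inside the window */
}

int main(void)
{
        uint32_t base, off;

        split_offset(0x2345678, 1 << 20, &base, &off);  /* 1M aperture */
        printf("base=0x%08x off=0x%05x\n",
               (unsigned)base, (unsigned)off);  /* 0x02300000, 0x45678 */
        return 0;
}

The curr_map0 check in pmc551_point() then simply skips the PCI config write when the requested window is already mapped.
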
-
-static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
+static void pmc551_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from,
+ size_t len)
{
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_unpoint()\n");
#endif
}
-
-static int pmc551_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
- struct mypriv *priv = mtd->priv;
- u32 soff_hi, soff_lo; /* start address offset hi/lo */
- u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
- unsigned long end;
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
u_char *ptr;
- u_char *copyto = buf;
+ u_char *copyto = buf;
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n", (long)from, (long)len, (long)priv->asize);
+ printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n",
+ (long)from, (long)len, (long)priv->asize);
#endif
- end = from + len - 1;
+ end = from + len - 1;
- /* Is it past the end? */
- if (end > mtd->size) {
+ /* Is it past the end? */
+ if (end > mtd->size) {
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", (long) end, (long)mtd->size);
+ printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
+ (long)end, (long)mtd->size);
#endif
- return -EINVAL;
- }
-
- soff_hi = from & ~(priv->asize - 1);
- eoff_hi = end & ~(priv->asize - 1);
- soff_lo = from & (priv->asize - 1);
- eoff_lo = end & (priv->asize - 1);
-
- pmc551_point (mtd, from, len, retlen, &ptr);
-
- if (soff_hi == eoff_hi) {
- /* The whole thing fits within one access, so just one shot
- will do it. */
- memcpy(copyto, ptr, len);
- copyto += len;
- } else {
- /* We have to do multiple writes to get all the data
- written. */
- while (soff_hi != eoff_hi) {
+ return -EINVAL;
+ }
+
+ soff_hi = from & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_lo = from & (priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point(mtd, from, len, retlen, &ptr);
+
+ if (soff_hi == eoff_hi) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memcpy(copyto, ptr, len);
+ copyto += len;
+ } else {
+ /* We have to do multiple writes to get all the data
+ written. */
+ while (soff_hi != eoff_hi) {
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk( KERN_DEBUG "pmc551_read() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+ printk(KERN_DEBUG "pmc551_read() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
#endif
- memcpy(copyto, ptr, priv->asize);
- copyto += priv->asize;
- if (soff_hi + priv->asize >= mtd->size) {
- goto out;
- }
- soff_hi += priv->asize;
- pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr);
- }
- memcpy(copyto, ptr, eoff_lo);
- copyto += eoff_lo;
- }
-
-out:
+ memcpy(copyto, ptr, priv->asize);
+ copyto += priv->asize;
+ if (soff_hi + priv->asize >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr);
+ }
+ memcpy(copyto, ptr, eoff_lo);
+ copyto += eoff_lo;
+ }
+
+ out:
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_read() done\n");
#endif
- *retlen = copyto - buf;
- return 0;
+ *retlen = copyto - buf;
+ return 0;
}
-static int pmc551_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
- struct mypriv *priv = mtd->priv;
- u32 soff_hi, soff_lo; /* start address offset hi/lo */
- u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
- unsigned long end;
+ struct mypriv *priv = mtd->priv;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
+ unsigned long end;
u_char *ptr;
- const u_char *copyfrom = buf;
-
+ const u_char *copyfrom = buf;
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n", (long)to, (long)len, (long)priv->asize);
+ printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n",
+ (long)to, (long)len, (long)priv->asize);
#endif
- end = to + len - 1;
- /* Is it past the end? or did the u32 wrap? */
- if (end > mtd->size ) {
+ end = to + len - 1;
+ /* Is it past the end? or did the u32 wrap? */
+ if (end > mtd->size) {
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, size: %ld, to: %ld)\n", (long) end, (long)mtd->size, (long)to);
+ printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
+ "size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
+ (long)to);
#endif
- return -EINVAL;
- }
-
- soff_hi = to & ~(priv->asize - 1);
- eoff_hi = end & ~(priv->asize - 1);
- soff_lo = to & (priv->asize - 1);
- eoff_lo = end & (priv->asize - 1);
-
- pmc551_point (mtd, to, len, retlen, &ptr);
-
- if (soff_hi == eoff_hi) {
- /* The whole thing fits within one access, so just one shot
- will do it. */
- memcpy(ptr, copyfrom, len);
- copyfrom += len;
- } else {
- /* We have to do multiple writes to get all the data
- written. */
- while (soff_hi != eoff_hi) {
+ return -EINVAL;
+ }
+
+ soff_hi = to & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_lo = to & (priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point(mtd, to, len, retlen, &ptr);
+
+ if (soff_hi == eoff_hi) {
+ /* The whole thing fits within one access, so just one shot
+ will do it. */
+ memcpy(ptr, copyfrom, len);
+ copyfrom += len;
+ } else {
+ /* We have to do multiple writes to get all the data
+ written. */
+ while (soff_hi != eoff_hi) {
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk( KERN_DEBUG "pmc551_write() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+ printk(KERN_DEBUG "pmc551_write() soff_hi: %ld, "
+ "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
#endif
- memcpy(ptr, copyfrom, priv->asize);
- copyfrom += priv->asize;
- if (soff_hi >= mtd->size) {
- goto out;
- }
- soff_hi += priv->asize;
- pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr);
- }
- memcpy(ptr, copyfrom, eoff_lo);
- copyfrom += eoff_lo;
- }
-
-out:
+ memcpy(ptr, copyfrom, priv->asize);
+ copyfrom += priv->asize;
+ if (soff_hi >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr);
+ }
+ memcpy(ptr, copyfrom, eoff_lo);
+ copyfrom += eoff_lo;
+ }
+
+ out:
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_write() done\n");
#endif
- *retlen = copyfrom - buf;
- return 0;
+ *retlen = copyfrom - buf;
+ return 0;
}
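
The read, write, and erase paths above all share one loop shape: handle the single-window case directly, otherwise step the aperture window by window and finish with the partial tail derived from eoff_lo. A simplified sketch of that loop, assuming an aperture-aligned start offset (the driver also handles an unaligned head via soff_lo); the remap callback stands in for pmc551_point() and all names are illustrative:

#include <stdint.h>
#include <string.h>

/* Map the window starting at win_base; return a pointer to it. */
typedef void (*remap_fn)(uint32_t win_base, uint8_t **window);

static void windowed_write(uint32_t to, const uint8_t *buf, size_t len,
                           uint32_t asize, remap_fn remap)
{
        uint32_t end  = to + len - 1;
        uint32_t win  = to & ~(asize - 1);
        uint32_t last = end & ~(asize - 1);
        uint32_t tail = (end & (asize - 1)) + 1; /* bytes in final window */
        uint8_t *ptr;

        remap(win, &ptr);
        while (win != last) {           /* copy whole windows first */
                memcpy(ptr, buf, asize);
                buf += asize;
                win += asize;
                remap(win, &ptr);       /* slide the aperture forward */
        }
        memcpy(ptr, buf, tail);         /* then the partial final window */
}
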
/*
@@ -349,58 +356,58 @@ out:
* mechanism
* returns the size of the memory region found.
*/
-static u32 fixup_pmc551 (struct pci_dev *dev)
+static u32 fixup_pmc551(struct pci_dev *dev)
{
#ifdef CONFIG_MTD_PMC551_BUGFIX
- u32 dram_data;
+ u32 dram_data;
#endif
- u32 size, dcmd, cfg, dtmp;
- u16 cmd, tmp, i;
+ u32 size, dcmd, cfg, dtmp;
+ u16 cmd, tmp, i;
u8 bcmd, counter;
- /* Sanity Check */
- if(!dev) {
- return -ENODEV;
- }
+ /* Sanity Check */
+ if (!dev) {
+ return -ENODEV;
+ }
/*
* Attempt to reset the card
* FIXME: Stop Spinning registers
*/
- counter=0;
+ counter = 0;
/* unlock registers */
- pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5 );
+ pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5);
/* read in old data */
- pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd );
+ pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
/* bang the reset line up and down for a few */
- for(i=0;i<10;i++) {
- counter=0;
+ for (i = 0; i < 10; i++) {
+ counter = 0;
bcmd &= ~0x80;
- while(counter++ < 100) {
+ while (counter++ < 100) {
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
}
- counter=0;
+ counter = 0;
bcmd |= 0x80;
- while(counter++ < 100) {
+ while (counter++ < 100) {
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
}
}
- bcmd |= (0x40|0x20);
+ bcmd |= (0x40 | 0x20);
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
- /*
+ /*
* Take care and turn off the memory on the device while we
* tweak the configurations
*/
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- tmp = cmd & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY);
- pci_write_config_word(dev, PCI_COMMAND, tmp);
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ tmp = cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(dev, PCI_COMMAND, tmp);
/*
* Disable existing aperture before probing memory size
*/
pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd);
- dtmp=(dcmd|PMC551_PCI_MEM_MAP_ENABLE|PMC551_PCI_MEM_MAP_REG_EN);
+ dtmp = (dcmd | PMC551_PCI_MEM_MAP_ENABLE | PMC551_PCI_MEM_MAP_REG_EN);
pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp);
/*
* Grab old BAR0 config so that we can figure out memory size
@@ -411,220 +418,230 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
* then write all 1's to the memory space, read back the result into
* "size", and then write back all the old config.
*/
- pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &cfg );
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg);
#ifndef CONFIG_MTD_PMC551_BUGFIX
- pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, ~0 );
- pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &size );
- size = (size&PCI_BASE_ADDRESS_MEM_MASK);
- size &= ~(size-1);
- pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);
+ size = (size & PCI_BASE_ADDRESS_MEM_MASK);
+ size &= ~(size - 1);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg);
#else
- /*
- * Get the size of the memory by reading all the DRAM size values
- * and adding them up.
- *
- * KLUDGE ALERT: the boards we are using have invalid column and
- * row mux values. We fix them here, but this will break other
- * memory configurations.
- */
- pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data);
- size = PMC551_DRAM_BLK_GET_SIZE(dram_data);
- dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
- dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
- pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data);
-
- pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data);
- size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
- dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
- dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
- pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data);
-
- pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data);
- size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
- dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
- dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
- pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data);
-
- pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data);
- size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
- dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
- dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
- pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data);
-
- /*
- * Oops .. something went wrong
- */
- if( (size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) {
- return -ENODEV;
- }
-#endif /* CONFIG_MTD_PMC551_BUGFIX */
-
- if ((cfg&PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
- return -ENODEV;
+ /*
+ * Get the size of the memory by reading all the DRAM size values
+ * and adding them up.
+ *
+ * KLUDGE ALERT: the boards we are using have invalid column and
+ * row mux values. We fix them here, but this will break other
+ * memory configurations.
+ */
+ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data);
+ size = PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data);
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data);
+ size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
+ dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
+ dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
+ pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data);
+
+ /*
+ * Oops .. something went wrong
+ */
+ if ((size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) {
+ return -ENODEV;
+ }
+#endif /* CONFIG_MTD_PMC551_BUGFIX */
+
+ if ((cfg & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
+ return -ENODEV;
}
- /*
- * Precharge Dram
- */
- pci_write_config_word( dev, PMC551_SDRAM_MA, 0x0400 );
- pci_write_config_word( dev, PMC551_SDRAM_CMD, 0x00bf );
-
- /*
- * Wait until command has gone through
- * FIXME: register spinning issue
- */
- do { pci_read_config_word( dev, PMC551_SDRAM_CMD, &cmd );
- if(counter++ > 100)break;
- } while ( (PCI_COMMAND_IO) & cmd );
-
- /*
+ /*
+ * Precharge Dram
+ */
+ pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0400);
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x00bf);
+
+ /*
+ * Wait until command has gone through
+ * FIXME: register spinning issue
+ */
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+
+ /*
* Turn on auto refresh
* The loop is taken directly from Ramix's example code. I assume that
* this must be held high for some duration of time, but I can find no
 * documentation referencing the reasons why.
- */
- for ( i = 1; i<=8 ; i++) {
- pci_write_config_word (dev, PMC551_SDRAM_CMD, 0x0df);
-
- /*
- * Make certain command has gone through
- * FIXME: register spinning issue
- */
- counter=0;
- do { pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
- if(counter++ > 100)break;
- } while ( (PCI_COMMAND_IO) & cmd );
- }
-
- pci_write_config_word ( dev, PMC551_SDRAM_MA, 0x0020);
- pci_write_config_word ( dev, PMC551_SDRAM_CMD, 0x0ff);
-
- /*
- * Wait until command completes
- * FIXME: register spinning issue
- */
- counter=0;
- do { pci_read_config_word ( dev, PMC551_SDRAM_CMD, &cmd);
- if(counter++ > 100)break;
- } while ( (PCI_COMMAND_IO) & cmd );
-
- pci_read_config_dword ( dev, PMC551_DRAM_CFG, &dcmd);
- dcmd |= 0x02000000;
- pci_write_config_dword ( dev, PMC551_DRAM_CFG, dcmd);
-
- /*
- * Check to make certain fast back-to-back, if not
- * then set it so
- */
- pci_read_config_word( dev, PCI_STATUS, &cmd);
- if((cmd&PCI_COMMAND_FAST_BACK) == 0) {
- cmd |= PCI_COMMAND_FAST_BACK;
- pci_write_config_word( dev, PCI_STATUS, cmd);
- }
-
- /*
- * Check to make certain the DEVSEL is set correctly, this device
- * has a tendancy to assert DEVSEL and TRDY when a write is performed
- * to the memory when memory is read-only
- */
- if((cmd&PCI_STATUS_DEVSEL_MASK) != 0x0) {
- cmd &= ~PCI_STATUS_DEVSEL_MASK;
- pci_write_config_word( dev, PCI_STATUS, cmd );
- }
- /*
- * Set to be prefetchable and put everything back based on old cfg.
+ */
+ for (i = 1; i <= 8; i++) {
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0df);
+
+ /*
+ * Make certain command has gone through
+ * FIXME: register spinning issue
+ */
+ counter = 0;
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+ }
+
+ pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0020);
+ pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0ff);
+
+ /*
+ * Wait until command completes
+ * FIXME: register spinning issue
+ */
+ counter = 0;
+ do {
+ pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
+ if (counter++ > 100)
+ break;
+ } while ((PCI_COMMAND_IO) & cmd);
+
+ pci_read_config_dword(dev, PMC551_DRAM_CFG, &dcmd);
+ dcmd |= 0x02000000;
+ pci_write_config_dword(dev, PMC551_DRAM_CFG, dcmd);
+
+ /*
+ * Check to make certain fast back-to-back, if not
+ * then set it so
+ */
+ pci_read_config_word(dev, PCI_STATUS, &cmd);
+ if ((cmd & PCI_COMMAND_FAST_BACK) == 0) {
+ cmd |= PCI_COMMAND_FAST_BACK;
+ pci_write_config_word(dev, PCI_STATUS, cmd);
+ }
+
+ /*
+ * Check to make certain the DEVSEL is set correctly, this device
+ * has a tendency to assert DEVSEL and TRDY when a write is performed
+ * to the memory when memory is read-only
+ */
+ if ((cmd & PCI_STATUS_DEVSEL_MASK) != 0x0) {
+ cmd &= ~PCI_STATUS_DEVSEL_MASK;
+ pci_write_config_word(dev, PCI_STATUS, cmd);
+ }
+ /*
+ * Set to be prefetchable and put everything back based on old cfg.
* it's possible that the reset of the V370PDC nuked the original
* setup
- */
+ */
+ /*
+ cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+ pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
+ */
+
/*
- cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
- pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
- */
-
- /*
- * Turn PCI memory and I/O bus access back on
- */
- pci_write_config_word( dev, PCI_COMMAND,
- PCI_COMMAND_MEMORY | PCI_COMMAND_IO );
+ * Turn PCI memory and I/O bus access back on
+ */
+ pci_write_config_word(dev, PCI_COMMAND,
+ PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
#ifdef CONFIG_MTD_PMC551_DEBUG
- /*
- * Some screen fun
- */
- printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at 0x%llx\n",
- (size<1024)?size:(size<1048576)?size>>10:size>>20,
- (size<1024)?'B':(size<1048576)?'K':'M',
- size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
- (unsigned long long)((dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK));
-
- /*
- * Check to see the state of the memory
- */
- pci_read_config_dword( dev, PMC551_DRAM_BLK0, &dcmd );
- printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n"
- "pmc551: DRAM_BLK0 Size: %d at %d\n"
- "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n",
- (((0x1<<1)&dcmd) == 0)?"RW":"RO",
- (((0x1<<0)&dcmd) == 0)?"Off":"On",
- PMC551_DRAM_BLK_GET_SIZE(dcmd),
- ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
-
- pci_read_config_dword( dev, PMC551_DRAM_BLK1, &dcmd );
- printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n"
- "pmc551: DRAM_BLK1 Size: %d at %d\n"
- "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n",
- (((0x1<<1)&dcmd) == 0)?"RW":"RO",
- (((0x1<<0)&dcmd) == 0)?"Off":"On",
- PMC551_DRAM_BLK_GET_SIZE(dcmd),
- ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
-
- pci_read_config_dword( dev, PMC551_DRAM_BLK2, &dcmd );
- printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n"
- "pmc551: DRAM_BLK2 Size: %d at %d\n"
- "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n",
- (((0x1<<1)&dcmd) == 0)?"RW":"RO",
- (((0x1<<0)&dcmd) == 0)?"Off":"On",
- PMC551_DRAM_BLK_GET_SIZE(dcmd),
- ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
-
- pci_read_config_dword( dev, PMC551_DRAM_BLK3, &dcmd );
- printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n"
- "pmc551: DRAM_BLK3 Size: %d at %d\n"
- "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n",
- (((0x1<<1)&dcmd) == 0)?"RW":"RO",
- (((0x1<<0)&dcmd) == 0)?"Off":"On",
- PMC551_DRAM_BLK_GET_SIZE(dcmd),
- ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) );
-
- pci_read_config_word( dev, PCI_COMMAND, &cmd );
- printk( KERN_DEBUG "pmc551: Memory Access %s\n",
- (((0x1<<1)&cmd) == 0)?"off":"on" );
- printk( KERN_DEBUG "pmc551: I/O Access %s\n",
- (((0x1<<0)&cmd) == 0)?"off":"on" );
-
- pci_read_config_word( dev, PCI_STATUS, &cmd );
- printk( KERN_DEBUG "pmc551: Devsel %s\n",
- ((PCI_STATUS_DEVSEL_MASK&cmd)==0x000)?"Fast":
- ((PCI_STATUS_DEVSEL_MASK&cmd)==0x200)?"Medium":
- ((PCI_STATUS_DEVSEL_MASK&cmd)==0x400)?"Slow":"Invalid" );
-
- printk( KERN_DEBUG "pmc551: %sFast Back-to-Back\n",
- ((PCI_COMMAND_FAST_BACK&cmd) == 0)?"Not ":"" );
-
- pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd );
- printk( KERN_DEBUG "pmc551: EEPROM is under %s control\n"
- "pmc551: System Control Register is %slocked to PCI access\n"
- "pmc551: System Control Register is %slocked to EEPROM access\n",
- (bcmd&0x1)?"software":"hardware",
- (bcmd&0x20)?"":"un", (bcmd&0x40)?"":"un");
+ /*
+ * Some screen fun
+ */
+ printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at "
+ "0x%llx\n", (size < 1024) ? size : (size < 1048576) ?
+ size >> 10 : size >> 20,
+ (size < 1024) ? 'B' : (size < 1048576) ? 'K' : 'M', size,
+ ((dcmd & (0x1 << 3)) == 0) ? "non-" : "",
+ (unsigned long long)pci_resource_start(dev, 0));
+
+ /*
+ * Check to see the state of the memory
+ */
+ pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK0 Size: %d at %d\n"
+ "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK1 Size: %d at %d\n"
+ "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK2 Size: %d at %d\n"
+ "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dcmd);
+ printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n"
+ "pmc551: DRAM_BLK3 Size: %d at %d\n"
+ "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n",
+ (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
+ (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
+ PMC551_DRAM_BLK_GET_SIZE(dcmd),
+ ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
+ ((dcmd >> 9) & 0xF));
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ printk(KERN_DEBUG "pmc551: Memory Access %s\n",
+ (((0x1 << 1) & cmd) == 0) ? "off" : "on");
+ printk(KERN_DEBUG "pmc551: I/O Access %s\n",
+ (((0x1 << 0) & cmd) == 0) ? "off" : "on");
+
+ pci_read_config_word(dev, PCI_STATUS, &cmd);
+ printk(KERN_DEBUG "pmc551: Devsel %s\n",
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x000) ? "Fast" :
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x200) ? "Medium" :
+ ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x400) ? "Slow" : "Invalid");
+
+ printk(KERN_DEBUG "pmc551: %sFast Back-to-Back\n",
+ ((PCI_COMMAND_FAST_BACK & cmd) == 0) ? "Not " : "");
+
+ pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
+ printk(KERN_DEBUG "pmc551: EEPROM is under %s control\n"
+ "pmc551: System Control Register is %slocked to PCI access\n"
+ "pmc551: System Control Register is %slocked to EEPROM access\n",
+ (bcmd & 0x1) ? "software" : "hardware",
+ (bcmd & 0x20) ? "" : "un", (bcmd & 0x40) ? "" : "un");
#endif
- return size;
+ return size;
}
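
The non-CONFIG_MTD_PMC551_BUGFIX branch above uses the standard PCI mechanism for sizing a BAR: save the register, write all ones, read back, mask off the flag bits, and isolate the lowest writable bit, which equals the region size. Condensed into one hedged helper (illustrative only; locking and the memory-space check are omitted, and the surrounding file's <linux/pci.h> environment is assumed):

static u32 probe_bar0_size(struct pci_dev *dev)
{
        u32 cfg, size;

        pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg);   /* save */
        pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);    /* all ones */
        pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);
        size &= PCI_BASE_ADDRESS_MEM_MASK;      /* strip the flag bits */
        size &= ~(size - 1);    /* lowest writable bit == region size */
        pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg);   /* restore */
        return size;
}
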
/*
* Kernel version specific module stuffages
*/
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>");
MODULE_DESCRIPTION(PMC551_VERSION);
@@ -632,11 +649,11 @@ MODULE_DESCRIPTION(PMC551_VERSION);
/*
* Stuff these outside the ifdef so as to not bust compiled in driver support
*/
-static int msize=0;
+static int msize = 0;
#if defined(CONFIG_MTD_PMC551_APERTURE_SIZE)
-static int asize=CONFIG_MTD_PMC551_APERTURE_SIZE
+static int asize = CONFIG_MTD_PMC551_APERTURE_SIZE;
#else
-static int asize=0;
+static int asize = 0;
#endif
module_param(msize, int, 0);
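
Both parameters are rounded by init_pmc551() below with (1 << (ffs(x) - 1)) << 20, which keeps only the lowest set bit of the megabyte count rather than rounding to the nearest power of two, so msize=6 yields 2M, not 4M. A hypothetical userspace demo of that arithmetic:

#include <strings.h>    /* ffs() lives here in userspace */
#include <stdio.h>

int main(void)
{
        int msize = 6;  /* as if loaded with msize=6 */
        int bytes = (1 << (ffs(msize) - 1)) << 20;

        printf("msize=%d -> %d MB -> %d bytes\n",
               msize, bytes >> 20, bytes);      /* 6 -> 2 MB -> 2097152 */
        return 0;
}
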
@@ -649,164 +666,174 @@ MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
*/
static int __init init_pmc551(void)
{
- struct pci_dev *PCI_Device = NULL;
- struct mypriv *priv;
- int count, found=0;
- struct mtd_info *mtd;
- u32 length = 0;
-
- if(msize) {
- msize = (1 << (ffs(msize) - 1))<<20;
- if (msize > (1<<30)) {
- printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n", msize);
+ struct pci_dev *PCI_Device = NULL;
+ struct mypriv *priv;
+ int count, found = 0;
+ struct mtd_info *mtd;
+ u32 length = 0;
+
+ if (msize) {
+ msize = (1 << (ffs(msize) - 1)) << 20;
+ if (msize > (1 << 30)) {
+ printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n",
+ msize);
return -EINVAL;
}
}
- if(asize) {
- asize = (1 << (ffs(asize) - 1))<<20;
- if (asize > (1<<30) ) {
- printk(KERN_NOTICE "pmc551: Invalid aperture size [%d]\n", asize);
+ if (asize) {
+ asize = (1 << (ffs(asize) - 1)) << 20;
+ if (asize > (1 << 30)) {
+ printk(KERN_NOTICE "pmc551: Invalid aperture size "
+ "[%d]\n", asize);
return -EINVAL;
}
}
- printk(KERN_INFO PMC551_VERSION);
-
- /*
- * PCU-bus chipset probe.
- */
- for( count = 0; count < MAX_MTD_DEVICES; count++ ) {
-
- if ((PCI_Device = pci_find_device(PCI_VENDOR_ID_V3_SEMI,
- PCI_DEVICE_ID_V3_SEMI_V370PDC,
- PCI_Device ) ) == NULL) {
- break;
- }
-
- printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n",
- (unsigned long long)PCI_Device->resource[0].start);
-
- /*
- * The PMC551 device acts VERY weird if you don't init it
- * first. i.e. it will not correctly report devsel. If for
- * some reason the sdram is in a wrote-protected state the
- * device will DEVSEL when it is written to causing problems
- * with the oldproc.c driver in
- * some kernels (2.2.*)
- */
- if((length = fixup_pmc551(PCI_Device)) <= 0) {
- printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
- break;
- }
+ printk(KERN_INFO PMC551_VERSION);
+
+ /*
+ * PCI-bus chipset probe.
+ */
+ for (count = 0; count < MAX_MTD_DEVICES; count++) {
+
+ if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
+ PCI_DEVICE_ID_V3_SEMI_V370PDC,
+ PCI_Device)) == NULL) {
+ break;
+ }
+
+ printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n",
+ (unsigned long long)pci_resource_start(PCI_Device, 0));
+
+ /*
+ * The PMC551 device acts VERY weird if you don't init it
+ * first, i.e. it will not correctly report DEVSEL. If for
+ * some reason the SDRAM is in a write-protected state, the
+ * device will assert DEVSEL when it is written to, causing
+ * problems with the oldproc.c driver in
+ * some kernels (2.2.*).
+ */
+ if ((length = fixup_pmc551(PCI_Device)) <= 0) {
+ printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
+ break;
+ }
/*
* This is needed until the driver is capable of reading the
* onboard I2C SROM to discover the "real" memory size.
*/
- if(msize) {
+ if (msize) {
length = msize;
- printk(KERN_NOTICE "pmc551: Using specified memory size 0x%x\n", length);
+ printk(KERN_NOTICE "pmc551: Using specified memory "
+ "size 0x%x\n", length);
} else {
msize = length;
}
- mtd = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!mtd) {
- printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n");
- break;
- }
-
- memset(mtd, 0, sizeof(struct mtd_info));
-
- priv = kmalloc (sizeof(struct mypriv), GFP_KERNEL);
- if (!priv) {
- printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n");
- kfree(mtd);
- break;
- }
- memset(priv, 0, sizeof(*priv));
- mtd->priv = priv;
- priv->dev = PCI_Device;
-
- if(asize > length) {
- printk(KERN_NOTICE "pmc551: reducing aperture size to fit %dM\n",length>>20);
+ mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
+ "device.\n");
+ break;
+ }
+
+ priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);
+ if (!priv) {
+ printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
+ "device.\n");
+ kfree(mtd);
+ break;
+ }
+ mtd->priv = priv;
+ priv->dev = PCI_Device;
+
+ if (asize > length) {
+ printk(KERN_NOTICE "pmc551: reducing aperture size to "
+ "fit %dM\n", length >> 20);
priv->asize = asize = length;
} else if (asize == 0 || asize == length) {
- printk(KERN_NOTICE "pmc551: Using existing aperture size %dM\n", length>>20);
+ printk(KERN_NOTICE "pmc551: Using existing aperture "
+ "size %dM\n", length >> 20);
priv->asize = asize = length;
} else {
- printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20);
+ printk(KERN_NOTICE "pmc551: Using specified aperture "
+ "size %dM\n", asize >> 20);
priv->asize = asize;
}
- priv->start = ioremap(((PCI_Device->resource[0].start)
- & PCI_BASE_ADDRESS_MEM_MASK),
- priv->asize);
+ priv->start = pci_iomap(PCI_Device, 0, priv->asize);
if (!priv->start) {
printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
- kfree(mtd->priv);
- kfree(mtd);
+ kfree(mtd->priv);
+ kfree(mtd);
break;
}
-
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk( KERN_DEBUG "pmc551: setting aperture to %d\n",
- ffs(priv->asize>>20)-1);
+ printk(KERN_DEBUG "pmc551: setting aperture to %d\n",
+ ffs(priv->asize >> 20) - 1);
#endif
- priv->base_map0 = ( PMC551_PCI_MEM_MAP_REG_EN
- | PMC551_PCI_MEM_MAP_ENABLE
- | (ffs(priv->asize>>20)-1)<<4 );
- priv->curr_map0 = priv->base_map0;
- pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0,
- priv->curr_map0 );
+ priv->base_map0 = (PMC551_PCI_MEM_MAP_REG_EN
+ | PMC551_PCI_MEM_MAP_ENABLE
+ | (ffs(priv->asize >> 20) - 1) << 4);
+ priv->curr_map0 = priv->base_map0;
+ pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
+ priv->curr_map0);
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk( KERN_DEBUG "pmc551: aperture set to %d\n",
- (priv->base_map0 & 0xF0)>>4 );
+ printk(KERN_DEBUG "pmc551: aperture set to %d\n",
+ (priv->base_map0 & 0xF0) >> 4);
#endif
- mtd->size = msize;
- mtd->flags = MTD_CAP_RAM;
- mtd->erase = pmc551_erase;
- mtd->read = pmc551_read;
- mtd->write = pmc551_write;
- mtd->point = pmc551_point;
- mtd->unpoint = pmc551_unpoint;
- mtd->type = MTD_RAM;
- mtd->name = "PMC551 RAM board";
- mtd->erasesize = 0x10000;
- mtd->writesize = 1;
- mtd->owner = THIS_MODULE;
-
- if (add_mtd_device(mtd)) {
- printk(KERN_NOTICE "pmc551: Failed to register new device\n");
- iounmap(priv->start);
- kfree(mtd->priv);
- kfree(mtd);
- break;
- }
- printk(KERN_NOTICE "Registered pmc551 memory device.\n");
- printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n",
- priv->asize>>20,
- priv->start,
- priv->start + priv->asize);
- printk(KERN_NOTICE "Total memory is %d%c\n",
- (length<1024)?length:
- (length<1048576)?length>>10:length>>20,
- (length<1024)?'B':(length<1048576)?'K':'M');
+ mtd->size = msize;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->erase = pmc551_erase;
+ mtd->read = pmc551_read;
+ mtd->write = pmc551_write;
+ mtd->point = pmc551_point;
+ mtd->unpoint = pmc551_unpoint;
+ mtd->type = MTD_RAM;
+ mtd->name = "PMC551 RAM board";
+ mtd->erasesize = 0x10000;
+ mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
+
+ if (add_mtd_device(mtd)) {
+ printk(KERN_NOTICE "pmc551: Failed to register new "
+ "device\n");
+ pci_iounmap(PCI_Device, priv->start);
+ kfree(mtd->priv);
+ kfree(mtd);
+ break;
+ }
+
+ /* Keep a reference as the add_mtd_device worked */
+ pci_dev_get(PCI_Device);
+
+ printk(KERN_NOTICE "Registered pmc551 memory device.\n");
+ printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n",
+ priv->asize >> 20,
+ priv->start, priv->start + priv->asize);
+ printk(KERN_NOTICE "Total memory is %d%c\n",
+ (length < 1024) ? length :
+ (length < 1048576) ? length >> 10 : length >> 20,
+ (length < 1024) ? 'B' : (length < 1048576) ? 'K' : 'M');
priv->nextpmc551 = pmc551list;
pmc551list = mtd;
found++;
- }
+ }
+
+ /* Exited early, reference left over */
+ if (PCI_Device)
+ pci_dev_put(PCI_Device);
- if( !pmc551list ) {
- printk(KERN_NOTICE "pmc551: not detected\n");
- return -ENODEV;
- } else {
+ if (!pmc551list) {
+ printk(KERN_NOTICE "pmc551: not detected\n");
+ return -ENODEV;
+ } else {
printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found);
- return 0;
+ return 0;
}
}
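
Beyond the reindenting, the substantive change in this function is the switch from pci_find_device() to the reference-counted pci_get_device(), balanced by the pci_dev_get() on each device that is kept and the pci_dev_put() on early exit. A minimal sketch of the iteration idiom (the function itself is hypothetical):

/*
 * pci_get_device() drops the reference on the device passed in and
 * returns the next match with a reference held, so a loop that runs
 * to completion (returning NULL) leaves no reference outstanding.
 */
static int count_v370pdc(void)
{
        struct pci_dev *pdev = NULL;
        int n = 0;

        while ((pdev = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
                                      PCI_DEVICE_ID_V3_SEMI_V370PDC,
                                      pdev)) != NULL) {
                /* keeping pdev past the loop would need pci_dev_get() */
                n++;
        }
        return n;
}

Breaking out of such a loop, as init_pmc551() does on error, leaves one reference held, which is exactly why the patch adds the trailing pci_dev_put().
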
@@ -815,23 +842,24 @@ static int __init init_pmc551(void)
*/
static void __exit cleanup_pmc551(void)
{
- int found=0;
- struct mtd_info *mtd;
+ int found = 0;
+ struct mtd_info *mtd;
struct mypriv *priv;
- while((mtd=pmc551list)) {
+ while ((mtd = pmc551list)) {
priv = mtd->priv;
pmc551list = priv->nextpmc551;
- if(priv->start) {
- printk (KERN_DEBUG "pmc551: unmapping %dM starting at 0x%p\n",
- priv->asize>>20, priv->start);
- iounmap (priv->start);
+ if (priv->start) {
+ printk(KERN_DEBUG "pmc551: unmapping %dM starting at "
+ "0x%p\n", priv->asize >> 20, priv->start);
+ pci_iounmap(priv->dev, priv->start);
}
+ pci_dev_put(priv->dev);
- kfree (mtd->priv);
- del_mtd_device (mtd);
- kfree (mtd);
+ kfree(mtd->priv);
+ del_mtd_device(mtd);
+ kfree(mtd);
found++;
}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 83d0b2a5252..24747bdc3e1 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -13,13 +13,13 @@ config MTD_COMPLEX_MAPPINGS
config MTD_PHYSMAP
tristate "CFI Flash device in physical memory map"
- depends on MTD_CFI
+ depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM
help
- This provides a 'mapping' driver which allows the CFI probe and
- command set driver code to communicate with flash chips which
- are mapped physically into the CPU's memory. You will need to
- configure the physical address and size of the flash chips on
- your particular board as well as the bus width, either statically
+ This provides a 'mapping' driver which allows the NOR Flash and
+ ROM driver code to communicate with chips which are mapped
+ physically into the CPU's memory. You will need to configure
+ the physical address and size of the flash chips on your
+ particular board as well as the bus width, either statically
with config options or at run-time.
config MTD_PHYSMAP_START
@@ -447,14 +447,6 @@ config MTD_DC21285
21285 bridge used with Intel's StrongARM processors. More info at
<http://www.intel.com/design/bridge/docs/21285_documentation.htm>.
-config MTD_IQ80310
- tristate "CFI Flash device mapped on the XScale IQ80310 board"
- depends on MTD_CFI && ARCH_IQ80310
- help
- This enables access routines for the flash chips on the Intel XScale
- IQ80310 evaluation board. If you have one of these boards and would
- like to use the flash chips on it, say 'Y'.
-
config MTD_IXP4XX
tristate "CFI Flash device mapped on Intel IXP4xx based systems"
depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ab71f172eb7..191c1928bbe 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
-obj-$(CONFIG_MTD_IQ80310) += iq80310.o
obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 447955be18a..797caffb20b 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -57,6 +57,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
/* Disable writes through the rom window */
pci_read_config_byte(window->pdev, 0x40, &byte);
pci_write_config_byte(window->pdev, 0x40, byte & ~1);
+ pci_dev_put(window->pdev);
}
/* Free all of the mtd devices */
@@ -91,7 +92,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
struct amd76xrom_map_info *map = NULL;
unsigned long map_top;
- /* Remember the pci dev I find the window in */
+ /* Remember the pci dev I find the window in - already have a ref */
window->pdev = pdev;
/* Assume the rom window is properly setup, and find it's size */
@@ -302,7 +303,7 @@ static int __init init_amd76xrom(void)
struct pci_device_id *id;
pdev = NULL;
for(id = amd76xrom_pci_tbl; id->vendor; id++) {
- pdev = pci_find_device(id->vendor, id->device, NULL);
+ pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev) {
break;
}
diff --git a/drivers/mtd/maps/arctic-mtd.c b/drivers/mtd/maps/arctic-mtd.c
index d95ae582fbe..642d96bc891 100644
--- a/drivers/mtd/maps/arctic-mtd.c
+++ b/drivers/mtd/maps/arctic-mtd.c
@@ -96,6 +96,8 @@ static struct mtd_partition arctic_partitions[PARTITIONS] = {
static int __init
init_arctic_mtd(void)
{
+ int err = 0;
+
printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
arctic_mtd_map.virt = ioremap(PADDR, SIZE);
@@ -109,12 +111,20 @@ init_arctic_mtd(void)
printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map);
- if (!arctic_mtd)
+ if (!arctic_mtd) {
+ iounmap((void *) arctic_mtd_map.virt);
return -ENXIO;
+ }
arctic_mtd->owner = THIS_MODULE;
- return add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
+ err = add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
+ if (err) {
+ printk("%s: add_mtd_partitions failed\n", NAME);
+ iounmap((void *) arctic_mtd_map.virt);
+ }
+
+ return err;
}
static void __exit
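
This fix, repeated in beech-mtd.c, ebony.c, ocotea.c, lasat.c, and redwood.c below, has one shape: any exit path taken after a successful ioremap() must unwind the mapping. A condensed, illustrative version (PADDR, SIZE, and the board_* names are placeholders, not from any of these drivers):

static struct map_info board_map;
static struct mtd_info *board_mtd;

static int __init init_board_mtd(void)
{
        int err;

        board_map.virt = ioremap(PADDR, SIZE);
        if (!board_map.virt)
                return -EIO;

        board_mtd = do_map_probe("cfi_probe", &board_map);
        if (!board_mtd) {
                iounmap((void *)board_map.virt); /* unwind the mapping */
                return -ENXIO;
        }

        board_mtd->owner = THIS_MODULE;
        err = add_mtd_partitions(board_mtd, board_parts, NPARTS);
        if (err)
                iounmap((void *)board_map.virt); /* unwind on failure too */
        return err;
}
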
diff --git a/drivers/mtd/maps/beech-mtd.c b/drivers/mtd/maps/beech-mtd.c
index 5df7361d140..a64b1a5ab31 100644
--- a/drivers/mtd/maps/beech-mtd.c
+++ b/drivers/mtd/maps/beech-mtd.c
@@ -72,6 +72,8 @@ static struct mtd_partition beech_partitions[2] = {
static int __init
init_beech_mtd(void)
{
+ int err = 0;
+
printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
beech_mtd_map.virt = ioremap(PADDR, SIZE);
@@ -86,12 +88,20 @@ init_beech_mtd(void)
printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map);
- if (!beech_mtd)
+ if (!beech_mtd) {
+ iounmap((void *) beech_mtd_map.virt);
return -ENXIO;
+ }
beech_mtd->owner = THIS_MODULE;
- return add_mtd_partitions(beech_mtd, beech_partitions, 2);
+ err = add_mtd_partitions(beech_mtd, beech_partitions, 2);
+ if (err) {
+ printk("%s: add_mtd_partitions failed\n", NAME);
+ iounmap((void *) beech_mtd_map.virt);
+ }
+
+ return err;
}
static void __exit
diff --git a/drivers/mtd/maps/cstm_mips_ixx.c b/drivers/mtd/maps/cstm_mips_ixx.c
index aa56defb94c..d6bef100d69 100644
--- a/drivers/mtd/maps/cstm_mips_ixx.c
+++ b/drivers/mtd/maps/cstm_mips_ixx.c
@@ -171,7 +171,14 @@ int __init init_cstm_mips_ixx(void)
cstm_mips_ixx_map[i].phys = cstm_mips_ixx_board_desc[i].window_addr;
cstm_mips_ixx_map[i].virt = ioremap(cstm_mips_ixx_board_desc[i].window_addr, cstm_mips_ixx_board_desc[i].window_size);
if (!cstm_mips_ixx_map[i].virt) {
+ int j = 0;
printk(KERN_WARNING "Failed to ioremap\n");
+ for (j = 0; j < i; j++) {
+ if (cstm_mips_ixx_map[j].virt) {
+ iounmap((void *)cstm_mips_ixx_map[j].virt);
+ cstm_mips_ixx_map[j].virt = NULL;
+ }
+ }
return -EIO;
}
cstm_mips_ixx_map[i].name = cstm_mips_ixx_board_desc[i].name;
@@ -204,8 +211,15 @@ int __init init_cstm_mips_ixx(void)
cstm_mips_ixx_map[i].map_priv_2 = (unsigned long)mymtd;
add_mtd_partitions(mymtd, parts, cstm_mips_ixx_board_desc[i].num_partitions);
}
- else
- return -ENXIO;
+ else {
+ for (i = 0; i < PHYSMAP_NUMBER; i++) {
+ if (cstm_mips_ixx_map[i].virt) {
+ iounmap((void *)cstm_mips_ixx_map[i].virt);
+ cstm_mips_ixx_map[i].virt = NULL;
+ }
+ }
+ return -ENXIO;
+ }
}
return 0;
}
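
When an init loop maps several windows, a failure at index i must also release indices 0..i-1; the same partial-unwind pattern recurs in fortunet.c and sbc8240.c below. Sketched with a placeholder win[] array:

/* Illustrative only: win[] and NWINDOWS are not from the driver. */
static int __init map_all_windows(void)
{
        int i, j;

        for (i = 0; i < NWINDOWS; i++) {
                win[i].virt = ioremap(win[i].phys, win[i].size);
                if (!win[i].virt) {
                        for (j = 0; j < i; j++) { /* unwind earlier maps */
                                iounmap(win[j].virt);
                                win[j].virt = NULL;
                        }
                        return -EIO;
                }
        }
        return 0;
}
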
diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c
index 641e1dd8479..1488bb92f26 100644
--- a/drivers/mtd/maps/ebony.c
+++ b/drivers/mtd/maps/ebony.c
@@ -108,6 +108,7 @@ int __init init_ebony(void)
ARRAY_SIZE(ebony_small_partitions));
} else {
printk("map probe failed for flash\n");
+ iounmap(ebony_small_map.virt);
return -ENXIO;
}
@@ -117,6 +118,7 @@ int __init init_ebony(void)
if (!ebony_large_map.virt) {
printk("Failed to ioremap flash\n");
+ iounmap(ebony_small_map.virt);
return -EIO;
}
@@ -129,6 +131,8 @@ int __init init_ebony(void)
ARRAY_SIZE(ebony_large_partitions));
} else {
printk("map probe failed for flash\n");
+ iounmap(ebony_small_map.virt);
+ iounmap(ebony_large_map.virt);
return -ENXIO;
}
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index c6bf4e1219e..7c50c271651 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -218,8 +218,11 @@ int __init init_fortunet(void)
map_regions[ix].map_info.size);
if(!map_regions[ix].map_info.virt)
{
+ int j = 0;
printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
map_regions[ix].map_info.name);
+ for (j = 0; j < ix; j++)
+ iounmap(map_regions[j].map_info.virt);
return -ENXIO;
}
simple_map_init(&map_regions[ix].map_info);
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index db4b570d874..2bb3e63606e 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -61,6 +61,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
/* Disable writes through the rom window */
pci_read_config_word(window->pdev, BIOS_CNTL, &word);
pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+ pci_dev_put(window->pdev);
/* Free all of the mtd devices */
list_for_each_entry_safe(map, scratch, &window->maps, list) {
@@ -355,7 +356,7 @@ static int __init init_ichxrom(void)
pdev = NULL;
for (id = ichxrom_pci_tbl; id->vendor; id++) {
- pdev = pci_find_device(id->vendor, id->device, NULL);
+ pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev) {
break;
}
diff --git a/drivers/mtd/maps/iq80310.c b/drivers/mtd/maps/iq80310.c
deleted file mode 100644
index 62d9e87d84e..00000000000
--- a/drivers/mtd/maps/iq80310.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * $Id: iq80310.c,v 1.21 2005/11/07 11:14:27 gleixner Exp $
- *
- * Mapping for the Intel XScale IQ80310 evaluation board
- *
- * Author: Nicolas Pitre
- * Copyright: (C) 2001 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0
-#define WINDOW_SIZE 8*1024*1024
-#define BUSWIDTH 1
-
-static struct mtd_info *mymtd;
-
-static struct map_info iq80310_map = {
- .name = "IQ80310 flash",
- .size = WINDOW_SIZE,
- .bankwidth = BUSWIDTH,
- .phys = WINDOW_ADDR
-};
-
-static struct mtd_partition iq80310_partitions[4] = {
- {
- .name = "Firmware",
- .size = 0x00080000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- },{
- .name = "Kernel",
- .size = 0x000a0000,
- .offset = 0x00080000,
- },{
- .name = "Filesystem",
- .size = 0x00600000,
- .offset = 0x00120000
- },{
- .name = "RedBoot",
- .size = 0x000e0000,
- .offset = 0x00720000,
- .mask_flags = MTD_WRITEABLE
- }
-};
-
-static struct mtd_info *mymtd;
-static struct mtd_partition *parsed_parts;
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-static int __init init_iq80310(void)
-{
- struct mtd_partition *parts;
- int nb_parts = 0;
- int parsed_nr_parts = 0;
- int ret;
-
- iq80310_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
- if (!iq80310_map.virt) {
- printk("Failed to ioremap\n");
- return -EIO;
- }
- simple_map_init(&iq80310_map);
-
- mymtd = do_map_probe("cfi_probe", &iq80310_map);
- if (!mymtd) {
- iounmap((void *)iq80310_map.virt);
- return -ENXIO;
- }
- mymtd->owner = THIS_MODULE;
-
- ret = parse_mtd_partitions(mymtd, probes, &parsed_parts, 0);
-
- if (ret > 0)
- parsed_nr_parts = ret;
-
- if (parsed_nr_parts > 0) {
- parts = parsed_parts;
- nb_parts = parsed_nr_parts;
- } else {
- parts = iq80310_partitions;
- nb_parts = ARRAY_SIZE(iq80310_partitions);
- }
- add_mtd_partitions(mymtd, parts, nb_parts);
- return 0;
-}
-
-static void __exit cleanup_iq80310(void)
-{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- kfree(parsed_parts);
- }
- if (iq80310_map.virt)
- iounmap((void *)iq80310_map.virt);
-}
-
-module_init(init_iq80310);
-module_exit(cleanup_iq80310);
-
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
-MODULE_DESCRIPTION("MTD map driver for Intel XScale IQ80310 evaluation board");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 986c5862839..7a828e3e644 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -253,7 +253,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
/* Use the fast version */
info->map.write = ixp4xx_write16,
- err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
+ err = parse_mtd_partitions(info->mtd, probes, &info->partitions, dev->resource->start);
if (err > 0) {
err = add_mtd_partitions(info->mtd, info->partitions, err);
if(err)
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 6b784ef5ee7..67620adf481 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -61,14 +61,17 @@ static int __init init_l440gx(void)
struct resource *pm_iobase;
__u16 word;
- dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
- pm_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+ pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+ pci_dev_put(dev);
+
if (!dev || !pm_dev) {
printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
+ pci_dev_put(pm_dev);
return -ENODEV;
}
@@ -76,6 +79,7 @@ static int __init init_l440gx(void)
if (!l440gx_map.virt) {
printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
+ pci_dev_put(pm_dev);
return -ENOMEM;
}
simple_map_init(&l440gx_map);
@@ -99,8 +103,12 @@ static int __init init_l440gx(void)
pm_iobase->start += iobase & ~1;
pm_iobase->end += iobase & ~1;
+ pci_dev_put(pm_dev);
+
/* Allocate the resource region */
if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
+ pci_dev_put(dev);
+ pci_dev_put(pm_dev);
printk(KERN_WARNING "Could not allocate pm iobase resource\n");
iounmap(l440gx_map.virt);
return -ENXIO;
diff --git a/drivers/mtd/maps/lasat.c b/drivers/mtd/maps/lasat.c
index 1c13d2dc0cd..e3437632105 100644
--- a/drivers/mtd/maps/lasat.c
+++ b/drivers/mtd/maps/lasat.c
@@ -79,6 +79,7 @@ static int __init init_lasat(void)
return 0;
}
+ iounmap(lasat_map.virt);
return -ENXIO;
}
@@ -89,6 +90,7 @@ static void __exit cleanup_lasat(void)
map_destroy(lasat_mtd);
}
if (lasat_map.virt) {
+ iounmap(lasat_map.virt);
lasat_map.virt = 0;
}
}
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 0994b5b2e33..198e840ff6d 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -277,6 +277,7 @@ int __init nettel_init(void)
nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
if (!nettel_amd_map.virt) {
printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
+ iounmap(nettel_mmcrp);
return(-EIO);
}
simple_map_init(&nettel_amd_map);
@@ -337,7 +338,8 @@ int __init nettel_init(void)
nettel_amd_map.virt = NULL;
#else
/* Only AMD flash supported */
- return(-ENXIO);
+ rc = -ENXIO;
+ goto out_unmap2;
#endif
}
@@ -361,14 +363,15 @@ int __init nettel_init(void)
nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
- return(-EIO);
+ rc = -EIO;
+ goto out_unmap2;
}
simple_map_init(&nettel_intel_map);
intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
if (!intel_mtd) {
- iounmap(nettel_intel_map.virt);
- return(-ENXIO);
+ rc = -ENXIO;
+ goto out_unmap1;
}
/* Set PAR to the detected size */
@@ -394,13 +397,14 @@ int __init nettel_init(void)
nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
- return(-EIO);
+ rc = -EIO;
+ goto out_unmap2;
}
intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
if (! intel_mtd) {
- iounmap((void *) nettel_intel_map.virt);
- return(-ENXIO);
+ rc = -ENXIO;
+ goto out_unmap1;
}
intel1size = intel_mtd->size - intel0size;
@@ -456,6 +460,18 @@ int __init nettel_init(void)
#endif
return(rc);
+
+#ifdef CONFIG_MTD_CFI_INTELEXT
+out_unmap1:
+ iounmap((void *) nettel_intel_map.virt);
+#endif
+
+out_unmap2:
+ iounmap(nettel_mmcrp);
+ iounmap(nettel_amd_map.virt);
+
+ return(rc);
+
}
/****************************************************************************/
@@ -469,6 +485,10 @@ void __exit nettel_cleanup(void)
del_mtd_partitions(amd_mtd);
map_destroy(amd_mtd);
}
+ if (nettel_mmcrp) {
+ iounmap(nettel_mmcrp);
+ nettel_mmcrp = NULL;
+ }
if (nettel_amd_map.virt) {
iounmap(nettel_amd_map.virt);
nettel_amd_map.virt = NULL;
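
Rather than sprinkling iounmap() calls at each failure return, nettel.c adopts the kernel's goto-unwind idiom: labels placed in reverse order of acquisition, so each failure jumps to the label that releases exactly what has been set up so far. A skeleton of the idiom with placeholder names (a, b, A_PADDR, and so on are not from the driver):

static void __iomem *a, *b;

static int __init example_init(void)
{
        int rc;

        a = ioremap(A_PADDR, A_SIZE);           /* first resource */
        if (!a)
                return -EIO;

        b = ioremap(B_PADDR, B_SIZE);           /* second resource */
        if (!b) {
                rc = -EIO;
                goto out_unmap_a;
        }

        if (!do_map_probe("cfi_probe", &some_map)) {
                rc = -ENXIO;
                goto out_unmap_b;               /* undo everything so far */
        }
        return 0;

out_unmap_b:
        iounmap(b);
out_unmap_a:
        iounmap(a);
        return rc;
}
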
diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c
index 2f07602ba94..5522eac8c98 100644
--- a/drivers/mtd/maps/ocotea.c
+++ b/drivers/mtd/maps/ocotea.c
@@ -97,6 +97,7 @@ int __init init_ocotea(void)
ARRAY_SIZE(ocotea_small_partitions));
} else {
printk("map probe failed for flash\n");
+ iounmap(ocotea_small_map.virt);
return -ENXIO;
}
@@ -106,6 +107,7 @@ int __init init_ocotea(void)
if (!ocotea_large_map.virt) {
printk("Failed to ioremap flash\n");
+ iounmap(ocotea_small_map.virt);
return -EIO;
}
@@ -118,6 +120,8 @@ int __init init_ocotea(void)
ARRAY_SIZE(ocotea_large_partitions));
} else {
printk("map probe failed for flash\n");
+ iounmap(ocotea_small_map.virt);
+ iounmap(ocotea_large_map.virt);
return -ENXIO;
}
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index c861134cbc4..995347b1beb 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -602,6 +602,10 @@ static int pcmciamtd_config(struct pcmcia_device *link)
ret = pcmcia_request_configuration(link, &link->conf);
if(ret != CS_SUCCESS) {
cs_error(link, RequestConfiguration, ret);
+ if (dev->win_base) {
+ iounmap(dev->win_base);
+ dev->win_base = NULL;
+ }
return -ENODEV;
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 7799a25a7f2..bc7cc71788b 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -158,9 +158,42 @@ err_out:
return err;
}
+#ifdef CONFIG_PM
+static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ int ret = 0;
+
+ if (info)
+ ret = info->mtd->suspend(info->mtd);
+
+ return ret;
+}
+
+static int physmap_flash_resume(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ if (info)
+ info->mtd->resume(info->mtd);
+ return 0;
+}
+
+static void physmap_flash_shutdown(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ if (info && info->mtd->suspend(info->mtd) == 0)
+ info->mtd->resume(info->mtd);
+}
+#endif
+
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
+#ifdef CONFIG_PM
+ .suspend = physmap_flash_suspend,
+ .resume = physmap_flash_resume,
+ .shutdown = physmap_flash_shutdown,
+#endif
.driver = {
.name = "physmap-flash",
},
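
The new physmap hooks simply forward power-management events to the MTD layer. The shutdown handler's suspend-then-resume pair looks odd at first; the point, presumably, is that mtd->suspend() waits for any in-flight erase or write to finish, and the immediate resume leaves the chip back in array-read mode, so the system reboots off a quiescent flash. The hunk, restated with comments:

    static void physmap_flash_shutdown(struct platform_device *dev)
    {
        struct physmap_flash_info *info = platform_get_drvdata(dev);

        /* suspend blocks until a pending erase/write completes;
         * resume returns the chip to array (read) mode */
        if (info && info->mtd->suspend(info->mtd) == 0)
            info->mtd->resume(info->mtd);
    }
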
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index ec8fdae1dd9..2257d2b500c 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -126,6 +126,8 @@ static struct mtd_info *redwood_mtd;
int __init init_redwood_flash(void)
{
+ int err = 0;
+
printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n",
WINDOW_SIZE, WINDOW_ADDR);
@@ -141,11 +143,18 @@ int __init init_redwood_flash(void)
if (redwood_mtd) {
redwood_mtd->owner = THIS_MODULE;
- return add_mtd_partitions(redwood_mtd,
+ err = add_mtd_partitions(redwood_mtd,
redwood_flash_partitions,
NUM_REDWOOD_FLASH_PARTITIONS);
+ if (err) {
+ printk("init_redwood_flash: add_mtd_partitions failed\n");
+ iounmap(redwood_flash_map.virt);
+ }
+ return err;
+
}
+ iounmap(redwood_flash_map.virt);
return -ENXIO;
}
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 7d0fcf8f4f3..b8c1331b7a0 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -156,7 +156,7 @@ int __init init_sbc8240_mtd (void)
};
int devicesfound = 0;
- int i;
+ int i,j;
for (i = 0; i < NUM_FLASH_BANKS; i++) {
printk (KERN_NOTICE MSG_PREFIX
@@ -166,6 +166,10 @@ int __init init_sbc8240_mtd (void)
(unsigned long) ioremap (pt[i].addr, pt[i].size);
if (!sbc8240_map[i].map_priv_1) {
printk (MSG_PREFIX "failed to ioremap\n");
+ for (j = 0; j < i; j++) {
+ iounmap((void *) sbc8240_map[j].map_priv_1);
+ sbc8240_map[j].map_priv_1 = 0;
+ }
return -EIO;
}
simple_map_init(&sbc8240_mtd[i]);
@@ -175,6 +179,11 @@ int __init init_sbc8240_mtd (void)
if (sbc8240_mtd[i]) {
sbc8240_mtd[i]->module = THIS_MODULE;
devicesfound++;
+ } else {
+ if (sbc8240_map[i].map_priv_1) {
+ iounmap((void *) sbc8240_map[i].map_priv_1);
+ sbc8240_map[i].map_priv_1 = 0;
+ }
}
}
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 7391fd544e8..5e2bce22f37 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -87,19 +87,23 @@ static int __init init_scx200_docflash(void)
printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");
- if ((bridge = pci_find_device(PCI_VENDOR_ID_NS,
+ if ((bridge = pci_get_device(PCI_VENDOR_ID_NS,
PCI_DEVICE_ID_NS_SCx200_BRIDGE,
NULL)) == NULL)
return -ENODEV;
/* check that we have found the configuration block */
- if (!scx200_cb_present())
+ if (!scx200_cb_present()) {
+ pci_dev_put(bridge);
return -ENODEV;
+ }
if (probe) {
/* Try to use the present flash mapping if any */
pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
+ pci_dev_put(bridge);
+
pmr = inl(scx200_cb_base + SCx200_PMR);
if (base == 0
@@ -127,6 +131,7 @@ static int __init init_scx200_docflash(void)
return -ENOMEM;
}
} else {
+ pci_dev_put(bridge);
for (u = size; u > 1; u >>= 1)
;
if (u != 1) {
diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c
index ec80eec376b..ca932122fb6 100644
--- a/drivers/mtd/maps/walnut.c
+++ b/drivers/mtd/maps/walnut.c
@@ -68,6 +68,7 @@ int __init init_walnut(void)
if (WALNUT_FLASH_ONBD_N(fpga_brds1)) {
printk("The on-board flash is disabled (U79 sw 5)!");
+ iounmap(fpga_status_adr);
return -EIO;
}
if (WALNUT_FLASH_SRAM_SEL(fpga_brds1))
@@ -81,6 +82,7 @@ int __init init_walnut(void)
if (!walnut_map.virt) {
printk("Failed to ioremap flash.\n");
+ iounmap(fpga_status_adr);
return -EIO;
}
@@ -93,9 +95,11 @@ int __init init_walnut(void)
ARRAY_SIZE(walnut_partitions));
} else {
printk("map probe failed for flash\n");
+ iounmap(fpga_status_adr);
return -ENXIO;
}
+ iounmap(fpga_status_adr);
return 0;
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index fb8b4f7e48d..5b6acfcb2b8 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -62,15 +62,12 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
struct mtd_info *mtd = mfi->mtd;
switch (orig) {
- case 0:
- /* SEEK_SET */
+ case SEEK_SET:
break;
- case 1:
- /* SEEK_CUR */
+ case SEEK_CUR:
offset += file->f_pos;
break;
- case 2:
- /* SEEK_END */
+ case SEEK_END:
offset += mtd->size;
break;
default:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 168d3ba063c..c4d26de7434 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -57,6 +57,16 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->index = i;
mtd->usecount = 0;
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE)
+ && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
+ if (mtd->unlock(mtd, 0, mtd->size))
+ printk(KERN_WARNING
+ "%s: unlock failed, "
+ "writes may not work\n",
+ mtd->name);
+ }
+
DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
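
With this hunk the MTD core unlocks power-up-locked chips once, at registration, instead of every map driver carrying its own unlock call. A chip driver opts in with two flags and an unlock method; a hedged sketch, where my_register() and my_chip_unlock() are hypothetical:

    static int my_register(struct mtd_info *mtd)
    {
        /* declare that the chip powers up write-protected, so the
         * core unlocks the whole device at add time */
        mtd->flags |= MTD_WRITEABLE | MTD_STUPID_LOCK;
        mtd->unlock = my_chip_unlock;

        /* add_mtd_device() will call mtd->unlock(mtd, 0, mtd->size) */
        return add_mtd_device(mtd);
    }
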
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3db77eec0ed..c99302ed382 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -11,7 +11,7 @@ config MTD_NAND
help
This enables support for accessing all type of NAND flash
devices. For further information see
- <http://www.linux-mtd.infradead.org/tech/nand.html>.
+ <http://www.linux-mtd.infradead.org/doc/nand.html>.
config MTD_NAND_VERIFY_WRITE
bool "Verify NAND page writes"
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 31228334da1..09e421a9689 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -21,18 +21,7 @@
#include <linux/version.h>
#include <asm/io.h>
-/* fixme: this is ugly */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
#include <asm/mach-au1x00/au1xxx.h>
-#else
-#include <asm/au1000.h>
-#ifdef CONFIG_MIPS_PB1550
-#include <asm/pb1550.h>
-#endif
-#ifdef CONFIG_MIPS_DB1550
-#include <asm/db1x00.h>
-#endif
-#endif
/*
* MTD structure for NAND controller
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 516c0e5e564..12017f3c6bd 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -198,6 +198,9 @@ static void __exit ep7312_cleanup(void)
/* Release resources, unregister device */
nand_release(ap7312_mtd);
+ /* Release io resource */
+ iounmap((void *)this->IO_ADDR_R);
+
/* Free the MTD device structure */
kfree(ep7312_mtd);
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index c8cbc00243f..975b2ef6112 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1204,7 +1204,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
pos = steps * (eccsize + chunk);
steps = 0;
} else
- pos = eccsize + chunk;
+ pos = eccsize;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
for (i = 0; i < steps; i++) {
@@ -1567,7 +1567,7 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
bytes = min_t(size_t, len, free->length);
boffs = free->offset;
}
- memcpy(chip->oob_poi + woffs, oob, bytes);
+ memcpy(chip->oob_poi + boffs, oob, bytes);
oob += bytes;
}
return oob;
@@ -2224,7 +2224,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
}
/* Try to identify manufacturer */
- for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_id++) {
+ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
if (nand_manuf_ids[maf_idx].id == *maf_id)
break;
}
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index e5bd88f2d56..039c759cfbf 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -168,7 +168,7 @@ static void ndfc_chip_init(struct ndfc_nand_mtd *mtd)
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
- chip->ecclayout = mtd->pl_chip->ecclayout;
+ chip->ecclayout = chip->ecc.layout = mtd->pl_chip->ecclayout;
mtd->mtd.priv = chip;
mtd->mtd.owner = THIS_MODULE;
}
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 22fa65c12ab..eb7d4d443de 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -276,6 +276,7 @@ static int __init ppchameleonevb_init(void)
/* Scan to find existence of the device (it could not be mounted) */
if (nand_scan(ppchameleon_mtd, 1)) {
iounmap((void *)ppchameleon_fio_base);
+ ppchameleon_fio_base = NULL;
kfree(ppchameleon_mtd);
goto nand_evb_init;
}
@@ -314,6 +315,8 @@ static int __init ppchameleonevb_init(void)
ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!ppchameleonevb_mtd) {
printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
+ if (ppchameleon_fio_base)
+ iounmap(ppchameleon_fio_base);
return -ENOMEM;
}
@@ -322,6 +325,8 @@ static int __init ppchameleonevb_init(void)
if (!ppchameleonevb_fio_base) {
printk("ioremap PPChameleonEVB NAND flash failed\n");
kfree(ppchameleonevb_mtd);
+ if (ppchameleon_fio_base)
+ iounmap(ppchameleon_fio_base);
return -EIO;
}
@@ -378,6 +383,8 @@ static int __init ppchameleonevb_init(void)
if (nand_scan(ppchameleonevb_mtd, 1)) {
iounmap((void *)ppchameleonevb_fio_base);
kfree(ppchameleonevb_mtd);
+ if (ppchameleon_fio_base)
+ iounmap(ppchameleon_fio_base);
return -ENXIO;
}
#ifdef CONFIG_MTD_PARTITIONS
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index fbeedc3184e..51c7288ab49 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -78,7 +78,7 @@ static struct mtd_partition sharpsl_nand_default_partition_info[] = {
/*
* hardware specific access to control-lines
* ctrl:
- * NAND_CNE: bit 0 -> bit 0 & 4
+ * NAND_CNE: bit 0 -> ! bit 0 & 4
* NAND_CLE: bit 1 -> bit 1
* NAND_ALE: bit 2 -> bit 2
*
@@ -92,7 +92,10 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned char bits = ctrl & 0x07;
bits |= (ctrl & 0x01) << 4;
- writeb((readb(FLASHCTL) & 0x17) | bits, FLASHCTL);
+
+ bits ^= 0x11;
+
+ writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL);
}
if (cmd != NAND_CMD_NONE)
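
A worked example may help with the sharpsl change: the updated comment ('! bit 0') says the chip-enable lines are driven active low, so after mirroring NCE into bits 0 and 4 the new XOR with 0x11 inverts exactly those two bits, and widening the register mask from & 0x17 to & ~0x17 stops stale CE state from leaking through. A standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned char flashctl = 0xff;  /* pretend register contents */
        unsigned char ctrl = 0x01;      /* NAND_NCE asserted */

        unsigned char bits = ctrl & 0x07;   /* NCE/CLE/ALE, as in the hunk */
        bits |= (ctrl & 0x01) << 4;         /* mirror NCE into bit 4 */
        bits ^= 0x11;                       /* invert: CE is active low */

        /* asserted NCE -> bits 0 and 4 end up low: prints 0xe8 */
        printf("0x%02x\n", (flashctl & ~0x17) | bits);
        return 0;
    }
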
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
new file mode 100644
index 00000000000..ddbf015f411
--- /dev/null
+++ b/drivers/mtd/ssfdc.c
@@ -0,0 +1,468 @@
+/*
+ * Linux driver for SSFDC Flash Translation Layer (Read only)
+ * (c) 2005 Eptar srl
+ * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
+ *
+ * Based on NTFL and MTDBLOCK_RO drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/blktrans.h>
+
+struct ssfdcr_record {
+ struct mtd_blktrans_dev mbd;
+ int usecount;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ int cis_block; /* block no. containing CIS/IDI */
+ int erase_size; /* phys_block_size */
+ unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on
+ a 128MB card) */
+ int map_len; /* no. of phys blocks on the card */
+};
+
+#define SSFDCR_MAJOR 257
+#define SSFDCR_PARTN_BITS 3
+
+#define SECTOR_SIZE 512
+#define SECTOR_SHIFT 9
+#define OOB_SIZE 16
+
+#define MAX_LOGIC_BLK_PER_ZONE 1000
+#define MAX_PHYS_BLK_PER_ZONE 1024
+
+#define KB(x) ( (x) * 1024L )
+#define MB(x) ( KB(x) * 1024L )
+
+/** CHS Table
+ 1MB 2MB 4MB 8MB 16MB 32MB 64MB 128MB
+NCylinder 125 125 250 250 500 500 500 500
+NHead 4 4 4 4 4 8 8 16
+NSector 4 8 8 16 16 16 32 32
+SumSector 2,000 4,000 8,000 16,000 32,000 64,000 128,000 256,000
+SectorSize 512 512 512 512 512 512 512 512
+**/
+
+typedef struct {
+ unsigned long size;
+ unsigned short cyl;
+ unsigned char head;
+ unsigned char sec;
+} chs_entry_t;
+
+/* Must be ordered by size */
+static const chs_entry_t chs_table[] = {
+ { MB( 1), 125, 4, 4 },
+ { MB( 2), 125, 4, 8 },
+ { MB( 4), 250, 4, 8 },
+ { MB( 8), 250, 4, 16 },
+ { MB( 16), 500, 4, 16 },
+ { MB( 32), 500, 8, 16 },
+ { MB( 64), 500, 8, 32 },
+ { MB(128), 500, 16, 32 },
+ { 0 },
+};
+
+static int get_chs(unsigned long size, unsigned short *cyl, unsigned char *head,
+ unsigned char *sec)
+{
+ int k;
+ int found = 0;
+
+ k = 0;
+ while (chs_table[k].size > 0 && size > chs_table[k].size)
+ k++;
+
+ if (chs_table[k].size > 0) {
+ if (cyl)
+ *cyl = chs_table[k].cyl;
+ if (head)
+ *head = chs_table[k].head;
+ if (sec)
+ *sec = chs_table[k].sec;
+ found = 1;
+ }
+
+ return found;
+}
+
+/* These bytes are the signature for the CIS/IDI sector */
+static const uint8_t cis_numbers[] = {
+ 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
+};
+
+/* Read and check for a valid CIS sector */
+static int get_valid_cis_sector(struct mtd_info *mtd)
+{
+ int ret, k, cis_sector;
+ size_t retlen;
+ loff_t offset;
+ uint8_t sect_buf[SECTOR_SIZE];
+
+ /*
+ * Look for the CIS/IDI sector on the first good block (give up after 4
+ * bad blocks). If the first good block doesn't contain the CIS
+ * signature, the flash is not SSFDC formatted
+ */
+ cis_sector = -1;
+ for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
+ if (!mtd->block_isbad(mtd, offset)) {
+ ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
+ sect_buf);
+
+ /* CIS pattern match on the sector buffer */
+ if ( ret < 0 || retlen != SECTOR_SIZE ) {
+ printk(KERN_WARNING
+ "SSFDC_RO:can't read CIS/IDI sector\n");
+ } else if ( !memcmp(sect_buf, cis_numbers,
+ sizeof(cis_numbers)) ) {
+ /* Found */
+ cis_sector = (int)(offset >> SECTOR_SHIFT);
+ } else {
+ DEBUG(MTD_DEBUG_LEVEL1,
+ "SSFDC_RO: CIS/IDI sector not found"
+ " on %s (mtd%d)\n", mtd->name,
+ mtd->index);
+ }
+ break;
+ }
+ }
+
+ return cis_sector;
+}
+
+/* Read physical sector (wrapper to MTD_READ) */
+static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
+ int sect_no)
+{
+ int ret;
+ size_t retlen;
+ loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
+
+ ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+ if (ret < 0 || retlen != SECTOR_SIZE)
+ return -1;
+
+ return 0;
+}
+
+/* Read redundancy area (wrapper to MTD_READ_OOB) */
+static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
+{
+ struct mtd_oob_ops ops;
+ int ret;
+
+ ops.mode = MTD_OOB_RAW;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.len = OOB_SIZE;
+ ops.oobbuf = buf;
+ ops.datbuf = NULL;
+
+ ret = mtd->read_oob(mtd, offs, &ops);
+ if (ret < 0 || ops.retlen != OOB_SIZE)
+ return -1;
+
+ return 0;
+}
+
+/* Parity calculator on an n-bit word */
+static int get_parity(int number, int size)
+{
+ int k;
+ int parity;
+
+ parity = 1;
+ for (k = 0; k < size; k++) {
+ parity += (number >> k);
+ parity &= 1;
+ }
+ return parity;
+}
+
+/* Read and validate the logical block address field stored in the OOB */
+static int get_logical_address(uint8_t *oob_buf)
+{
+ int block_address, parity;
+ int offset[2] = {6, 11}; /* offset of the 2 address fields within OOB */
+ int j;
+ int ok = 0;
+
+ /*
+ * Look for the first valid logical address field
+ * A valid field carries a fixed pattern in its most significant bits
+ * and passes the parity check
+ */
+ for (j = 0; j < ARRAY_SIZE(offset); j++) {
+ block_address = ((int)oob_buf[offset[j]] << 8) |
+ oob_buf[offset[j]+1];
+
+ /* Check for the signature bits in the address field (MSBits) */
+ if ((block_address & ~0x7FF) == 0x1000) {
+ parity = block_address & 0x01;
+ block_address &= 0x7FF;
+ block_address >>= 1;
+
+ if (get_parity(block_address, 10) != parity) {
+ DEBUG(MTD_DEBUG_LEVEL0,
+ "SSFDC_RO: logical address field%d"
+ "parity error(0x%04X)\n", j+1,
+ block_address);
+ } else {
+ ok = 1;
+ break;
+ }
+ }
+ }
+
+ if ( !ok )
+ block_address = -2;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n",
+ block_address);
+
+ return block_address;
+}
+
+/* Build the logic block map */
+static int build_logical_block_map(struct ssfdcr_record *ssfdc)
+{
+ unsigned long offset;
+ uint8_t oob_buf[OOB_SIZE];
+ int ret, block_address, phys_block;
+ struct mtd_info *mtd = ssfdc->mbd.mtd;
+
+ DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
+ ssfdc->map_len, (unsigned long)ssfdc->map_len *
+ ssfdc->erase_size / 1024 );
+
+ /* Scan every physical block, skip CIS block */
+ for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
+ phys_block++) {
+ offset = (unsigned long)phys_block * ssfdc->erase_size;
+ if (mtd->block_isbad(mtd, offset))
+ continue; /* skip bad blocks */
+
+ ret = read_raw_oob(mtd, offset, oob_buf);
+ if (ret < 0) {
+ DEBUG(MTD_DEBUG_LEVEL0,
+ "SSFDC_RO: mtd read_oob() failed at %lu\n",
+ offset);
+ return -1;
+ }
+ block_address = get_logical_address(oob_buf);
+
+ /* Skip invalid addresses */
+ if (block_address >= 0 &&
+ block_address < MAX_LOGIC_BLK_PER_ZONE) {
+ int zone_index;
+
+ zone_index = phys_block / MAX_PHYS_BLK_PER_ZONE;
+ block_address += zone_index * MAX_LOGIC_BLK_PER_ZONE;
+ ssfdc->logic_block_map[block_address] =
+ (unsigned short)phys_block;
+
+ DEBUG(MTD_DEBUG_LEVEL2,
+ "SSFDC_RO: build_block_map() phys_block=%d,"
+ "logic_block_addr=%d, zone=%d\n",
+ phys_block, block_address, zone_index);
+ }
+ }
+ return 0;
+}
+
+static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct ssfdcr_record *ssfdc;
+ int cis_sector;
+
+ /* Check for small page NAND flash */
+ if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE)
+ return;
+
+ /* Check for SSFDC format by reading CIS/IDI sector */
+ cis_sector = get_valid_cis_sector(mtd);
+ if (cis_sector == -1)
+ return;
+
+ ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
+ if (!ssfdc) {
+ printk(KERN_WARNING
+ "SSFDC_RO: out of memory for data structures\n");
+ return;
+ }
+
+ ssfdc->mbd.mtd = mtd;
+ ssfdc->mbd.devnum = -1;
+ ssfdc->mbd.blksize = SECTOR_SIZE;
+ ssfdc->mbd.tr = tr;
+ ssfdc->mbd.readonly = 1;
+
+ ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
+ ssfdc->erase_size = mtd->erasesize;
+ ssfdc->map_len = mtd->size / mtd->erasesize;
+
+ DEBUG(MTD_DEBUG_LEVEL1,
+ "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
+ ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
+ (ssfdc->map_len + MAX_PHYS_BLK_PER_ZONE - 1) /
+ MAX_PHYS_BLK_PER_ZONE);
+
+ /* Set geometry */
+ ssfdc->heads = 16;
+ ssfdc->sectors = 32;
+ get_chs( mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
+ ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) /
+ ((long)ssfdc->sectors * (long)ssfdc->heads));
+
+ DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
+ ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
+ (long)ssfdc->cylinders * (long)ssfdc->heads *
+ (long)ssfdc->sectors );
+
+ ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders *
+ (long)ssfdc->sectors;
+
+ /* Allocate logical block map */
+ ssfdc->logic_block_map = kmalloc( sizeof(ssfdc->logic_block_map[0]) *
+ ssfdc->map_len, GFP_KERNEL);
+ if (!ssfdc->logic_block_map) {
+ printk(KERN_WARNING
+ "SSFDC_RO: out of memory for data structures\n");
+ goto out_err;
+ }
+ memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
+ ssfdc->map_len);
+
+ /* Build logical block map */
+ if (build_logical_block_map(ssfdc) < 0)
+ goto out_err;
+
+ /* Register device + partitions */
+ if (add_mtd_blktrans_dev(&ssfdc->mbd))
+ goto out_err;
+
+ printk(KERN_INFO "SSFDC_RO: Found ssfdc%c on mtd%d (%s)\n",
+ ssfdc->mbd.devnum + 'a', mtd->index, mtd->name);
+ return;
+
+out_err:
+ kfree(ssfdc->logic_block_map);
+ kfree(ssfdc);
+}
+
+static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+
+ DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+ kfree(ssfdc->logic_block_map);
+ kfree(ssfdc);
+}
+
+static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long logic_sect_no, char *buf)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+ int sectors_per_block, offset, block_address;
+
+ sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;
+ offset = (int)(logic_sect_no % sectors_per_block);
+ block_address = (int)(logic_sect_no / sectors_per_block);
+
+ DEBUG(MTD_DEBUG_LEVEL3,
+ "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
+ " block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
+ block_address);
+
+ if (block_address >= ssfdc->map_len)
+ BUG();
+
+ block_address = ssfdc->logic_block_map[block_address];
+
+ DEBUG(MTD_DEBUG_LEVEL3,
+ "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
+ block_address);
+
+ if (block_address < 0xffff) {
+ unsigned long sect_no;
+
+ sect_no = (unsigned long)block_address * sectors_per_block +
+ offset;
+
+ DEBUG(MTD_DEBUG_LEVEL3,
+ "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
+ sect_no);
+
+ if (read_physical_sector( ssfdc->mbd.mtd, buf, sect_no ) < 0)
+ return -EIO;
+ } else {
+ memset(buf, 0xff, SECTOR_SIZE);
+ }
+
+ return 0;
+}
+
+static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
+
+ DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
+ ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
+
+ geo->heads = ssfdc->heads;
+ geo->sectors = ssfdc->sectors;
+ geo->cylinders = ssfdc->cylinders;
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * Module stuff
+ *
+ ****************************************************************************/
+
+static struct mtd_blktrans_ops ssfdcr_tr = {
+ .name = "ssfdc",
+ .major = SSFDCR_MAJOR,
+ .part_bits = SSFDCR_PARTN_BITS,
+ .getgeo = ssfdcr_getgeo,
+ .readsect = ssfdcr_readsect,
+ .add_mtd = ssfdcr_add_mtd,
+ .remove_dev = ssfdcr_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_ssfdcr(void)
+{
+ printk(KERN_INFO "SSFDC read-only Flash Translation layer\n");
+
+ return register_mtd_blktrans(&ssfdcr_tr);
+}
+
+static void __exit cleanup_ssfdcr(void)
+{
+ deregister_mtd_blktrans(&ssfdcr_tr);
+}
+
+module_init(init_ssfdcr);
+module_exit(cleanup_ssfdcr);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
+MODULE_DESCRIPTION("Flash Translation Layer for read-only SSFDC SmartMedia card");
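
The trickiest part of the new driver is the logical-address field read from the OOB area, so here is the logic of get_parity()/get_logical_address() above restated as a small standalone program: a valid 16-bit field carries the 0x1000 signature in its upper bits, bit 0 is an odd-parity bit over the 10 address bits, and the block address itself sits in bits 10..1.

    #include <stdio.h>

    /* Parity as the driver computes it: starts at 1, so a stored field
     * is valid when the 10 address bits plus the parity bit sum to odd */
    static int get_parity(int number, int size)
    {
        int parity = 1;
        for (int k = 0; k < size; k++) {
            parity += number >> k;
            parity &= 1;
        }
        return parity;
    }

    static int decode(int field)
    {
        if ((field & ~0x7FF) != 0x1000)     /* signature in the MSBits */
            return -1;
        int parity = field & 0x01;          /* bit 0: parity */
        int addr = (field & 0x7FF) >> 1;    /* bits 10..1: block address */
        return get_parity(addr, 10) == parity ? addr : -1;
    }

    int main(void)
    {
        printf("%d\n", decode(0x1001));     /* block 0, parity 1: prints 0 */
        printf("%d\n", decode(0x1000));     /* bad parity: prints -1 */
        return 0;
    }
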
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 30b3671d833..a2bd8119270 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2393,7 +2393,7 @@ config MYRI10GE
you will need a newer firmware image.
You may get this image or more information, at:
- <http://www.myri.com/Myri-10G/>
+ <http://www.myri.com/scs/download-Myri10GE.html>
To compile this driver as a module, choose M here and read
<file:Documentation/networking/net-modules.txt>. The module
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 6fad83f24c4..71160966563 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1264,7 +1264,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
{
int j, rev, ret;
struct bmac_data *bp;
- unsigned char *addr;
+ const unsigned char *prop_addr;
+ unsigned char addr[6];
struct net_device *dev;
int is_bmac_plus = ((int)match->data) != 0;
@@ -1272,14 +1273,16 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
return -ENODEV;
}
- addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
- if (addr == NULL) {
- addr = get_property(macio_get_of_node(mdev), "local-mac-address", NULL);
- if (addr == NULL) {
+ prop_addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
+ if (prop_addr == NULL) {
+ prop_addr = get_property(macio_get_of_node(mdev),
+ "local-mac-address", NULL);
+ if (prop_addr == NULL) {
printk(KERN_ERR "BMAC: Can't get mac-address\n");
return -ENODEV;
}
}
+ memcpy(addr, prop_addr, sizeof(addr));
dev = alloc_etherdev(PRIV_BYTES);
if (!dev) {
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 3d76fa144c4..a860ebbbf81 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -377,8 +377,8 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
kfree(db->data_req);
}
- if (db->addr_res != NULL) {
- release_resource(db->addr_res);
+ if (db->addr_req != NULL) {
+ release_resource(db->addr_req);
kfree(db->addr_req);
}
}
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 91ef5f2fd76..ce850f1078b 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -173,8 +173,11 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int debug = 3;
+static int eeprom_bad_csum_allow = 0;
module_param(debug, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
#define DPRINTK(nlevel, klevel, fmt, args...) \
(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
@@ -756,7 +759,8 @@ static int e100_eeprom_load(struct nic *nic)
checksum = le16_to_cpu(0xBABA - checksum);
if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
- return -EAGAIN;
+ if (!eeprom_bad_csum_allow)
+ return -EAGAIN;
}
return 0;
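
The new eeprom_bad_csum_allow parameter downgrades a corrupted EEPROM checksum from a fatal probe failure to a warning, so a card with a blank or mangled EEPROM but otherwise working hardware can still be brought up; loading the driver as "modprobe e100 eeprom_bad_csum_allow=1" (a usage sketch) relaxes the check.
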
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 726f43d5593..98ef9f85482 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1433,8 +1433,8 @@ e1000_configure_tx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, TDT, 0);
E1000_WRITE_REG(hw, TDH, 0);
- adapter->tx_ring[0].tdh = E1000_TDH;
- adapter->tx_ring[0].tdt = E1000_TDT;
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
break;
}
@@ -1840,8 +1840,8 @@ e1000_configure_rx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
E1000_WRITE_REG(hw, RDT, 0);
E1000_WRITE_REG(hw, RDH, 0);
- adapter->rx_ring[0].rdh = E1000_RDH;
- adapter->rx_ring[0].rdt = E1000_RDT;
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
break;
}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 0464e78f733..e56eac88b80 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -702,7 +702,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
desc[3].desc,
desc[4].desc,
desc[5].desc,
- correlator);
+ correlator,
+ &correlator);
} while ((lpar_rc == H_BUSY) && (retry_count--));
if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 8385bf83650..f5b25bff154 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -41,16 +41,6 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
-/* hcall numbers */
-#define H_VIO_SIGNAL 0x104
-#define H_REGISTER_LOGICAL_LAN 0x114
-#define H_FREE_LOGICAL_LAN 0x118
-#define H_ADD_LOGICAL_LAN_BUFFER 0x11C
-#define H_SEND_LOGICAL_LAN 0x120
-#define H_MULTICAST_CTRL 0x130
-#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
-#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
-
/* hcall macros */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
@@ -61,8 +51,21 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
-#define h_send_logical_lan(ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator) \
- plpar_hcall_8arg_2ret(H_SEND_LOGICAL_LAN, ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator, &correlator)
+static inline long h_send_logical_lan(unsigned long unit_address,
+ unsigned long desc1, unsigned long desc2, unsigned long desc3,
+ unsigned long desc4, unsigned long desc5, unsigned long desc6,
+ unsigned long corellator_in, unsigned long *corellator_out)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
+ desc2, desc3, desc4, desc5, desc6, corellator_in);
+
+ *corellator_out = retbuf[0];
+
+ return rc;
+}
#define h_multicast_ctrl(ua, cmd, mac) \
plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)
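
Replacing the macro with a real inline function buys type checking and makes the output correlator explicit: the old macro quietly took the address of its last argument, which is why the caller in ibmveth.c above now passes both correlator and &correlator. The call site then reads roughly as below; the unit_address expression is assumed from context:

    unsigned long correlator = 0;
    long lpar_rc;

    do {
        lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
                                     desc[0].desc, desc[1].desc,
                                     desc[2].desc, desc[3].desc,
                                     desc[4].desc, desc[5].desc,
                                     correlator, &correlator);
    } while ((lpar_rc == H_BUSY) && (retry_count--));
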
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index b783a6984ab..393aba95cf1 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -442,16 +442,16 @@ init_rx_bufs(struct net_device *dev, int num) {
if (rbd) {
rbd->pad = 0;
rbd->count = 0;
- rbd->skb = dev_alloc_skb(RX_SKB_SIZE);
+ rbd->skb = dev_alloc_skb(RX_SKBSIZE);
if (!rbd->skb) {
printk("dev_alloc_skb failed");
}
rbd->next = rfd->rbd;
if (i) {
rfd->rbd->prev = rbd;
- rbd->size = RX_SKB_SIZE;
+ rbd->size = RX_SKBSIZE;
} else {
- rbd->size = (RX_SKB_SIZE | RBD_EL);
+ rbd->size = (RX_SKBSIZE | RBD_EL);
lp->rbd_tail = rbd;
}
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 29e4b5aa6ea..5d80e0e6a8e 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -113,7 +113,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
struct device_node *mace = macio_get_of_node(mdev);
struct net_device *dev;
struct mace_data *mp;
- unsigned char *addr;
+ const unsigned char *addr;
int j, rev, rc = -EBUSY;
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 760c61b9886..59de3e74d2d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -385,6 +385,8 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
struct pkt_info pkt_info;
while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
+ dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
+ DMA_FROM_DEVICE);
mp->rx_desc_count--;
received_packets++;
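
The added dma_unmap_single() is a correctness fix on platforms with non-coherent DMA or an IOMMU: a buffer mapped DMA_FROM_DEVICE belongs to the device until it is unmapped, and only then may the CPU read the received frame. The general pairing, sketched with a hypothetical receive helper:

    static void rx_complete(struct device *dev, struct sk_buff *skb,
                            dma_addr_t buf)
    {
        /* buf came from dma_map_single(dev, skb->data,
         * ETH_RX_SKB_SIZE, DMA_FROM_DEVICE) at refill time */

        /* hand ownership of the buffer back to the CPU */
        dma_unmap_single(dev, buf, ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);

        /* only now is it safe to look at skb->data */
        netif_rx(skb);
    }
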
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 51ff9a9d1bb..f3655fd772f 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -43,6 +43,7 @@
* deprecated in 2.6
*/
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
@@ -64,12 +65,13 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
MODULE_VERSION("1.0.2");
-static void
+static unsigned int
setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
{
sg[0].page = virt_to_page(address);
sg[0].offset = offset_in_page(address);
sg[0].length = length;
+ return length;
}
#define SHA1_PAD_SIZE 40
@@ -95,8 +97,8 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_tfm *arc4;
- struct crypto_tfm *sha1;
+ struct crypto_blkcipher *arc4;
+ struct crypto_hash *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -136,14 +138,21 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
{
+ struct hash_desc desc;
struct scatterlist sg[4];
+ unsigned int nbytes;
- setup_sg(&sg[0], state->master_key, state->keylen);
- setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1));
- setup_sg(&sg[2], state->session_key, state->keylen);
- setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2));
+ nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
+ nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
+ sizeof(sha_pad->sha_pad1));
+ nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
+ nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
+ sizeof(sha_pad->sha_pad2));
- crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest);
+ desc.tfm = state->sha1;
+ desc.flags = 0;
+
+ crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
memcpy(InterimKey, state->sha1_digest, state->keylen);
}
@@ -156,14 +165,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
unsigned char InterimKey[MPPE_MAX_KEY_LEN];
struct scatterlist sg_in[1], sg_out[1];
+ struct blkcipher_desc desc = { .tfm = state->arc4 };
get_new_key_from_sha(state, InterimKey);
if (!initial_key) {
- crypto_cipher_setkey(state->arc4, InterimKey, state->keylen);
+ crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
setup_sg(sg_in, InterimKey, state->keylen);
setup_sg(sg_out, state->session_key, state->keylen);
- if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in,
- state->keylen) != 0) {
+ if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+ state->keylen) != 0) {
printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
}
} else {
@@ -175,7 +185,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_cipher_setkey(state->arc4, state->session_key, state->keylen);
+ crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
}
/*
@@ -196,15 +206,19 @@ static void *mppe_alloc(unsigned char *options, int optlen)
memset(state, 0, sizeof(*state));
- state->arc4 = crypto_alloc_tfm("arc4", 0);
- if (!state->arc4)
+ state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(state->arc4)) {
+ state->arc4 = NULL;
goto out_free;
+ }
- state->sha1 = crypto_alloc_tfm("sha1", 0);
- if (!state->sha1)
+ state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(state->sha1)) {
+ state->sha1 = NULL;
goto out_free;
+ }
- digestsize = crypto_tfm_alg_digestsize(state->sha1);
+ digestsize = crypto_hash_digestsize(state->sha1);
if (digestsize < MPPE_MAX_KEY_LEN)
goto out_free;
@@ -229,9 +243,9 @@ static void *mppe_alloc(unsigned char *options, int optlen)
if (state->sha1_digest)
kfree(state->sha1_digest);
if (state->sha1)
- crypto_free_tfm(state->sha1);
+ crypto_free_hash(state->sha1);
if (state->arc4)
- crypto_free_tfm(state->arc4);
+ crypto_free_blkcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -247,9 +261,9 @@ static void mppe_free(void *arg)
if (state->sha1_digest)
kfree(state->sha1_digest);
if (state->sha1)
- crypto_free_tfm(state->sha1);
+ crypto_free_hash(state->sha1);
if (state->arc4)
- crypto_free_tfm(state->arc4);
+ crypto_free_blkcipher(state->arc4);
kfree(state);
}
}
@@ -356,6 +370,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+ struct blkcipher_desc desc = { .tfm = state->arc4 };
int proto;
struct scatterlist sg_in[1], sg_out[1];
@@ -413,7 +428,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
/* Encrypt packet */
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
- if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) {
+ if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
return -1;
}
@@ -462,6 +477,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+ struct blkcipher_desc desc = { .tfm = state->arc4 };
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
int sanity = 0;
@@ -599,7 +615,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
*/
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
- if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) {
+ if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
return DECOMP_ERROR;
}
@@ -619,7 +635,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
/* And finally, decrypt the rest of the packet. */
setup_sg(sg_in, ibuf + 1, isize - 1);
setup_sg(sg_out, obuf + 1, osize - 1);
- if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) {
+ if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
return DECOMP_ERROR;
}
@@ -694,8 +710,8 @@ static struct compressor ppp_mppe = {
static int __init ppp_mppe_init(void)
{
int answer;
- if (!(crypto_alg_available("arc4", 0) &&
- crypto_alg_available("sha1", 0)))
+ if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
+ crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
return -ENODEV;
sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
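
The ppp_mppe changes track the crypto-layer rework of this period: the untyped crypto_tfm and crypto_digest_digest() give way to a typed crypto_hash driven through a hash_desc, and the digest call takes the total byte count of the scatterlist, which is why setup_sg() now returns its length for the caller to accumulate. The skeleton of the new pattern, assuming setup_sg() as defined above:

    static int sha1_digest_two(const void *buf_a, unsigned int len_a,
                               const void *buf_b, unsigned int len_b,
                               u8 *digest)
    {
        struct hash_desc desc = { .flags = 0 };
        struct scatterlist sg[2];
        unsigned int nbytes;

        desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(desc.tfm))
            return PTR_ERR(desc.tfm);

        nbytes  = setup_sg(&sg[0], buf_a, len_a);
        nbytes += setup_sg(&sg[1], buf_b, len_b);

        /* one SHA-1 over both buffers; nbytes is the combined length */
        crypto_hash_digest(&desc, sg, nbytes, digest);

        crypto_free_hash(desc.tfm);
        return 0;
    }
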
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 88907218457..d64e718afbd 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1697,10 +1697,10 @@ spider_net_setup_phy(struct spider_net_card *card)
*/
static int
spider_net_download_firmware(struct spider_net_card *card,
- u8 *firmware_ptr)
+ const void *firmware_ptr)
{
int sequencer, i;
- u32 *fw_ptr = (u32 *)firmware_ptr;
+ const u32 *fw_ptr = firmware_ptr;
/* stop sequencers */
spider_net_write_reg(card, SPIDER_NET_GSINIT,
@@ -1757,7 +1757,7 @@ spider_net_init_firmware(struct spider_net_card *card)
{
struct firmware *firmware = NULL;
struct device_node *dn;
- u8 *fw_prop = NULL;
+ const u8 *fw_prop = NULL;
int err = -ENOENT;
int fw_size;
@@ -1783,7 +1783,7 @@ try_host_fw:
if (!dn)
goto out_err;
- fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
+ fw_prop = get_property(dn, "firmware", &fw_size);
if (!fw_prop)
goto out_err;
@@ -1986,7 +1986,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
struct net_device *netdev = card->netdev;
struct device_node *dn;
struct sockaddr addr;
- u8 *mac;
+ const u8 *mac;
SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &card->pdev->dev);
@@ -2019,7 +2019,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
if (!dn)
return -EIO;
- mac = (u8 *)get_property(dn, "local-mac-address", NULL);
+ mac = get_property(dn, "local-mac-address", NULL);
if (!mac)
return -EIO;
memcpy(addr.sa_data, mac, ETH_ALEN);
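
bmac, mace, spider_net and sungem all get the same treatment: get_property() now hands back a const pointer into the device tree, so callers must stop casting the qualifier away and must copy the bytes out if they need a writable buffer, as bmac_probe() does above. The shape of a compliant caller, with read_mac() as a hypothetical name:

    static int read_mac(struct device_node *dn, u8 *mac)
    {
        const u8 *prop = get_property(dn, "local-mac-address", NULL);

        if (!prop)
            return -ENODEV;

        /* the property data itself is read-only; copy it out */
        memcpy(mac, prop, ETH_ALEN);
        return 0;
    }
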
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b70bbd74897..d7b1d1882ca 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2896,7 +2896,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
if (use_idprom)
memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
#elif defined(CONFIG_PPC_PMAC)
- unsigned char *addr;
+ const unsigned char *addr;
addr = get_property(gp->of_node, "local-mac-address", NULL);
if (addr == NULL) {
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0e3fdf7c6dd..ec0413609f3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1566,20 +1566,21 @@ static int __exit sunlance_sun4_remove(void)
static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
struct sbus_dev *sdev = to_sbus_device(&dev->dev);
- struct device_node *dp = dev->node;
int err;
- if (!strcmp(dp->name, "le")) {
- err = sparc_lance_probe_one(sdev, NULL, NULL);
- } else if (!strcmp(dp->name, "ledma")) {
- struct sbus_dma *ledma = find_ledma(sdev);
+ if (sdev->parent) {
+ struct of_device *parent = &sdev->parent->ofdev;
- err = sparc_lance_probe_one(sdev->child, ledma, NULL);
- } else {
- BUG_ON(strcmp(dp->name, "lebuffer"));
+ if (!strcmp(parent->node->name, "ledma")) {
+ struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev));
- err = sparc_lance_probe_one(sdev->child, NULL, sdev);
- }
+ err = sparc_lance_probe_one(sdev, ledma, NULL);
+ } else if (!strcmp(parent->node->name, "lebuffer")) {
+ err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev));
+ } else
+ err = sparc_lance_probe_one(sdev, NULL, NULL);
+ } else
+ err = sparc_lance_probe_one(sdev, NULL, NULL);
return err;
}
@@ -1604,12 +1605,6 @@ static struct of_device_id sunlance_sbus_match[] = {
{
.name = "le",
},
- {
- .name = "ledma",
- },
- {
- .name = "lebuffer",
- },
{},
};
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a4dd1394271..170c500169d 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -19,6 +19,7 @@
======================================================================*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -1203,7 +1204,7 @@ struct airo_info {
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
/* MIC stuff */
- struct crypto_tfm *tfm;
+ struct crypto_cipher *tfm;
mic_module mod[2];
mic_statistics micstats;
HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
@@ -1271,7 +1272,8 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
static void MoveWindow(miccntx *context, u32 micSeq);
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *);
+static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
+ struct crypto_cipher *tfm);
static void emmh32_init(emmh32_context *context);
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
static void emmh32_final(emmh32_context *context, u8 digest[4]);
@@ -1339,10 +1341,11 @@ static int micsetup(struct airo_info *ai) {
int i;
if (ai->tfm == NULL)
- ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP);
+ ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
- if (ai->tfm == NULL) {
+ if (IS_ERR(ai->tfm)) {
airo_print_err(ai->dev->name, "failed to load transform for AES");
+ ai->tfm = NULL;
return ERROR;
}
@@ -1608,7 +1611,8 @@ static void MoveWindow(miccntx *context, u32 micSeq)
static unsigned char aes_counter[16];
/* expand the key to fill the MMH coefficient array */
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm)
+static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
+ struct crypto_cipher *tfm)
{
/* take the keying material, expand if necessary, truncate at 16-bytes */
/* run through AES counter mode to generate context->coeff[] */
@@ -1616,7 +1620,6 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
int i,j;
u32 counter;
u8 *cipher, plain[16];
- struct scatterlist sg[1];
crypto_cipher_setkey(tfm, pkey, 16);
counter = 0;
@@ -1627,9 +1630,8 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
aes_counter[12] = (u8)(counter >> 24);
counter++;
memcpy (plain, aes_counter, 16);
- sg_set_buf(sg, plain, 16);
- crypto_cipher_encrypt(tfm, sg, sg, 16);
- cipher = kmap(sg->page) + sg->offset;
+ crypto_cipher_encrypt_one(tfm, plain, plain);
+ cipher = plain;
for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) {
context->coeff[i++] = ntohl(*(u32 *)&cipher[j]);
j += 4;
@@ -2432,7 +2434,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
ai->shared, ai->shared_dma);
}
}
- crypto_free_tfm(ai->tfm);
+ crypto_free_cipher(ai->tfm);
del_airo_dev( dev );
free_netdev( dev );
}
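
Same crypto migration as ppp_mppe, but for a single-block cipher: the scatterlist setup, crypto_cipher_encrypt() call and kmap() round-trip collapse into crypto_cipher_encrypt_one(), which operates directly on a 16-byte buffer. The counter-mode keystream step from emmh32_setseed(), reduced to a hedged helper (tfm allocated and keyed as micsetup()/emmh32_setseed() do above):

    static void aes_ctr_block(struct crypto_cipher *tfm,
                              const u8 *counter, u8 *out)
    {
        memcpy(out, counter, 16);                 /* plaintext = counter */
        crypto_cipher_encrypt_one(tfm, out, out); /* one block, in place */
        /* out now holds 16 keystream bytes */
    }
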
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index fd31885c684..ccaf28e8db0 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -467,6 +467,7 @@ static int arp_query(unsigned char *haddr, u32 paddr,
struct net_device *dev)
{
struct neighbour *neighbor_entry;
+ int ret = 0;
neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev);
@@ -474,10 +475,11 @@ static int arp_query(unsigned char *haddr, u32 paddr,
neighbor_entry->used = jiffies;
if (neighbor_entry->nud_state & NUD_VALID) {
memcpy(haddr, neighbor_entry->ha, dev->addr_len);
- return 1;
+ ret = 1;
}
+ neigh_release(neighbor_entry);
}
- return 0;
+ return ret;
}
static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr,
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index da9d06bdb81..aa792821854 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1430,9 +1430,43 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size)
break;
}
+ switch (rate) {
+ case ZD_OFDM_RATE_6M:
+ case ZD_OFDM_RATE_9M:
+ i += 3;
+ break;
+ case ZD_OFDM_RATE_12M:
+ case ZD_OFDM_RATE_18M:
+ i += 5;
+ break;
+ case ZD_OFDM_RATE_24M:
+ case ZD_OFDM_RATE_36M:
+ i += 9;
+ break;
+ case ZD_OFDM_RATE_48M:
+ case ZD_OFDM_RATE_54M:
+ i += 15;
+ break;
+ default:
+ return -EINVAL;
+ }
+
return i;
}
+static int ofdm_qual_percent(u8 status_quality, u8 rate, unsigned int size)
+{
+ int r;
+
+ r = ofdm_qual_db(status_quality, rate, size);
+ ZD_ASSERT(r >= 0);
+ if (r < 0)
+ r = 0;
+
+ r = (r * 100)/29;
+ return r <= 100 ? r : 100;
+}
+
static unsigned int log10times100(unsigned int x)
{
static const u8 log10[] = {
@@ -1476,31 +1510,28 @@ static int cck_snr_db(u8 status_quality)
return r;
}
-static int rx_qual_db(const void *rx_frame, unsigned int size,
- const struct rx_status *status)
+static int cck_qual_percent(u8 status_quality)
{
- return (status->frame_status&ZD_RX_OFDM) ?
- ofdm_qual_db(status->signal_quality_ofdm,
- zd_ofdm_plcp_header_rate(rx_frame),
- size) :
- cck_snr_db(status->signal_quality_cck);
+ int r;
+
+ r = cck_snr_db(status_quality);
+ r = (100*r)/17;
+ return r <= 100 ? r : 100;
}
u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
const struct rx_status *status)
{
- int r = rx_qual_db(rx_frame, size, status);
- if (r < 0)
- r = 0;
- r = (r * 100) / 14;
- if (r > 100)
- r = 100;
- return r;
+ return (status->frame_status&ZD_RX_OFDM) ?
+ ofdm_qual_percent(status->signal_quality_ofdm,
+ zd_ofdm_plcp_header_rate(rx_frame),
+ size) :
+ cck_qual_percent(status->signal_quality_cck);
}
u8 zd_rx_strength_percent(u8 rssi)
{
- int r = (rssi*100) / 30;
+ int r = (rssi*100) / 41;
if (r > 100)
r = 100;
return (u8) r;
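
Where the old rx_qual_db() path scaled every reading by a single 100/14 factor, the replacement gives each modulation its own full scale: 29 dB for OFDM (after the per-rate adjustment added above) and 17 dB for CCK, both clamped at 100. A standalone check of the mappings:

    #include <stdio.h>

    static int clamp100(int r) { return r <= 100 ? r : 100; }

    int main(void)
    {
        printf("%d\n", clamp100(29 * 100 / 29));  /* OFDM full scale: 100 */
        printf("%d\n", clamp100(17 * 100 / 17));  /* CCK full scale: 100 */
        printf("%d\n", clamp100(10 * 100 / 29));  /* mid-range OFDM: 34 */
        return 0;
    }
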
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index d6f3e02a0b5..a9bd80a0861 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -816,13 +816,25 @@ static int filter_rx(struct ieee80211_device *ieee,
return -EINVAL;
}
-static void update_qual_rssi(struct zd_mac *mac, u8 qual_percent, u8 rssi)
+static void update_qual_rssi(struct zd_mac *mac,
+ const u8 *buffer, unsigned int length,
+ u8 qual_percent, u8 rssi_percent)
{
unsigned long flags;
+ struct ieee80211_hdr_3addr *hdr;
+ int i;
+
+ hdr = (struct ieee80211_hdr_3addr *)buffer;
+ if (length < offsetof(struct ieee80211_hdr_3addr, addr3))
+ return;
+ if (memcmp(hdr->addr2, zd_mac_to_ieee80211(mac)->bssid, ETH_ALEN) != 0)
+ return;
spin_lock_irqsave(&mac->lock, flags);
- mac->qual_average = (7 * mac->qual_average + qual_percent) / 8;
- mac->rssi_average = (7 * mac->rssi_average + rssi) / 8;
+ i = mac->stats_count % ZD_MAC_STATS_BUFFER_SIZE;
+ mac->qual_buffer[i] = qual_percent;
+ mac->rssi_buffer[i] = rssi_percent;
+ mac->stats_count++;
spin_unlock_irqrestore(&mac->lock, flags);
}
@@ -853,7 +865,6 @@ static int fill_rx_stats(struct ieee80211_rx_stats *stats,
if (stats->rate)
stats->mask |= IEEE80211_STATMASK_RATE;
- update_qual_rssi(mac, stats->signal, stats->rssi);
return 0;
}
@@ -877,6 +888,8 @@ int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length)
sizeof(struct rx_status);
buffer += ZD_PLCP_HEADER_SIZE;
+ update_qual_rssi(mac, buffer, length, stats.signal, stats.rssi);
+
r = filter_rx(ieee, buffer, length, &stats);
if (r <= 0)
return r;
@@ -981,17 +994,31 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
{
struct zd_mac *mac = zd_netdev_mac(ndev);
struct iw_statistics *iw_stats = &mac->iw_stats;
+ unsigned int i, count, qual_total, rssi_total;
memset(iw_stats, 0, sizeof(struct iw_statistics));
/* We are not setting the status, because ieee->state is not updated
* at all and this driver doesn't track authentication state.
*/
spin_lock_irq(&mac->lock);
- iw_stats->qual.qual = mac->qual_average;
- iw_stats->qual.level = mac->rssi_average;
- iw_stats->qual.updated = IW_QUAL_QUAL_UPDATED|IW_QUAL_LEVEL_UPDATED|
- IW_QUAL_NOISE_INVALID;
+ count = mac->stats_count < ZD_MAC_STATS_BUFFER_SIZE ?
+ mac->stats_count : ZD_MAC_STATS_BUFFER_SIZE;
+ qual_total = rssi_total = 0;
+ for (i = 0; i < count; i++) {
+ qual_total += mac->qual_buffer[i];
+ rssi_total += mac->rssi_buffer[i];
+ }
spin_unlock_irq(&mac->lock);
+ iw_stats->qual.updated = IW_QUAL_NOISE_INVALID;
+ if (count > 0) {
+ iw_stats->qual.qual = qual_total / count;
+ iw_stats->qual.level = rssi_total / count;
+ iw_stats->qual.updated |=
+ IW_QUAL_QUAL_UPDATED|IW_QUAL_LEVEL_UPDATED;
+ } else {
+ iw_stats->qual.updated |=
+ IW_QUAL_QUAL_INVALID|IW_QUAL_LEVEL_INVALID;
+ }
/* TODO: update counter */
return iw_stats;
}
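
Link-quality reporting thus moves from a per-frame exponential moving average to a plain mean over the last ZD_MAC_STATS_BUFFER_SIZE samples, with update_qual_rssi() additionally dropping frames that are not from the associated BSS. The ring-buffer bookkeeping in isolation, as a standalone check:

    #include <stdio.h>

    #define SIZE 16     /* ZD_MAC_STATS_BUFFER_SIZE */

    int main(void)
    {
        unsigned char buf[SIZE];
        unsigned int count = 0;

        /* record 20 samples; the ring keeps only the newest 16 */
        for (int s = 0; s < 20; s++)
            buf[count++ % SIZE] = (unsigned char)s;

        unsigned int n = count < SIZE ? count : SIZE, total = 0;
        for (unsigned int i = 0; i < n; i++)
            total += buf[i];

        printf("%u\n", total / n);  /* integer mean of samples 4..19: 11 */
        return 0;
    }
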
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 71e382c589e..b3ba49b8463 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -1,4 +1,4 @@
-/* zd_mac.c
+/* zd_mac.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -87,9 +87,9 @@ struct rx_length_info {
#define RX_LENGTH_INFO_TAG 0x697e
struct rx_status {
+ u8 signal_quality_cck;
/* rssi */
u8 signal_strength;
- u8 signal_quality_cck;
u8 signal_quality_ofdm;
u8 decryption_type;
u8 frame_status;
@@ -120,14 +120,17 @@ enum mac_flags {
MAC_FIXED_CHANNEL = 0x01,
};
+#define ZD_MAC_STATS_BUFFER_SIZE 16
+
struct zd_mac {
struct net_device *netdev;
struct zd_chip chip;
spinlock_t lock;
/* Unlocked reading possible */
struct iw_statistics iw_stats;
- u8 qual_average;
- u8 rssi_average;
+ unsigned int stats_count;
+ u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
+ u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
u8 regdomain;
u8 default_regdomain;
u8 requested_channel;
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 076bd6dcafa..7288a3eccfb 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -176,16 +176,16 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
return 0;
}
-static int get_children_props(struct device_node *dn, int **drc_indexes,
- int **drc_names, int **drc_types, int **drc_power_domains)
+static int get_children_props(struct device_node *dn, const int **drc_indexes,
+ const int **drc_names, const int **drc_types,
+ const int **drc_power_domains)
{
- int *indexes, *names;
- int *types, *domains;
+ const int *indexes, *names, *types, *domains;
- indexes = (int *) get_property(dn, "ibm,drc-indexes", NULL);
- names = (int *) get_property(dn, "ibm,drc-names", NULL);
- types = (int *) get_property(dn, "ibm,drc-types", NULL);
- domains = (int *) get_property(dn, "ibm,drc-power-domains", NULL);
+ indexes = get_property(dn, "ibm,drc-indexes", NULL);
+ names = get_property(dn, "ibm,drc-names", NULL);
+ types = get_property(dn, "ibm,drc-types", NULL);
+ domains = get_property(dn, "ibm,drc-power-domains", NULL);
if (!indexes || !names || !types || !domains) {
/* Slot does not have dynamically-removable children */
@@ -212,13 +212,13 @@ static int get_children_props(struct device_node *dn, int **drc_indexes,
int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
char **drc_name, char **drc_type, int *drc_power_domain)
{
- int *indexes, *names;
- int *types, *domains;
- unsigned int *my_index;
+ const int *indexes, *names;
+ const int *types, *domains;
+ const unsigned int *my_index;
char *name_tmp, *type_tmp;
int i, rc;
- my_index = (int *) get_property(dn, "ibm,my-drc-index", NULL);
+ my_index = get_property(dn, "ibm,my-drc-index", NULL);
if (!my_index) {
/* Node isn't DLPAR/hotplug capable */
return -EINVAL;
@@ -265,10 +265,10 @@ static int is_php_type(char *drc_type)
return 1;
}
-static int is_php_dn(struct device_node *dn, int **indexes, int **names,
- int **types, int **power_domains)
+static int is_php_dn(struct device_node *dn, const int **indexes,
+ const int **names, const int **types, const int **power_domains)
{
- int *drc_types;
+ const int *drc_types;
int rc;
rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
@@ -296,7 +296,7 @@ int rpaphp_add_slot(struct device_node *dn)
struct slot *slot;
int retval = 0;
int i;
- int *indexes, *names, *types, *power_domains;
+ const int *indexes, *names, *types, *power_domains;
char *name, *type;
dbg("Entry %s: dn->full_name=%s\n", __FUNCTION__, dn->full_name);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 73177429fe7..17e709e7d72 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -667,6 +667,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_vi
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq);
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq);
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index 4d36208ff8d..ae89b9b8874 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -213,17 +213,35 @@ config MONREADER
help
Character device driver for reading z/VM monitor service records
+config MONWRITER
+ tristate "API for writing z/VM monitor service records"
+ default "m"
+ help
+ Character device driver for writing z/VM monitor service records
+
endmenu
menu "Cryptographic devices"
-config Z90CRYPT
+config ZCRYPT
tristate "Support for PCI-attached cryptographic adapters"
- default "m"
- help
+ select ZCRYPT_MONOLITHIC if ZCRYPT="y"
+ default "m"
+ help
Select this option if you want to use a PCI-attached cryptographic
- adapter like the PCI Cryptographic Accelerator (PCICA) or the PCI
- Cryptographic Coprocessor (PCICC). This option is also available
- as a module called z90crypt.ko.
+ adapter like:
+ + PCI Cryptographic Accelerator (PCICA)
+ + PCI Cryptographic Coprocessor (PCICC)
+ + PCI-X Cryptographic Coprocessor (PCIXCC)
+ + Crypto Express2 Coprocessor (CEX2C)
+ + Crypto Express2 Accelerator (CEX2A)
+
+config ZCRYPT_MONOLITHIC
+ bool "Monolithic zcrypt module"
+ depends on ZCRYPT="m"
+ help
+ Select this option if you want to have a single module z90crypt.ko
+ that contains all parts of the crypto device driver (ap bus,
+ request router and all the card drivers).
endmenu
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d8e9b95f0a1..d0647d116ea 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -52,7 +52,7 @@ static void dasd_setup_queue(struct dasd_device * device);
static void dasd_free_queue(struct dasd_device * device);
static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
-static void dasd_flush_ccw_queue(struct dasd_device *, int);
+static int dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);
@@ -60,6 +60,7 @@ static void do_kick_device(void *data);
* SECTION: Operations on the device structure.
*/
static wait_queue_head_t dasd_init_waitq;
+static wait_queue_head_t dasd_flush_wq;
/*
* Allocate memory for a new device structure.
@@ -121,7 +122,7 @@ dasd_free_device(struct dasd_device *device)
/*
* Make a new device known to the system.
*/
-static inline int
+static int
dasd_state_new_to_known(struct dasd_device *device)
{
int rc;
@@ -145,7 +146,7 @@ dasd_state_new_to_known(struct dasd_device *device)
/*
* Let the system forget about a device.
*/
-static inline void
+static int
dasd_state_known_to_new(struct dasd_device * device)
{
/* Disable extended error reporting for this device. */
@@ -163,12 +164,13 @@ dasd_state_known_to_new(struct dasd_device * device)
/* Give up reference we took in dasd_state_new_to_known. */
dasd_put_device(device);
+ return 0;
}
/*
* Request the irq line for the device.
*/
-static inline int
+static int
dasd_state_known_to_basic(struct dasd_device * device)
{
int rc;
@@ -182,7 +184,7 @@ dasd_state_known_to_basic(struct dasd_device * device)
device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
8 * sizeof (long));
debug_register_view(device->debug_area, &debug_sprintf_view);
- debug_set_level(device->debug_area, DBF_EMERG);
+ debug_set_level(device->debug_area, DBF_WARNING);
DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
device->state = DASD_STATE_BASIC;
@@ -192,17 +194,23 @@ dasd_state_known_to_basic(struct dasd_device * device)
/*
* Release the irq line for the device. Terminate any running i/o.
*/
-static inline void
+static int
dasd_state_basic_to_known(struct dasd_device * device)
{
+ int rc;
+
dasd_gendisk_free(device);
- dasd_flush_ccw_queue(device, 1);
+ rc = dasd_flush_ccw_queue(device, 1);
+ if (rc)
+ return rc;
+
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
debug_unregister(device->debug_area);
device->debug_area = NULL;
}
device->state = DASD_STATE_KNOWN;
+ return 0;
}
/*
@@ -219,7 +227,7 @@ dasd_state_basic_to_known(struct dasd_device * device)
* In case the analysis returns an error, the device setup is stopped
* (a fake disk was already added to allow formatting).
*/
-static inline int
+static int
dasd_state_basic_to_ready(struct dasd_device * device)
{
int rc;
@@ -247,25 +255,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
* Forget format information. Check if the target level is basic
* and if it is create fake disk for formatting.
*/
-static inline void
+static int
dasd_state_ready_to_basic(struct dasd_device * device)
{
- dasd_flush_ccw_queue(device, 0);
+ int rc;
+
+ rc = dasd_flush_ccw_queue(device, 0);
+ if (rc)
+ return rc;
dasd_destroy_partitions(device);
dasd_flush_request_queue(device);
device->blocks = 0;
device->bp_block = 0;
device->s2b_shift = 0;
device->state = DASD_STATE_BASIC;
+ return 0;
}
/*
* Back to basic.
*/
-static inline void
+static int
dasd_state_unfmt_to_basic(struct dasd_device * device)
{
device->state = DASD_STATE_BASIC;
+ return 0;
}
/*
@@ -273,7 +287,7 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
* the requeueing of requests from the linux request queue to the
* ccw queue.
*/
-static inline int
+static int
dasd_state_ready_to_online(struct dasd_device * device)
{
device->state = DASD_STATE_ONLINE;
@@ -284,16 +298,17 @@ dasd_state_ready_to_online(struct dasd_device * device)
/*
* Stop the requeueing of requests again.
*/
-static inline void
+static int
dasd_state_online_to_ready(struct dasd_device * device)
{
device->state = DASD_STATE_READY;
+ return 0;
}
/*
* Device startup state changes.
*/
-static inline int
+static int
dasd_increase_state(struct dasd_device *device)
{
int rc;
@@ -329,30 +344,37 @@ dasd_increase_state(struct dasd_device *device)
/*
* Device shutdown state changes.
*/
-static inline int
+static int
dasd_decrease_state(struct dasd_device *device)
{
+ int rc;
+
+ rc = 0;
if (device->state == DASD_STATE_ONLINE &&
device->target <= DASD_STATE_READY)
- dasd_state_online_to_ready(device);
+ rc = dasd_state_online_to_ready(device);
- if (device->state == DASD_STATE_READY &&
+ if (!rc &&
+ device->state == DASD_STATE_READY &&
device->target <= DASD_STATE_BASIC)
- dasd_state_ready_to_basic(device);
+ rc = dasd_state_ready_to_basic(device);
- if (device->state == DASD_STATE_UNFMT &&
+ if (!rc &&
+ device->state == DASD_STATE_UNFMT &&
device->target <= DASD_STATE_BASIC)
- dasd_state_unfmt_to_basic(device);
+ rc = dasd_state_unfmt_to_basic(device);
- if (device->state == DASD_STATE_BASIC &&
+ if (!rc &&
+ device->state == DASD_STATE_BASIC &&
device->target <= DASD_STATE_KNOWN)
- dasd_state_basic_to_known(device);
+ rc = dasd_state_basic_to_known(device);
- if (device->state == DASD_STATE_KNOWN &&
+ if (!rc &&
+ device->state == DASD_STATE_KNOWN &&
device->target <= DASD_STATE_NEW)
- dasd_state_known_to_new(device);
+ rc = dasd_state_known_to_new(device);
- return 0;
+ return rc;
}
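The shutdown path now threads a return code through the chain of transitions, so the first failing step stops the teardown instead of blindly running the remaining ones. A toy userspace sketch of the same pattern — the states and transition functions are invented for illustration:

#include <stdio.h>

enum state { S_NEW, S_KNOWN, S_BASIC, S_READY };

static int step_ready_to_basic(void) { return 0; }
static int step_basic_to_known(void) { return -1; /* simulate a failure */ }
static int step_known_to_new(void)   { return 0; }

/* Walk the state chain downwards; stop at the first failing transition
 * so later steps never run on a half-torn-down device. */
static int decrease_state(enum state *st, enum state target)
{
        int rc = 0;

        if (!rc && *st == S_READY && target <= S_BASIC)
                if (!(rc = step_ready_to_basic())) *st = S_BASIC;
        if (!rc && *st == S_BASIC && target <= S_KNOWN)
                if (!(rc = step_basic_to_known())) *st = S_KNOWN;
        if (!rc && *st == S_KNOWN && target <= S_NEW)
                if (!(rc = step_known_to_new())) *st = S_NEW;
        return rc;
}

int main(void)
{
        enum state st = S_READY;
        int rc = decrease_state(&st, S_NEW);

        printf("rc=%d, stopped in state %d\n", rc, st); /* rc=-1, state 2 */
        return 0;
}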
/*
@@ -701,6 +723,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
cqr->retries--;
cqr->status = DASD_CQR_CLEAR;
cqr->stopclk = get_clock();
+ cqr->starttime = 0;
DBF_DEV_EVENT(DBF_DEBUG, device,
"terminate cqr %p successful",
cqr);
@@ -870,7 +893,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
device = (struct dasd_device *) cqr->device;
if (device == NULL ||
- device != dasd_device_from_cdev(cdev) ||
+ device != dasd_device_from_cdev_locked(cdev) ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
cdev->dev.bus_id);
@@ -947,7 +970,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.dstat & mask) == mask) {
- device = dasd_device_from_cdev(cdev);
+ device = dasd_device_from_cdev_locked(cdev);
if (!IS_ERR(device)) {
dasd_handle_state_change_pending(device);
dasd_put_device(device);
@@ -978,6 +1001,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_QUEUED;
dasd_clear_timer(device);
+ wake_up(&dasd_flush_wq);
dasd_schedule_bh(device);
return;
}
@@ -1241,6 +1265,10 @@ __dasd_check_expire(struct dasd_device * device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
+ DEV_MESSAGE(KERN_ERR, device,
+ "internal error - timeout (%is) expired "
+ "for cqr %p (%i retries left)",
+ (cqr->expires/HZ), cqr, cqr->retries);
if (device->discipline->term_IO(cqr) != 0)
/* Hmpf, try again in 1/10 sec */
dasd_set_timer(device, 10);
@@ -1285,46 +1313,100 @@ __dasd_start_head(struct dasd_device * device)
dasd_set_timer(device, 50);
}
+static inline int
+_wait_for_clear(struct dasd_ccw_req *cqr)
+{
+ return (cqr->status == DASD_CQR_QUEUED);
+}
+
/*
- * Remove requests from the ccw queue.
+ * Remove all requests from the ccw queue (all = '1') or only block device
+ * requests in case all = '0'.
+ * Take care of the erp-chain (chained via cqr->refers) and remove either
+ * the whole erp-chain or none of the erp-requests.
+ * If a request is currently running, term_IO is called and the request
+ * is re-queued. Prior to removing the terminated request we need to wait
+ * for the clear-interrupt.
+ * In case termination is not possible we stop processing and just finishing
+ * the already moved requests.
*/
-static void
+static int
dasd_flush_ccw_queue(struct dasd_device * device, int all)
{
+ struct dasd_ccw_req *cqr, *orig, *n;
+ int rc, i;
+
struct list_head flush_queue;
- struct list_head *l, *n;
- struct dasd_ccw_req *cqr;
INIT_LIST_HEAD(&flush_queue);
spin_lock_irq(get_ccwdev_lock(device->cdev));
- list_for_each_safe(l, n, &device->ccw_queue) {
- cqr = list_entry(l, struct dasd_ccw_req, list);
+ rc = 0;
+restart:
+ list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
+ /* get original request of erp request-chain */
+ for (orig = cqr; orig->refers != NULL; orig = orig->refers);
+
/* Flush all request or only block device requests? */
- if (all == 0 && cqr->callback == dasd_end_request_cb)
+ if (all == 0 && cqr->callback != dasd_end_request_cb &&
+ orig->callback != dasd_end_request_cb) {
continue;
- if (cqr->status == DASD_CQR_IN_IO)
- device->discipline->term_IO(cqr);
- if (cqr->status != DASD_CQR_DONE ||
- cqr->status != DASD_CQR_FAILED) {
- cqr->status = DASD_CQR_FAILED;
+ }
+ /* Check status and move request to flush_queue */
+ switch (cqr->status) {
+ case DASD_CQR_IN_IO:
+ rc = device->discipline->term_IO(cqr);
+ if (rc) {
+ /* unable to terminate request */
+ DEV_MESSAGE(KERN_ERR, device,
+ "dasd flush ccw_queue is unable "
+ " to terminate request %p",
+ cqr);
+ /* stop flush processing */
+ goto finished;
+ }
+ break;
+ case DASD_CQR_QUEUED:
+ case DASD_CQR_ERROR:
+ /* set request to FAILED */
cqr->stopclk = get_clock();
+ cqr->status = DASD_CQR_FAILED;
+ break;
+ default: /* do not touch the others */
+ break;
+ }
+ /* Rechain request (including erp chain) */
+ for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
+ cqr->endclk = get_clock();
+ list_move_tail(&cqr->list, &flush_queue);
+ }
+ if (i > 1)
+ /* moved more than one request - need to restart */
+ goto restart;
+ }
+
+finished:
+ spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ /* Now call the callback function of flushed requests */
+restart_cb:
+ list_for_each_entry_safe(cqr, n, &flush_queue, list) {
+ if (cqr->status == DASD_CQR_CLEAR) {
+ /* wait for clear interrupt! */
+ wait_event(dasd_flush_wq, _wait_for_clear(cqr));
+ cqr->status = DASD_CQR_FAILED;
}
/* Process finished ERP request. */
if (cqr->refers) {
__dasd_process_erp(device, cqr);
- continue;
+ /* restart list_for_each_entry_safe loop since
+ * __dasd_process_erp might remove multiple elements */
+ goto restart_cb;
}
- /* Rechain request on device request queue */
+ /* call the callback function */
cqr->endclk = get_clock();
- list_move_tail(&cqr->list, &flush_queue);
- }
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
- /* Now call the callback function of flushed requests */
- list_for_each_safe(l, n, &flush_queue) {
- cqr = list_entry(l, struct dasd_ccw_req, list);
if (cqr->callback != NULL)
(cqr->callback)(cqr, cqr->callback_data);
}
+ return rc;
}
/*
@@ -1510,10 +1592,8 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
if (device->discipline->term_IO) {
cqr->retries = -1;
device->discipline->term_IO(cqr);
- /*nished =
- * wait (non-interruptible) for final status
- * because signal ist still pending
- */
+ /* wait (non-interruptible) for final status
+ * because signal is still pending */
spin_unlock_irq(get_ccwdev_lock(device->cdev));
wait_event(wait_q, _wait_for_wakeup(cqr));
spin_lock_irq(get_ccwdev_lock(device->cdev));
@@ -1546,19 +1626,11 @@ static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
- int rc;
if (list_empty(&device->ccw_queue))
return 0;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
- rc = device->discipline->term_IO(cqr);
- if (rc == 0) {
- /* termination successful */
- cqr->status = DASD_CQR_QUEUED;
- cqr->startclk = cqr->stopclk = 0;
- cqr->starttime = 0;
- }
- return rc;
+ return device->discipline->term_IO(cqr);
}
int
@@ -1726,10 +1798,7 @@ dasd_flush_request_queue(struct dasd_device * device)
return;
spin_lock_irq(&device->request_queue_lock);
- while (!list_empty(&device->request_queue->queue_head)) {
- req = elv_next_request(device->request_queue);
- if (req == NULL)
- break;
+ while ((req = elv_next_request(device->request_queue))) {
blkdev_dequeue_request(req);
dasd_end_request(req, 0);
}
@@ -2091,6 +2160,7 @@ dasd_init(void)
int rc;
init_waitqueue_head(&dasd_init_waitq);
+ init_waitqueue_head(&dasd_flush_wq);
/* register 'common' DASD debug area, used for all DBF_XXX calls */
dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
@@ -2099,7 +2169,7 @@ dasd_init(void)
goto failed;
}
debug_register_view(dasd_debug_area, &debug_sprintf_view);
- debug_set_level(dasd_debug_area, DBF_EMERG);
+ debug_set_level(dasd_debug_area, DBF_WARNING);
DBF_EVENT(DBF_EMERG, "%s", "debug area created");
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 9af02c79ce8..91cf971f065 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -258,8 +258,12 @@ dasd_parse_keyword( char *parsestring ) {
return residual_str;
}
if (strncmp("nopav", parsestring, length) == 0) {
- dasd_nopav = 1;
- MESSAGE(KERN_INFO, "%s", "disable PAV mode");
+ if (MACHINE_IS_VM)
+ MESSAGE(KERN_INFO, "%s", "'nopav' not supported on VM");
+ else {
+ dasd_nopav = 1;
+ MESSAGE(KERN_INFO, "%s", "disable PAV mode");
+ }
return residual_str;
}
if (strncmp("fixedbuffers", parsestring, length) == 0) {
@@ -523,17 +527,17 @@ dasd_create_device(struct ccw_device *cdev)
{
struct dasd_devmap *devmap;
struct dasd_device *device;
+ unsigned long flags;
int rc;
devmap = dasd_devmap_from_cdev(cdev);
if (IS_ERR(devmap))
return (void *) devmap;
- cdev->dev.driver_data = devmap;
device = dasd_alloc_device();
if (IS_ERR(device))
return device;
- atomic_set(&device->ref_count, 2);
+ atomic_set(&device->ref_count, 3);
spin_lock(&dasd_devmap_lock);
if (!devmap->device) {
@@ -552,6 +556,11 @@ dasd_create_device(struct ccw_device *cdev)
dasd_free_device(device);
return ERR_PTR(rc);
}
+
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ cdev->dev.driver_data = device;
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
return device;
}
@@ -569,6 +578,7 @@ dasd_delete_device(struct dasd_device *device)
{
struct ccw_device *cdev;
struct dasd_devmap *devmap;
+ unsigned long flags;
/* First remove device pointer from devmap. */
devmap = dasd_find_busid(device->cdev->dev.bus_id);
@@ -582,9 +592,16 @@ dasd_delete_device(struct dasd_device *device)
devmap->device = NULL;
spin_unlock(&dasd_devmap_lock);
- /* Drop ref_count by 2, one for the devmap reference and
- * one for the passed reference. */
- atomic_sub(2, &device->ref_count);
+ /* Disconnect dasd_device structure from ccw_device structure. */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ device->cdev->dev.driver_data = NULL;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+ /*
+ * Drop ref_count by 3, one for the devmap reference, one for
+ * the cdev reference and one for the passed reference.
+ */
+ atomic_sub(3, &device->ref_count);
/* Wait for reference counter to drop to zero. */
wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
@@ -593,9 +610,6 @@ dasd_delete_device(struct dasd_device *device)
cdev = device->cdev;
device->cdev = NULL;
- /* Disconnect dasd_devmap structure from ccw_device structure. */
- cdev->dev.driver_data = NULL;
-
/* Put ccw_device structure. */
put_device(&cdev->dev);
@@ -615,21 +629,32 @@ dasd_put_device_wake(struct dasd_device *device)
/*
* Return dasd_device structure associated with cdev.
+ * This function needs to be called with the ccw device
+ * lock held. It can be used from interrupt context.
+ */
+struct dasd_device *
+dasd_device_from_cdev_locked(struct ccw_device *cdev)
+{
+ struct dasd_device *device = cdev->dev.driver_data;
+
+ if (!device)
+ return ERR_PTR(-ENODEV);
+ dasd_get_device(device);
+ return device;
+}
+
+/*
+ * Return dasd_device structure associated with cdev.
*/
struct dasd_device *
dasd_device_from_cdev(struct ccw_device *cdev)
{
- struct dasd_devmap *devmap;
struct dasd_device *device;
+ unsigned long flags;
- device = ERR_PTR(-ENODEV);
- spin_lock(&dasd_devmap_lock);
- devmap = cdev->dev.driver_data;
- if (devmap && devmap->device) {
- device = devmap->device;
- dasd_get_device(device);
- }
- spin_unlock(&dasd_devmap_lock);
+ spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ device = dasd_device_from_cdev_locked(cdev);
+ spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return device;
}
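Splitting the lookup in two gives interrupt handlers, which already run with the ccw device lock held, a variant without the lock acquisition, while everyone else uses the wrapper that takes the lock itself. A generic pthread sketch of that convention — the registry, refcount and names are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int the_device = 42;     /* stand-in for the driver_data pointer */
static int refcount;

/* Caller must already hold dev_lock (e.g. an interrupt handler that
 * was entered with the lock taken). */
static int *device_get_locked(void)
{
        refcount++;             /* safe: the lock is held */
        return &the_device;
}

/* Convenience wrapper for process context: takes the lock itself. */
static int *device_get(void)
{
        int *dev;

        pthread_mutex_lock(&dev_lock);
        dev = device_get_locked();
        pthread_mutex_unlock(&dev_lock);
        return dev;
}

int main(void)
{
        int *dev = device_get();

        printf("dev=%d refcount=%d\n", *dev, refcount);
        return 0;
}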
@@ -730,16 +755,17 @@ static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dasd_devmap *devmap;
- char *dname;
+ struct dasd_device *device;
+ ssize_t len;
- spin_lock(&dasd_devmap_lock);
- dname = "none";
- devmap = dev->driver_data;
- if (devmap && devmap->device && devmap->device->discipline)
- dname = devmap->device->discipline->name;
- spin_unlock(&dasd_devmap_lock);
- return snprintf(buf, PAGE_SIZE, "%s\n", dname);
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (!IS_ERR(device) && device->discipline) {
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
+ device->discipline->name);
+ dasd_put_device(device);
+ } else
+ len = snprintf(buf, PAGE_SIZE, "none\n");
+ return len;
}
static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index da65f1b032f..e0bf30ebb21 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -678,7 +678,7 @@ int __init dasd_eer_init(void)
return 0;
}
-void __exit dasd_eer_exit(void)
+void dasd_eer_exit(void)
{
WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
}
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 4c272b70f41..d163632101d 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -83,10 +83,12 @@ dasd_gendisk_alloc(struct dasd_device *device)
void
dasd_gendisk_free(struct dasd_device *device)
{
- del_gendisk(device->gdp);
- device->gdp->queue = NULL;
- put_disk(device->gdp);
- device->gdp = NULL;
+ if (device->gdp) {
+ del_gendisk(device->gdp);
+ device->gdp->queue = NULL;
+ put_disk(device->gdp);
+ device->gdp = NULL;
+ }
}
/*
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 3ccf06d28ba..9f52004f6fc 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -534,6 +534,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
void dasd_remove_sysfs_files(struct ccw_device *);
struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
+struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
struct dasd_device *dasd_device_from_devindex(int);
int dasd_parse(void);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index ca7d51f7ecc..cab2c736683 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -453,7 +453,7 @@ static int __init xpram_init(void)
PRINT_WARN("No expanded memory available\n");
return -ENODEV;
}
- xpram_pages = xpram_highest_page_index();
+ xpram_pages = xpram_highest_page_index() + 1;
PRINT_INFO(" %u pages expanded memory found (%lu KB).\n",
xpram_pages, (unsigned long) xpram_pages*4);
rc = xpram_setup_sizes(xpram_pages);
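The helper returns the highest zero-based page index, not a page count, so the previous code silently dropped the last 4 KB page. A trivial illustration of the off-by-one:

#include <stdio.h>

int main(void)
{
        int highest_index = 7;          /* pages 0..7 exist */
        int pages = highest_index + 1;  /* the count is index + 1 */

        printf("%d pages (%d KB)\n", pages, pages * 4);
        return 0;
}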
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0c0162ff6c0..c3e97b4fc18 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
obj-$(CONFIG_MONREADER) += monreader.o
+obj-$(CONFIG_MONWRITER) += monwriter.o
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
new file mode 100644
index 00000000000..1e3939aeb8a
--- /dev/null
+++ b/drivers/s390/char/monwriter.c
@@ -0,0 +1,292 @@
+/*
+ * drivers/s390/char/monwriter.c
+ *
+ * Character device driver for writing z/VM *MONITOR service records.
+ *
+ * Copyright (C) IBM Corp. 2006
+ *
+ * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/ctype.h>
+#include <linux/poll.h>
+#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/appldata.h>
+#include <asm/monwriter.h>
+
+#define MONWRITE_MAX_DATALEN 4024
+
+static int mon_max_bufs = 255;
+
+struct mon_buf {
+ struct list_head list;
+ struct monwrite_hdr hdr;
+ int diag_done;
+ char *data;
+};
+
+struct mon_private {
+ struct list_head list;
+ struct monwrite_hdr hdr;
+ size_t hdr_to_read;
+ size_t data_to_read;
+ struct mon_buf *current_buf;
+ int mon_buf_count;
+};
+
+/*
+ * helper functions
+ */
+
+static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
+{
+ struct appldata_product_id id;
+ int rc;
+
+ strcpy(id.prod_nr, "LNXAPPL");
+ id.prod_fn = myhdr->applid;
+ id.record_nr = myhdr->record_num;
+ id.version_nr = myhdr->version;
+ id.release_nr = myhdr->release;
+ id.mod_lvl = myhdr->mod_level;
+ rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
+ if (rc <= 0)
+ return rc;
+ if (rc == 5)
+ return -EPERM;
+ printk("DIAG X'DC' error with return code: %i\n", rc);
+ return -EINVAL;
+}
+
+static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
+ struct monwrite_hdr *monhdr)
+{
+ struct mon_buf *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &monpriv->list, list)
+ if (entry->hdr.applid == monhdr->applid &&
+ entry->hdr.record_num == monhdr->record_num &&
+ entry->hdr.version == monhdr->version &&
+ entry->hdr.release == monhdr->release &&
+ entry->hdr.mod_level == monhdr->mod_level)
+ return entry;
+ return NULL;
+}
+
+static int monwrite_new_hdr(struct mon_private *monpriv)
+{
+ struct monwrite_hdr *monhdr = &monpriv->hdr;
+ struct mon_buf *monbuf;
+ int rc;
+
+ if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
+ monhdr->mon_function > MONWRITE_START_CONFIG ||
+ monhdr->hdrlen != sizeof(struct monwrite_hdr))
+ return -EINVAL;
+ monbuf = monwrite_find_hdr(monpriv, monhdr);
+ if (monbuf) {
+ if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
+ monhdr->datalen = monbuf->hdr.datalen;
+ rc = monwrite_diag(monhdr, monbuf->data,
+ APPLDATA_STOP_REC);
+ list_del(&monbuf->list);
+ monpriv->mon_buf_count--;
+ kfree(monbuf->data);
+ kfree(monbuf);
+ monbuf = NULL;
+ }
+ } else {
+ if (monpriv->mon_buf_count >= mon_max_bufs)
+ return -ENOSPC;
+ monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
+ if (!monbuf)
+ return -ENOMEM;
+ /* monbuf->hdr is not filled in yet - size the buffer from monhdr */
+ monbuf->data = kzalloc(monhdr->datalen,
+ GFP_KERNEL | GFP_DMA);
+ if (!monbuf->data) {
+ kfree(monbuf);
+ return -ENOMEM;
+ }
+ monbuf->hdr = *monhdr;
+ list_add_tail(&monbuf->list, &monpriv->list);
+ monpriv->mon_buf_count++;
+ }
+ monpriv->current_buf = monbuf;
+ return 0;
+}
+
+static int monwrite_new_data(struct mon_private *monpriv)
+{
+ struct monwrite_hdr *monhdr = &monpriv->hdr;
+ struct mon_buf *monbuf = monpriv->current_buf;
+ int rc = 0;
+
+ switch (monhdr->mon_function) {
+ case MONWRITE_START_INTERVAL:
+ if (!monbuf->diag_done) {
+ rc = monwrite_diag(monhdr, monbuf->data,
+ APPLDATA_START_INTERVAL_REC);
+ monbuf->diag_done = 1;
+ }
+ break;
+ case MONWRITE_START_CONFIG:
+ if (!monbuf->diag_done) {
+ rc = monwrite_diag(monhdr, monbuf->data,
+ APPLDATA_START_CONFIG_REC);
+ monbuf->diag_done = 1;
+ }
+ break;
+ case MONWRITE_GEN_EVENT:
+ rc = monwrite_diag(monhdr, monbuf->data,
+ APPLDATA_GEN_EVENT_REC);
+ list_del(&monpriv->current_buf->list);
+ kfree(monpriv->current_buf->data);
+ kfree(monpriv->current_buf);
+ monpriv->current_buf = NULL;
+ break;
+ default:
+ /* monhdr->mon_function is checked in monwrite_new_hdr */
+ BUG();
+ }
+ return rc;
+}
+
+/*
+ * file operations
+ */
+
+static int monwrite_open(struct inode *inode, struct file *filp)
+{
+ struct mon_private *monpriv;
+
+ monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
+ if (!monpriv)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&monpriv->list);
+ monpriv->hdr_to_read = sizeof(monpriv->hdr);
+ filp->private_data = monpriv;
+ return nonseekable_open(inode, filp);
+}
+
+static int monwrite_close(struct inode *inode, struct file *filp)
+{
+ struct mon_private *monpriv = filp->private_data;
+ struct mon_buf *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &monpriv->list, list) {
+ if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
+ monwrite_diag(&entry->hdr, entry->data,
+ APPLDATA_STOP_REC);
+ monpriv->mon_buf_count--;
+ list_del(&entry->list);
+ kfree(entry->data);
+ kfree(entry);
+ }
+ kfree(monpriv);
+ return 0;
+}
+
+static ssize_t monwrite_write(struct file *filp, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct mon_private *monpriv = filp->private_data;
+ size_t len, written;
+ void *to;
+ int rc;
+
+ for (written = 0; written < count; ) {
+ if (monpriv->hdr_to_read) {
+ len = min(count - written, monpriv->hdr_to_read);
+ to = (char *) &monpriv->hdr +
+ sizeof(monpriv->hdr) - monpriv->hdr_to_read;
+ if (copy_from_user(to, data + written, len)) {
+ rc = -EFAULT;
+ goto out_error;
+ }
+ monpriv->hdr_to_read -= len;
+ written += len;
+ if (monpriv->hdr_to_read > 0)
+ continue;
+ rc = monwrite_new_hdr(monpriv);
+ if (rc)
+ goto out_error;
+ monpriv->data_to_read = monpriv->current_buf ?
+ monpriv->current_buf->hdr.datalen : 0;
+ }
+
+ if (monpriv->data_to_read) {
+ len = min(count - written, monpriv->data_to_read);
+ to = monpriv->current_buf->data +
+ monpriv->hdr.datalen - monpriv->data_to_read;
+ if (copy_from_user(to, data + written, len)) {
+ rc = -EFAULT;
+ goto out_error;
+ }
+ monpriv->data_to_read -= len;
+ written += len;
+ if (monpriv->data_to_read > 0)
+ continue;
+ rc = monwrite_new_data(monpriv);
+ if (rc)
+ goto out_error;
+ }
+ monpriv->hdr_to_read = sizeof(monpriv->hdr);
+ }
+ return written;
+
+out_error:
+ monpriv->data_to_read = 0;
+ monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
+ return rc;
+}
+
+static struct file_operations monwrite_fops = {
+ .owner = THIS_MODULE,
+ .open = &monwrite_open,
+ .release = &monwrite_close,
+ .write = &monwrite_write,
+};
+
+static struct miscdevice mon_dev = {
+ .name = "monwriter",
+ .fops = &monwrite_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+/*
+ * module init/exit
+ */
+
+static int __init mon_init(void)
+{
+ if (MACHINE_IS_VM)
+ return misc_register(&mon_dev);
+ else
+ return -ENODEV;
+}
+
+static void __exit mon_exit(void)
+{
+ WARN_ON(misc_deregister(&mon_dev) != 0);
+}
+
+module_init(mon_init);
+module_exit(mon_exit);
+
+module_param_named(max_bufs, mon_max_bufs, int, 0644);
+MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
+ "that can be active at one time");
+
+MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
+MODULE_DESCRIPTION("Character device driver for writing z/VM "
+ "APPLDATA monitor records.");
+MODULE_LICENSE("GPL");
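Seen from userspace, every record sent to the new device is a monwrite_hdr immediately followed by hdr.datalen bytes of payload; the hdr_to_read/data_to_read counters above let that pair span arbitrarily fragmented write() calls. A hedged usage sketch follows — the struct layout and the MONWRITE_GEN_EVENT value are assumptions standing in for <asm/monwriter.h>, and all field values are invented:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>

/* Assumed layout; the real definition lives in <asm/monwriter.h>. */
struct monwrite_hdr {
        uint8_t  mon_function;
        uint16_t applid;
        uint16_t record_num;
        uint16_t version;
        uint16_t release;
        uint16_t mod_level;
        uint16_t datalen;
        uint8_t  hdrlen;
} __attribute__((packed));

#define MONWRITE_GEN_EVENT 0x02 /* invented value, for illustration only */

int main(void)
{
        char data[16] = "sample record";
        struct monwrite_hdr hdr;
        int fd;

        fd = open("/dev/monwriter", O_WRONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&hdr, 0, sizeof(hdr));
        hdr.mon_function = MONWRITE_GEN_EVENT;
        hdr.applid = 0x10;              /* invented application id */
        hdr.datalen = sizeof(data);
        hdr.hdrlen = sizeof(hdr);       /* must match the kernel's struct,
                                         * or write() fails with -EINVAL */

        /* header and data may go in one write() or be split across many */
        if (write(fd, &hdr, sizeof(hdr)) < 0 ||
            write(fd, data, sizeof(data)) < 0)
                perror("write");
        close(fd);
        return 0;
}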
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 19762f3476a..1678b6c757e 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2004,2005 IBM Corporation
- * Interface implementation for communication with the v/VM control program
+ * Interface implementation for communication with the z/VM control program
* Author(s): Christian Borntraeger <cborntra@de.ibm.com>
*
*
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 87389e73046..8a5975f3dad 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2004, 2005 IBM Corporation
- * Interface implementation for communication with the v/VM control program
+ * Interface implementation for communication with the z/VM control program
* Version 1.0
* Author(s): Christian Borntraeger <cborntra@de.ibm.com>
*
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 3cba6c9fab1..38954f5cd14 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -183,11 +183,9 @@ ccwgroup_create(struct device *root,
gdev->creator_id = creator_id;
gdev->count = argc;
- gdev->dev = (struct device ) {
- .bus = &ccwgroup_bus_type,
- .parent = root,
- .release = ccwgroup_release,
- };
+ gdev->dev.bus = &ccwgroup_bus_type;
+ gdev->dev.parent = root;
+ gdev->dev.release = ccwgroup_release;
snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
gdev->cdev[0]->dev.bus_id);
@@ -391,10 +389,8 @@ int
ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
- cdriver->driver = (struct device_driver) {
- .bus = &ccwgroup_bus_type,
- .name = cdriver->name,
- };
+ cdriver->driver.bus = &ccwgroup_bus_type;
+ cdriver->driver.name = cdriver->name;
return driver_register(&cdriver->driver);
}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 61ce3f1d522..3bb4e472d73 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -238,8 +238,6 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
/* Check for single path devices. */
if (sch->schib.pmcw.pim == 0x80)
goto out_unreg;
- if (sch->vpm == mask)
- goto out_unreg;
if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
(sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
@@ -258,6 +256,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
/* trigger path verification. */
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
+ else if (sch->lpm == mask)
+ goto out_unreg;
out_unlock:
spin_unlock_irq(&sch->lock);
return 0;
@@ -378,6 +378,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
if (chp_mask == 0) {
spin_unlock_irq(&sch->lock);
+ put_device(&sch->dev);
return 0;
}
old_lpm = sch->lpm;
@@ -392,7 +393,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
spin_unlock_irq(&sch->lock);
put_device(&sch->dev);
- return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+ return 0;
}
@@ -1391,10 +1392,8 @@ new_channel_path(int chpid)
/* fill in status, etc. */
chp->id = chpid;
chp->state = 1;
- chp->dev = (struct device) {
- .parent = &css[0]->device,
- .release = chp_release,
- };
+ chp->dev.parent = &css[0]->device;
+ chp->dev.release = chp_release;
snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
/* Obtain channel path description and fill it in. */
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 89320c1ad82..2e2882daefb 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -16,11 +16,10 @@
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
-
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
-
+#include <asm/setup.h>
#include "airq.h"
#include "cio.h"
#include "css.h"
@@ -192,7 +191,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
sch->orb.pfch = sch->options.prefetch == 0;
sch->orb.spnd = sch->options.suspend;
sch->orb.ssic = sch->options.suspend && sch->options.inter;
- sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
+ sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
@@ -570,10 +569,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
sch->opm = 0xff;
if (!cio_is_console(sch->schid))
chsc_validate_chpids(sch);
- sch->lpm = sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom &
- sch->opm;
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
CIO_DEBUG(KERN_INFO, 0,
"Detected device %04x on subchannel 0.%x.%04X"
@@ -841,14 +837,26 @@ __clear_subchannel_easy(struct subchannel_id schid)
return -EBUSY;
}
-extern void do_reipl(unsigned long devno);
-static int
-__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+struct sch_match_id {
+ struct subchannel_id schid;
+ struct ccw_dev_id devid;
+ int rc;
+};
+
+static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
+ void *data)
{
struct schib schib;
+ struct sch_match_id *match_id = data;
if (stsch_err(schid, &schib))
return -ENXIO;
+ if (match_id && schib.pmcw.dnv &&
+ (schib.pmcw.dev == match_id->devid.devno) &&
+ (schid.ssid == match_id->devid.ssid)) {
+ match_id->schid = schid;
+ match_id->rc = 0;
+ }
if (!schib.pmcw.ena)
return 0;
switch(__disable_subchannel_easy(schid, &schib)) {
@@ -864,18 +872,71 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
return 0;
}
-void
-clear_all_subchannels(void)
+static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
+ struct subchannel_id *schid)
{
+ struct sch_match_id match_id;
+
+ match_id.devid = *devid;
+ match_id.rc = -ENODEV;
local_irq_disable();
- for_each_subchannel(__shutdown_subchannel_easy, NULL);
+ for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
+ if (match_id.rc == 0)
+ *schid = match_id.schid;
+ return match_id.rc;
}
+
+void clear_all_subchannels(void)
+{
+ local_irq_disable();
+ for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
+}
+
+extern void do_reipl_asm(__u32 schid);
+
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
-void
-reipl(unsigned long devno)
+void reipl_ccw_dev(struct ccw_dev_id *devid)
{
- clear_all_subchannels();
+ struct subchannel_id schid;
+
+ if (clear_all_subchannels_and_match(devid, &schid))
+ panic("IPL Device not found\n");
cio_reset_channel_paths();
- do_reipl(devno);
+ do_reipl_asm(*((__u32*)&schid));
+}
+
+extern struct schib ipl_schib;
+
+/*
+ * ipl_save_parameters gets called very early. It is not allowed to access
+ * anything in the bss section at all. The bss section is not cleared yet,
+ * but may contain some ipl parameters written by the firmware.
+ * These parameters (if present) are copied to 0x2000.
+ * To avoid corruption of the ipl parameters, all variables used by this
+ * function must reside on the stack or in the data section.
+ */
+void ipl_save_parameters(void)
+{
+ struct subchannel_id schid;
+ unsigned int *ipl_ptr;
+ void *src, *dst;
+
+ schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
+ if (!schid.one)
+ return;
+ if (stsch(schid, &ipl_schib))
+ return;
+ if (!ipl_schib.pmcw.dnv)
+ return;
+ ipl_devno = ipl_schib.pmcw.dev;
+ ipl_flags |= IPL_DEVNO_VALID;
+ if (!ipl_schib.pmcw.qf)
+ return;
+ ipl_flags |= IPL_PARMBLOCK_VALID;
+ ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
+ src = (void *)(unsigned long)*ipl_ptr;
+ dst = (void *)IPL_PARMBLOCK_ORIGIN;
+ memmove(dst, src, PAGE_SIZE);
+ *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
}
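The constraint in the comment comes from where the toolchain places variables: zero-initialized statics go to .bss, which is exactly the memory that may still hold firmware-written IPL data before the startup code clears it, while explicitly initialized objects go to .data. A two-line sketch one can verify with nm (names invented):

/* Build with: cc -c section_demo.c && nm section_demo.o */
int lives_in_bss;       /* zero-initialized -> .bss ('B' or 'C' in nm) */
int lives_in_data = 1;  /* explicitly initialized -> .data ('D' in nm) */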
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 13eeea3d547..7086a74e987 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -182,136 +182,141 @@ get_subchannel_by_schid(struct subchannel_id schid)
return dev ? to_subchannel(dev) : NULL;
}
-
-static inline int
-css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
+static inline int css_get_subchannel_status(struct subchannel *sch)
{
struct schib schib;
- int cc;
- cc = stsch(schid, &schib);
- if (cc)
- return CIO_GONE;
- if (!schib.pmcw.dnv)
+ if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
return CIO_GONE;
- if (sch && sch->schib.pmcw.dnv &&
- (schib.pmcw.dev != sch->schib.pmcw.dev))
+ if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
return CIO_REVALIDATE;
- if (sch && !sch->lpm)
+ if (!sch->lpm)
return CIO_NO_PATH;
return CIO_OPER;
}
-
-static int
-css_evaluate_subchannel(struct subchannel_id schid, int slow)
+
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
int event, ret, disc;
- struct subchannel *sch;
unsigned long flags;
+ enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
- sch = get_subchannel_by_schid(schid);
- disc = sch ? device_is_disconnected(sch) : 0;
+ spin_lock_irqsave(&sch->lock, flags);
+ disc = device_is_disconnected(sch);
if (disc && slow) {
- if (sch)
- put_device(&sch->dev);
- return 0; /* Already processed. */
+ /* Disconnected devices are evaluated directly only. */
+ spin_unlock_irqrestore(&sch->lock, flags);
+ return 0;
}
- /*
- * We've got a machine check, so running I/O won't get an interrupt.
- * Kill any pending timers.
- */
- if (sch)
- device_kill_pending_timer(sch);
+ /* No interrupt after machine check - kill pending timers. */
+ device_kill_pending_timer(sch);
if (!disc && !slow) {
- if (sch)
- put_device(&sch->dev);
- return -EAGAIN; /* Will be done on the slow path. */
+ /* Non-disconnected devices are evaluated on the slow path. */
+ spin_unlock_irqrestore(&sch->lock, flags);
+ return -EAGAIN;
}
- event = css_get_subchannel_status(sch, schid);
+ event = css_get_subchannel_status(sch);
CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
- schid.ssid, schid.sch_no, event,
- sch?(disc?"disconnected":"normal"):"unknown",
- slow?"slow":"fast");
+ sch->schid.ssid, sch->schid.sch_no, event,
+ disc ? "disconnected" : "normal",
+ slow ? "slow" : "fast");
+ /* Analyze subchannel status. */
+ action = NONE;
switch (event) {
case CIO_NO_PATH:
- case CIO_GONE:
- if (!sch) {
- /* Never used this subchannel. Ignore. */
- ret = 0;
+ if (disc) {
+ /* Check if paths have become available. */
+ action = REPROBE;
break;
}
- if (disc && (event == CIO_NO_PATH)) {
- /*
- * Uargh, hack again. Because we don't get a machine
- * check on configure on, our path bookkeeping can
- * be out of date here (it's fine while we only do
- * logical varying or get chsc machine checks). We
- * need to force reprobing or we might miss devices
- * coming operational again. It won't do harm in real
- * no path situations.
- */
- spin_lock_irqsave(&sch->lock, flags);
- device_trigger_reprobe(sch);
+ /* fall through */
+ case CIO_GONE:
+ /* Prevent unwanted effects when opening lock. */
+ cio_disable_subchannel(sch);
+ device_set_disconnected(sch);
+ /* Ask driver what to do with device. */
+ action = UNREGISTER;
+ if (sch->driver && sch->driver->notify) {
spin_unlock_irqrestore(&sch->lock, flags);
- ret = 0;
- break;
- }
- if (sch->driver && sch->driver->notify &&
- sch->driver->notify(&sch->dev, event)) {
- cio_disable_subchannel(sch);
- device_set_disconnected(sch);
- ret = 0;
- break;
+ ret = sch->driver->notify(&sch->dev, event);
+ spin_lock_irqsave(&sch->lock, flags);
+ if (ret)
+ action = NONE;
}
- /*
- * Unregister subchannel.
- * The device will be killed automatically.
- */
- cio_disable_subchannel(sch);
- css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->schib.pmcw.intparm = 0;
- cio_modify(sch);
- put_device(&sch->dev);
- ret = 0;
break;
case CIO_REVALIDATE:
- /*
- * Revalidation machine check. Sick.
- * We don't notify the driver since we have to throw the device
- * away in any case.
- */
- if (!disc) {
- css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->schib.pmcw.intparm = 0;
- cio_modify(sch);
- put_device(&sch->dev);
- ret = css_probe_device(schid);
- } else {
- /*
- * We can't immediately deregister the disconnected
- * device since it might block.
- */
- spin_lock_irqsave(&sch->lock, flags);
- device_trigger_reprobe(sch);
- spin_unlock_irqrestore(&sch->lock, flags);
- ret = 0;
- }
+ /* Device will be removed, so no notify necessary. */
+ if (disc)
+ /* Reprobe because immediate unregister might block. */
+ action = REPROBE;
+ else
+ action = UNREGISTER_PROBE;
break;
case CIO_OPER:
- if (disc) {
- spin_lock_irqsave(&sch->lock, flags);
+ if (disc)
/* Get device operational again. */
- device_trigger_reprobe(sch);
- spin_unlock_irqrestore(&sch->lock, flags);
- }
- ret = sch ? 0 : css_probe_device(schid);
+ action = REPROBE;
+ break;
+ }
+ /* Perform action. */
+ ret = 0;
+ switch (action) {
+ case UNREGISTER:
+ case UNREGISTER_PROBE:
+ /* Unregister device (will use subchannel lock). */
+ spin_unlock_irqrestore(&sch->lock, flags);
+ css_sch_device_unregister(sch);
+ spin_lock_irqsave(&sch->lock, flags);
+
+ /* Reset intparm to zeroes. */
+ sch->schib.pmcw.intparm = 0;
+ cio_modify(sch);
+
+ /* Probe if necessary. */
+ if (action == UNREGISTER_PROBE)
+ ret = css_probe_device(sch->schid);
+ break;
+ case REPROBE:
+ device_trigger_reprobe(sch);
break;
default:
- BUG();
- ret = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&sch->lock, flags);
+
+ return ret;
+}
+
+static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
+{
+ struct schib schib;
+
+ if (!slow) {
+ /* Will be done on the slow path. */
+ return -EAGAIN;
}
+ if (stsch(schid, &schib) || !schib.pmcw.dnv) {
+ /* Unusable - ignore. */
+ return 0;
+ }
+ CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
+ "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
+
+ return css_probe_device(schid);
+}
+
+static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+{
+ struct subchannel *sch;
+ int ret;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ ret = css_evaluate_known_subchannel(sch, slow);
+ put_device(&sch->dev);
+ } else
+ ret = css_evaluate_new_subchannel(schid, slow);
+
return ret;
}
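The rewritten evaluation first classifies the subchannel into one of four actions while holding the lock, and only then performs the action, temporarily opening the lock for work that may block. A compressed pthread sketch of that decide-under-lock, act-afterwards shape — the status flag and helpers are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

enum action { NONE, UNREGISTER, REPROBE };

static void blocking_unregister(void) { puts("unregister (may sleep)"); }
static void reprobe(void)             { puts("reprobe"); }

int main(void)
{
        enum action action = NONE;
        int gone = 1;   /* invented device status */

        pthread_mutex_lock(&lock);
        /* Phase 1: inspect the state and pick an action under the lock. */
        action = gone ? UNREGISTER : REPROBE;
        pthread_mutex_unlock(&lock);

        /* Phase 2: perform the action; blocking work happens unlocked. */
        switch (action) {
        case UNREGISTER:
                blocking_unregister();
                break;
        case REPROBE:
                reprobe();
                break;
        default:
                break;
        }
        return 0;
}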
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 585fa04233c..688945662c1 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -52,53 +52,81 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
return 1;
}
-/*
- * Hotplugging interface for ccw devices.
- * Heavily modeled on pci and usb hotplug.
- */
-static int
-ccw_uevent (struct device *dev, char **envp, int num_envp,
- char *buffer, int buffer_size)
+/* Store modalias string delimited by prefix/suffix string into buffer with
+ * specified size. Return length of resulting string (excluding trailing '\0')
+ * even if string doesn't fit buffer (snprintf semantics). */
+static int snprint_alias(char *buf, size_t size, const char *prefix,
+ struct ccw_device_id *id, const char *suffix)
{
- struct ccw_device *cdev = to_ccwdev(dev);
- int i = 0;
- int length = 0;
+ int len;
- if (!cdev)
- return -ENODEV;
+ len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type,
+ id->cu_model);
+ if (len > size)
+ return len;
+ buf += len;
+ size -= len;
- /* what we want to pass to /sbin/hotplug */
+ if (id->dev_type != 0)
+ len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
+ id->dev_model, suffix);
+ else
+ len += snprintf(buf, size, "dtdm%s", suffix);
- envp[i++] = buffer;
- length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X",
- cdev->id.cu_type);
- if ((buffer_size - length <= 0) || (i >= num_envp))
- return -ENOMEM;
- ++length;
- buffer += length;
+ return len;
+}
+/* Set up environment variables for ccw device uevent. Return 0 on success,
+ * non-zero otherwise. */
+static int ccw_uevent(struct device *dev, char **envp, int num_envp,
+ char *buffer, int buffer_size)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_id *id = &(cdev->id);
+ int i = 0;
+ int len;
+
+ /* CU_TYPE= */
+ len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1;
+ if (len > buffer_size || i >= num_envp)
+ return -ENOMEM;
envp[i++] = buffer;
- length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X",
- cdev->id.cu_model);
- if ((buffer_size - length <= 0) || (i >= num_envp))
+ buffer += len;
+ buffer_size -= len;
+
+ /* CU_MODEL= */
+ len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1;
+ if (len > buffer_size || i >= num_envp)
return -ENOMEM;
- ++length;
- buffer += length;
+ envp[i++] = buffer;
+ buffer += len;
+ buffer_size -= len;
/* The next two can be zero, that's ok for us */
- envp[i++] = buffer;
- length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X",
- cdev->id.dev_type);
- if ((buffer_size - length <= 0) || (i >= num_envp))
+ /* DEV_TYPE= */
+ len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1;
+ if (len > buffer_size || i >= num_envp)
return -ENOMEM;
- ++length;
- buffer += length;
+ envp[i++] = buffer;
+ buffer += len;
+ buffer_size -= len;
+ /* DEV_MODEL= */
+ len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X",
+ (unsigned char) id->dev_model) + 1;
+ if (len > buffer_size || i >= num_envp)
+ return -ENOMEM;
envp[i++] = buffer;
- length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X",
- cdev->id.dev_model);
- if ((buffer_size - length <= 0) || (i >= num_envp))
+ buffer += len;
+ buffer_size -= len;
+
+ /* MODALIAS= */
+ len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1;
+ if (len > buffer_size || i >= num_envp)
return -ENOMEM;
+ envp[i++] = buffer;
+ buffer += len;
+ buffer_size -= len;
envp[i] = NULL;
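Each environment string is appended with snprintf and a "+ 1" that accounts for the terminating NUL, so the truncation test "len > buffer_size" works off snprintf's would-have-written return value while the buffer itself can never overrun. A standalone sketch of the pattern with invented values:

#include <stdio.h>

int main(void)
{
        char buffer[64], *envp[4];
        size_t buffer_size = sizeof(buffer);
        char *buf = buffer;
        int i = 0, len;

        /* Each variable is stored NUL-terminated; the '+ 1' keeps the
         * NUL in the accounting so the next string starts after it. */
        len = snprintf(buf, buffer_size, "CU_TYPE=%04X", 0x3990) + 1;
        if ((size_t)len > buffer_size)
                return 1;       /* would have been truncated */
        envp[i++] = buf;
        buf += len;
        buffer_size -= len;

        len = snprintf(buf, buffer_size, "CU_MODEL=%02X", 0xe9) + 1;
        if ((size_t)len > buffer_size)
                return 1;
        envp[i++] = buf;
        envp[i] = NULL;

        for (i = 0; envp[i]; i++)
                puts(envp[i]);
        return 0;
}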
@@ -251,16 +279,11 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
- int ret;
+ int len;
- ret = sprintf(buf, "ccw:t%04Xm%02X",
- id->cu_type, id->cu_model);
- if (id->dev_type != 0)
- ret += sprintf(buf + ret, "dt%04Xdm%02X\n",
- id->dev_type, id->dev_model);
- else
- ret += sprintf(buf + ret, "dtdm\n");
- return ret;
+ len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1;
+
+ return len > PAGE_SIZE ? PAGE_SIZE : len;
}
static ssize_t
@@ -556,12 +579,11 @@ get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
struct ccw_device *sibling)
{
struct device *dev;
- struct match_data data = {
- .devno = devno,
- .ssid = ssid,
- .sibling = sibling,
- };
+ struct match_data data;
+ data.devno = devno;
+ data.ssid = ssid;
+ data.sibling = sibling;
dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
return dev ? to_ccwdev(dev) : NULL;
@@ -835,10 +857,8 @@ io_subchannel_probe (struct subchannel *sch)
return -ENOMEM;
}
atomic_set(&cdev->private->onoff, 0);
- cdev->dev = (struct device) {
- .parent = &sch->dev,
- .release = ccw_device_release,
- };
+ cdev->dev.parent = &sch->dev;
+ cdev->dev.release = ccw_device_release;
INIT_LIST_HEAD(&cdev->private->kick_work.entry);
/* Do first half of device_register. */
device_initialize(&cdev->dev);
@@ -977,9 +997,7 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
int rc;
/* Initialize the ccw_device structure. */
- cdev->dev = (struct device) {
- .parent = &sch->dev,
- };
+ cdev->dev.parent = &sch->dev;
rc = io_subchannel_recog(cdev, sch);
if (rc)
return rc;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 6d91c2eb205..dace46fc32e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -232,10 +232,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
*/
old_lpm = sch->lpm;
stsch(sch->schid, &sch->schib);
- sch->lpm = sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom &
- sch->opm;
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
/* Check since device may again have become not operational. */
if (!sch->schib.pmcw.dnv)
state = DEV_STATE_NOT_OPER;
@@ -267,12 +264,11 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
notify = 1;
}
/* fill out sense information */
- cdev->id = (struct ccw_device_id) {
- .cu_type = cdev->private->senseid.cu_type,
- .cu_model = cdev->private->senseid.cu_model,
- .dev_type = cdev->private->senseid.dev_type,
- .dev_model = cdev->private->senseid.dev_model,
- };
+ memset(&cdev->id, 0, sizeof(cdev->id));
+ cdev->id.cu_type = cdev->private->senseid.cu_type;
+ cdev->id.cu_model = cdev->private->senseid.cu_model;
+ cdev->id.dev_type = cdev->private->senseid.dev_type;
+ cdev->id.dev_model = cdev->private->senseid.dev_model;
if (notify) {
cdev->private->state = DEV_STATE_OFFLINE;
if (same_dev) {
@@ -456,8 +452,8 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
return;
}
/* Start Path Group verification. */
- sch->vpm = 0; /* Start with no path groups set. */
cdev->private->state = DEV_STATE_VERIFY;
+ cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
}
@@ -557,7 +553,19 @@ ccw_device_nopath_notify(void *data)
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
- cdev->private->flags.doverify = 0;
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ /* Update schib - pom may have changed. */
+ stsch(sch->schid, &sch->schib);
+ /* Update lpm with verified path mask. */
+ sch->lpm = sch->vpm;
+ /* Repeat path verification? */
+ if (cdev->private->flags.doverify) {
+ cdev->private->flags.doverify = 0;
+ ccw_device_verify_start(cdev);
+ return;
+ }
switch (err) {
case -EOPNOTSUPP: /* path grouping not supported, just set online. */
cdev->private->options.pgroup = 0;
@@ -566,12 +574,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw = (struct scsw) {
- .cc = 1,
- .fctl = SCSW_FCTL_START_FUNC,
- .actl = SCSW_ACTL_START_PEND,
- .stctl = SCSW_STCTL_STATUS_PEND,
- };
+ cdev->private->irb.scsw.cc = 1;
+ cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
+ cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
+ cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
@@ -617,6 +623,7 @@ ccw_device_online(struct ccw_device *cdev)
if (!cdev->private->options.pgroup) {
/* Start initial path verification. */
cdev->private->state = DEV_STATE_VERIFY;
+ cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
return 0;
}
@@ -663,7 +670,6 @@ ccw_device_offline(struct ccw_device *cdev)
/* Are we doing path grouping? */
if (!cdev->private->options.pgroup) {
/* No, set state offline immediately. */
- sch->vpm = 0;
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
}
@@ -784,6 +790,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
}
/* Device is idle, we can do the path verification. */
cdev->private->state = DEV_STATE_VERIFY;
+ cdev->private->flags.doverify = 0;
ccw_device_verify_start(cdev);
}
@@ -1046,9 +1053,9 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
}
static void
-ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
- /* When the I/O has terminated, we have to start verification. */
+ /* Start verification after current task finished. */
cdev->private->flags.doverify = 1;
}
@@ -1114,10 +1121,7 @@ device_trigger_reprobe(struct subchannel *sch)
* The pim, pam, pom values may not be accurate, but they are the best
* we have before performing device selection :/
*/
- sch->lpm = sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom &
- sch->opm;
+ sch->lpm = sch->schib.pmcw.pam & sch->opm;
/* Re-set some bits in the pmcw that were lost. */
sch->schib.pmcw.isc = 3;
sch->schib.pmcw.csense = 1;
@@ -1241,7 +1245,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
@@ -1284,7 +1288,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout,
- [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify,
+ [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_QUIESCE] = {
[DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
@@ -1297,7 +1301,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
[DEV_EVENT_TIMEOUT] = ccw_device_bug,
- [DEV_EVENT_VERIFY] = ccw_device_nop,
+ [DEV_EVENT_VERIFY] = ccw_device_start_id,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] = {
[DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 9e3de0bd59b..93a897eebff 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -96,6 +96,12 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
+ /* Adjust requested path mask to excluded varied off paths. */
+ if (lpm) {
+ lpm &= sch->opm;
+ if (lpm == 0)
+ return -EACCES;
+ }
ret = cio_start_key (sch, cpa, lpm, key);
if (ret == 0)
cdev->private->intparm = intparm;
@@ -250,7 +256,7 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
if (!sch)
return 0;
else
- return sch->vpm;
+ return sch->lpm;
}
static void
@@ -304,7 +310,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
sch = to_subchannel(cdev->dev.parent);
do {
ret = cio_start (sch, ccw, lpm);
- if ((ret == -EBUSY) || (ret == -EACCES)) {
+ if (ret == -EBUSY) {
/* Try again later. */
spin_unlock_irq(&sch->lock);
msleep(10);
@@ -433,6 +439,13 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
if (!ciw || ciw->cmd == 0)
return -EOPNOTSUPP;
+ /* Adjust requested path mask to excluded varied off paths. */
+ if (lpm) {
+ lpm &= sch->opm;
+ if (lpm == 0)
+ return -EACCES;
+ }
+
rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
if (!rcd_ccw)
return -ENOMEM;
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 32610fd8868..8ca2d078848 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -24,6 +24,21 @@
#include "ioasm.h"
/*
+ * Helper function called from interrupt context to decide whether an
+ * operation should be tried again.
+ */
+static int __ccw_device_should_retry(struct scsw *scsw)
+{
+ /* CC is only valid if start function bit is set. */
+ if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1)
+ return 1;
+ /* No more activity. For sense and set PGID we stubbornly try again. */
+ if (!scsw->actl)
+ return 1;
+ return 0;
+}
+
+/*
* Start Sense Path Group ID helper function. Used in ccw_device_recog
* and ccw_device_sense_pgid.
*/
@@ -155,10 +170,10 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
irb = (struct irb *) __LC_IRB;
- /* Retry sense pgid for cc=1. */
+
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (irb->scsw.cc == 1) {
+ if (__ccw_device_should_retry(&irb->scsw)) {
ret = __ccw_device_sense_pgid_start(cdev);
if (ret && ret != -EBUSY)
ccw_device_sense_pgid_done(cdev, ret);
@@ -230,18 +245,17 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try multiple times. */
- ret = -ENODEV;
+ ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if ((ret != -EACCES) && (ret != -ENODEV))
+ /* We expect an interrupt in case of success or busy
+ * indication. */
+ if ((ret == 0) || (ret == -EBUSY))
return ret;
}
- /* PGID command failed on this path. Switch it off. */
- sch->lpm &= ~cdev->private->imask;
- sch->vpm &= ~cdev->private->imask;
+ /* PGID command failed on this path. */
CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
cdev->private->devno, sch->schid.ssid,
@@ -271,18 +285,17 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
memset(&cdev->private->irb, 0, sizeof(struct irb));
/* Try multiple times. */
- ret = -ENODEV;
+ ret = -EACCES;
if (cdev->private->iretry > 0) {
cdev->private->iretry--;
ret = cio_start (sch, cdev->private->iccws,
cdev->private->imask);
- /* ret is 0, -EBUSY, -EACCES or -ENODEV */
- if ((ret != -EACCES) && (ret != -ENODEV))
+ /* We expect an interrupt in case of success or busy
+ * indication. */
+ if ((ret == 0) || (ret == -EBUSY))
return ret;
}
- /* nop command failed on this path. Switch it off. */
- sch->lpm &= ~cdev->private->imask;
- sch->vpm &= ~cdev->private->imask;
+ /* nop command failed on this path. */
CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
"0.%x.%04x, lpm %02X, became 'not operational'\n",
cdev->private->devno, sch->schid.ssid,
@@ -357,27 +370,32 @@ static void
__ccw_device_verify_start(struct ccw_device *cdev)
{
struct subchannel *sch;
- __u8 imask, func;
+ __u8 func;
int ret;
sch = to_subchannel(cdev->dev.parent);
- while (sch->vpm != sch->lpm) {
- /* Find first unequal bit in vpm vs. lpm */
- for (imask = 0x80; imask != 0; imask >>= 1)
- if ((sch->vpm & imask) != (sch->lpm & imask))
- break;
- cdev->private->imask = imask;
+ /* Repeat for all paths. */
+ for (; cdev->private->imask; cdev->private->imask >>= 1,
+ cdev->private->iretry = 5) {
+ if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
+ /* Path not available, try next. */
+ continue;
if (cdev->private->options.pgroup) {
- func = (sch->vpm & imask) ?
- SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH;
+ if (sch->opm & cdev->private->imask)
+ func = SPID_FUNC_ESTABLISH;
+ else
+ func = SPID_FUNC_RESIGN;
ret = __ccw_device_do_pgid(cdev, func);
} else
ret = __ccw_device_do_nop(cdev);
+ /* We expect an interrupt in case of success or busy
+ * indication. */
if (ret == 0 || ret == -EBUSY)
return;
- cdev->private->iretry = 5;
+ /* Permanent path failure, try next. */
}
- ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+ /* Done with all paths. */
+ ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
}
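
The rewritten loop replaces the old vpm/lpm bit-diff search with a straightforward MSB-first walk over the eight path-mask bits, resetting the retry counter per path. A minimal standalone sketch of that walk; issue_on_path() is a hypothetical stand-in for starting SPID/NOP on one path:

#include <stdio.h>

/* Hypothetical stand-in for starting a SPID/NOP ccw on one path;
 * returning 0 means "interrupt expected, stop and wait". */
static int issue_on_path(unsigned char imask)
{
	printf("start I/O on path mask %02x\n", imask);
	return -1;	/* pretend the path failed, move on */
}

static void walk_paths(unsigned char pam)
{
	unsigned char imask;

	for (imask = 0x80; imask != 0; imask >>= 1) {
		if (!(imask & pam))
			continue;		/* path not available */
		if (issue_on_path(imask) == 0)
			return;			/* wait for the interrupt */
	}
	/* all paths processed */
}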
/*
@@ -391,10 +409,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
irb = (struct irb *) __LC_IRB;
- /* Retry set pgid for cc=1. */
+
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (irb->scsw.cc == 1)
+ if (__ccw_device_should_retry(&irb->scsw))
__ccw_device_verify_start(cdev);
return;
}
@@ -406,14 +424,14 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
else
ret = __ccw_device_check_nop(cdev);
memset(&cdev->private->irb, 0, sizeof(struct irb));
+
switch (ret) {
/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
case 0:
- /* Establish or Resign Path Group done. Update vpm. */
- if ((sch->lpm & cdev->private->imask) != 0)
- sch->vpm |= cdev->private->imask;
- else
- sch->vpm &= ~cdev->private->imask;
+ /* Path verification ccw finished successfully, update lpm. */
+ sch->vpm |= sch->opm & cdev->private->imask;
+ /* Go on with next path. */
+ cdev->private->imask >>= 1;
cdev->private->iretry = 5;
__ccw_device_verify_start(cdev);
break;
@@ -426,6 +444,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
cdev->private->options.pgroup = 0;
else
cdev->private->flags.pgid_single = 1;
+ /* Retry */
+ sch->vpm = 0;
+ cdev->private->imask = 0x80;
+ cdev->private->iretry = 5;
/* fall through. */
case -EAGAIN: /* Try again. */
__ccw_device_verify_start(cdev);
@@ -434,8 +456,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
ccw_device_verify_done(cdev, -ETIME);
break;
case -EACCES: /* channel is not operational. */
- sch->lpm &= ~cdev->private->imask;
- sch->vpm &= ~cdev->private->imask;
+ cdev->private->imask >>= 1;
cdev->private->iretry = 5;
__ccw_device_verify_start(cdev);
break;
@@ -448,19 +469,17 @@ ccw_device_verify_start(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
cdev->private->flags.pgid_single = 0;
+ cdev->private->imask = 0x80;
cdev->private->iretry = 5;
- /*
- * Update sch->lpm with current values to catch paths becoming
- * available again.
- */
+
+ /* Start with empty vpm. */
+ sch->vpm = 0;
+
+ /* Get current pam. */
if (stsch(sch->schid, &sch->schib)) {
ccw_device_verify_done(cdev, -ENODEV);
return;
}
- sch->lpm = sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom &
- sch->opm;
__ccw_device_verify_start(cdev);
}
@@ -494,10 +513,10 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
int ret;
irb = (struct irb *) __LC_IRB;
- /* Retry set pgid for cc=1. */
+
if (irb->scsw.stctl ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
- if (irb->scsw.cc == 1)
+ if (__ccw_device_should_retry(&irb->scsw))
__ccw_device_disband_start(cdev);
return;
}
@@ -509,7 +528,6 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
switch (ret) {
/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
case 0: /* disband successful. */
- sch->vpm = 0;
ccw_device_disband_done(cdev, ret);
break;
case -EOPNOTSUPP:
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 7c93a8798d2..cde822d8b5c 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -115,7 +115,7 @@ qdio_min(int a,int b)
static inline __u64
qdio_get_micros(void)
{
- return (get_clock() >> 10); /* time>>12 is microseconds */
+ return (get_clock() >> 12); /* time>>12 is microseconds */
}
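
This hunk is a straight bug fix: bit 51 of the s390 TOD clock ticks once per microsecond, so the conversion must shift right by 12, as the comment already said, not by 10. A standalone sketch of the arithmetic:

#include <stdio.h>

/* Bit 51 (counting from the MSB as bit 0) of the 64-bit TOD value is
 * the microsecond bit, i.e. 63 - 51 = 12 low-order bits sit below it. */
static unsigned long long tod_to_micros(unsigned long long tod)
{
	return tod >> 12;
}

int main(void)
{
	printf("%llu\n", tod_to_micros(4096ULL));	/* prints 1 */
	return 0;
}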
/*
@@ -1129,7 +1129,7 @@ out:
#ifdef QDIO_USE_PROCESSING_STATE
if (last_position>=0)
- set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
+ set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
#endif /* QDIO_USE_PROCESSING_STATE */
QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ceb3ab31ee0..124569362f0 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -191,49 +191,49 @@ enum qdio_irq_states {
#if QDIO_VERBOSE_LEVEL>8
#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_STUPID(x...)
+#define QDIO_PRINT_STUPID(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>7
#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_ALL(x...)
+#define QDIO_PRINT_ALL(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>6
#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_INFO(x...)
+#define QDIO_PRINT_INFO(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>5
#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_WARN(x...)
+#define QDIO_PRINT_WARN(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>4
#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_ERR(x...)
+#define QDIO_PRINT_ERR(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>3
#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_CRIT(x...)
+#define QDIO_PRINT_CRIT(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>2
#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_ALERT(x...)
+#define QDIO_PRINT_ALERT(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>1
#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
#else
-#define QDIO_PRINT_EMERG(x...)
+#define QDIO_PRINT_EMERG(x...) do { } while (0)
#endif
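
Each empty variant now expands to do { } while (0) instead of nothing. That turns the macro into a proper single statement: the trailing semicolon stays mandatory, and an unbraced if no longer ends up with an empty body that the compiler may warn about. A minimal illustration:

/* Empty expansion: the call below becomes "if (err) ;" after
 * preprocessing (warns with -Wempty-body). The statement form
 * expands to a real, if empty, statement instead. */
#define LOG_EMPTY(x...)
#define LOG_STMT(x...) do { } while (0)

static void handle(int err)
{
	if (err)
		LOG_STMT("error %d\n", err);	/* a well-formed statement */
}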
#define HEXDUMP16(importance,header,ptr) \
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 15edebbead7..f0a12d2eb78 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,5 +2,16 @@
# S/390 crypto devices
#
-z90crypt-objs := z90main.o z90hardware.o
-obj-$(CONFIG_Z90CRYPT) += z90crypt.o
+ifdef CONFIG_ZCRYPT_MONOLITHIC
+
+z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \
+ zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o
+obj-$(CONFIG_ZCRYPT) += z90crypt.o
+
+else
+
+ap-objs := ap_bus.o
+obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
+
+endif
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 00000000000..6ed0985c0c9
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1221 @@
+/*
+ * linux/drivers/s390/crypto/ap_bus.c
+ *
+ * Copyright (C) 2006 IBM Corporation
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * Adjunct processor bus.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <asm/s390_rdev.h>
+
+#include "ap_bus.h"
+
+/* Some prototypes. */
+static void ap_scan_bus(void *);
+static void ap_poll_all(unsigned long);
+static void ap_poll_timeout(unsigned long);
+static int ap_poll_thread_start(void);
+static void ap_poll_thread_stop(void);
+
+/**
+ * Module description.
+ */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
+ "Copyright 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+/**
+ * Module parameter
+ */
+int ap_domain_index = -1; /* Adjunct Processor Domain Index */
+module_param_named(domain, ap_domain_index, int, 0000);
+MODULE_PARM_DESC(domain, "domain index for ap devices");
+EXPORT_SYMBOL(ap_domain_index);
+
+static int ap_thread_flag = 1;
+module_param_named(poll_thread, ap_thread_flag, int, 0000);
+MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on).");
+
+static struct device *ap_root_device = NULL;
+
+/**
+ * Workqueue & timer for bus rescan.
+ */
+static struct workqueue_struct *ap_work_queue;
+static struct timer_list ap_config_timer;
+static int ap_config_time = AP_CONFIG_TIME;
+static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
+
+/**
+ * Tasklet & timer for AP request polling.
+ */
+static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
+static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
+static atomic_t ap_poll_requests = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
+static struct task_struct *ap_poll_kthread = NULL;
+static DEFINE_MUTEX(ap_poll_thread_mutex);
+
+/**
+ * Test if ap instructions are available.
+ *
+ * Returns 0 if the ap instructions are installed.
+ */
+static inline int ap_instructions_available(void)
+{
+ register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
+ register unsigned long reg1 asm ("1") = -ENODEV;
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(
+ " .long 0xb2af0000\n" /* PQAP(TAPQ) */
+ "0: la %1,0\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
+ return reg1;
+}
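
reg1 is preloaded with -ENODEV before the raw PQAP opcode; if the machine lacks the AP instructions, the resulting program check is resolved via the EX_TABLE entry, which resumes at label 1 and thus skips the "la %1,0" that clears the error on the success path. A conceptual, non-literal C rendering of that control flow:

/* Conceptual rendering only: the real skip is performed by the s390
 * exception-table fixup, not by C control flow. try_pqap() is a
 * hypothetical stand-in for executing the PQAP opcode. */
static int probe_conceptually(int (*try_pqap)(void))
{
	int rc = -ENODEV;	/* preset: assume instruction absent */

	if (try_pqap() == 0)	/* a fault here jumps past the next line */
		rc = 0;		/* corresponds to "la %1,0" */
	return rc;
}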
+
+/**
+ * Test adjunct processor queue.
+ * @qid: the ap queue number
+ * @queue_depth: pointer to queue depth value
+ * @device_type: pointer to device type value
+ *
+ * Returns ap queue status structure.
+ */
+static inline struct ap_queue_status
+ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ *device_type = (int) (reg2 >> 24);
+ *queue_depth = (int) (reg2 & 0xff);
+ return reg1;
+}
+
+/**
+ * Reset adjunct processor queue.
+ * @qid: the ap queue number
+ *
+ * Returns ap queue status structure.
+ */
+static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
+{
+ register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(RAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ return reg1;
+}
+
+/**
+ * Send message to adjunct processor queue.
+ * @qid: the ap queue number
+ * @psmid: the program supplied message identifier
+ * @msg: the message text
+ * @length: the message length
+ *
+ * Returns ap queue status structure.
+ *
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ *
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+ typedef struct { char _[length]; } msgblock;
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = (unsigned int) psmid;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* DQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
+ : "cc" );
+ return reg1;
+}
+
+int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ status = __ap_send(qid, psmid, msg, length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_Q_FULL:
+ return -EBUSY;
+ default: /* Device is gone. */
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_send);
+
+/*
+ * Receive message from adjunct processor queue.
+ * @qid: the ap queue number
+ * @psmid: pointer to program supplied message identifier
+ * @msg: the message text
+ * @length: the message length
+ *
+ * Returns ap queue status structure.
+ *
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially. The response is incomplete, hence the
+ * DQAP is repeated.
+ *
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ *
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status
+__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+ typedef struct { char _[length]; } msgblock;
+ register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm("2") = 0UL;
+ register unsigned long reg4 asm("4") = (unsigned long) msg;
+ register unsigned long reg5 asm("5") = (unsigned long) length;
+ register unsigned long reg6 asm("6") = 0UL;
+ register unsigned long reg7 asm("7") = 0UL;
+
+ asm volatile(
+ "0: .long 0xb2ae0064\n"
+ " brc 6,0b\n"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2),
+ "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
+ "=m" (*(msgblock *) msg) : : "cc" );
+ *psmid = (((unsigned long long) reg6) << 32) + reg7;
+ return reg1;
+}
+
+int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+ struct ap_queue_status status;
+
+ status = __ap_recv(qid, psmid, msg, length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (status.queue_empty)
+ return -ENOENT;
+ return -EBUSY;
+ default:
+ return -ENODEV;
+ }
+}
+EXPORT_SYMBOL(ap_recv);
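
Together, ap_send() and ap_recv() support a simple synchronous round trip for callers that poll themselves; a hedged sketch in the spirit of ap_probe_device_type() below (the psmid value and the retry bounds are illustrative, not mandated by the API):

/* Sketch: bounded synchronous request/reply over one AP queue,
 * assuming the surrounding kernel context (mdelay, errno values). */
static int ap_roundtrip_sketch(ap_qid_t qid, void *msg, size_t len,
			       void *reply, size_t rlen)
{
	unsigned long long psmid = 0x0102030405060708ULL;
	unsigned long long rpsmid;
	int rc, i;

	rc = ap_send(qid, psmid, msg, len);
	if (rc)
		return rc;
	for (i = 0; i < 6; i++) {
		mdelay(300);	/* crude wait; real code polls smarter */
		rc = ap_recv(qid, &rpsmid, reply, rlen);
		if (rc == 0 && rpsmid == psmid)
			return 0;
		if (rc && rc != -ENOENT && rc != -EBUSY)
			return rc;	/* hard error, e.g. -ENODEV */
	}
	return -ETIME;
}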
+
+/**
+ * Check if an AP queue is available. The test is repeated for
+ * AP_MAX_RESET times.
+ * @qid: the ap queue number
+ * @queue_depth: pointer to queue depth value
+ * @device_type: pointer to device type value
+ */
+static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
+{
+ struct ap_queue_status status;
+ int t_depth, t_device_type, rc, i;
+
+ rc = -EBUSY;
+ for (i = 0; i < AP_MAX_RESET; i++) {
+ status = ap_test_queue(qid, &t_depth, &t_device_type);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ *queue_depth = t_depth + 1;
+ *device_type = t_device_type;
+ rc = 0;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ rc = -ENODEV;
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ break;
+ case AP_RESPONSE_DECONFIGURED:
+ rc = -ENODEV;
+ break;
+ case AP_RESPONSE_CHECKSTOPPED:
+ rc = -ENODEV;
+ break;
+ case AP_RESPONSE_BUSY:
+ break;
+ default:
+ BUG();
+ }
+ if (rc != -EBUSY)
+ break;
+ if (i < AP_MAX_RESET - 1)
+ udelay(5);
+ }
+ return rc;
+}
+
+/**
+ * Reset an AP queue and wait for it to become available again.
+ * @qid: the ap queue number
+ */
+static int ap_init_queue(ap_qid_t qid)
+{
+ struct ap_queue_status status;
+ int rc, dummy, i;
+
+ rc = -ENODEV;
+ status = ap_reset_queue(qid);
+ for (i = 0; i < AP_MAX_RESET; i++) {
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (status.queue_empty)
+ rc = 0;
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ i = AP_MAX_RESET; /* return with -ENODEV */
+ break;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ default:
+ break;
+ }
+ if (rc != -ENODEV)
+ break;
+ if (i < AP_MAX_RESET - 1) {
+ udelay(5);
+ status = ap_test_queue(qid, &dummy, &dummy);
+ }
+ }
+ return rc;
+}
+
+/**
+ * AP device related attributes.
+ */
+static ssize_t ap_hwtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
+}
+static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+
+static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
+}
+static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
+
+static ssize_t ap_request_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ int rc;
+
+ spin_lock_bh(&ap_dev->lock);
+ rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
+ spin_unlock_bh(&ap_dev->lock);
+ return rc;
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
+}
+
+static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
+
+static struct attribute *ap_dev_attrs[] = {
+ &dev_attr_hwtype.attr,
+ &dev_attr_depth.attr,
+ &dev_attr_request_count.attr,
+ &dev_attr_modalias.attr,
+ NULL
+};
+static struct attribute_group ap_dev_attr_group = {
+ .attrs = ap_dev_attrs
+};
+
+/**
+ * AP bus driver registration/unregistration.
+ */
+static int ap_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ struct ap_driver *ap_drv = to_ap_drv(drv);
+ struct ap_device_id *id;
+
+ /**
+ * Compare device type of the device with the list of
+ * supported types of the device_driver.
+ */
+ for (id = ap_drv->ids; id->match_flags; id++) {
+ if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
+ (id->dev_type != ap_dev->device_type))
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * uevent function for AP devices. It sets up a single environment
+ * variable DEV_TYPE which contains the hardware device type.
+ */
+static int ap_uevent(struct device *dev, char **envp, int num_envp,
+ char *buffer, int buffer_size)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ int length;
+
+ if (!ap_dev)
+ return -ENODEV;
+
+ /* Set up DEV_TYPE environment variable. */
+ envp[0] = buffer;
+ length = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X",
+ ap_dev->device_type);
+ if (buffer_size - length <= 0)
+ return -ENOMEM;
+ envp[1] = 0;
+ return 0;
+}
+
+static struct bus_type ap_bus_type = {
+ .name = "ap",
+ .match = &ap_bus_match,
+ .uevent = &ap_uevent,
+};
+
+static int ap_device_probe(struct device *dev)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+ int rc;
+
+ ap_dev->drv = ap_drv;
+ rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
+ if (rc)
+ ap_dev->unregistered = 1;
+ return rc;
+}
+
+/**
+ * Flush all requests from the request/pending queue of an AP device.
+ * @ap_dev: pointer to the AP device.
+ */
+static inline void __ap_flush_queue(struct ap_device *ap_dev)
+{
+ struct ap_message *ap_msg, *next;
+
+ list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
+ list_del_init(&ap_msg->list);
+ ap_dev->pendingq_count--;
+ ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+ }
+ list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
+ list_del_init(&ap_msg->list);
+ ap_dev->requestq_count--;
+ ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+ }
+}
+
+void ap_flush_queue(struct ap_device *ap_dev)
+{
+ spin_lock_bh(&ap_dev->lock);
+ __ap_flush_queue(ap_dev);
+ spin_unlock_bh(&ap_dev->lock);
+}
+EXPORT_SYMBOL(ap_flush_queue);
+
+static int ap_device_remove(struct device *dev)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ struct ap_driver *ap_drv = ap_dev->drv;
+
+ spin_lock_bh(&ap_dev->lock);
+ __ap_flush_queue(ap_dev);
+ /**
+ * set ->unregistered to 1 while holding the lock. This prevents
+ * new messages to be put on the queue from now on.
+ */
+ ap_dev->unregistered = 1;
+ spin_unlock_bh(&ap_dev->lock);
+ if (ap_drv->remove)
+ ap_drv->remove(ap_dev);
+ return 0;
+}
+
+int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
+ char *name)
+{
+ struct device_driver *drv = &ap_drv->driver;
+
+ drv->bus = &ap_bus_type;
+ drv->probe = ap_device_probe;
+ drv->remove = ap_device_remove;
+ drv->owner = owner;
+ drv->name = name;
+ return driver_register(drv);
+}
+EXPORT_SYMBOL(ap_driver_register);
+
+void ap_driver_unregister(struct ap_driver *ap_drv)
+{
+ driver_unregister(&ap_drv->driver);
+}
+EXPORT_SYMBOL(ap_driver_unregister);
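
A card driver plugs into the bus by filling an ap_device_id table (the AP_DEVICE() initializer lives in ap_bus.h) and registering an ap_driver. A minimal hypothetical driver skeleton, not one of the real zcrypt drivers:

/* Hypothetical skeleton of an AP card driver; the real examples are
 * the zcrypt_* drivers built from the Makefile above. */
static struct ap_device_id demo_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
	{ /* terminating entry: match_flags == 0 */ },
};

static int demo_probe(struct ap_device *ap_dev)
{
	return 0;			/* accept the device */
}

static void demo_remove(struct ap_device *ap_dev)
{
}

/* Runs in tasklet context when a reply (or an ERR_PTR) comes back. */
static void demo_receive(struct ap_device *ap_dev, struct ap_message *msg,
			 struct ap_message *reply)
{
}

static struct ap_driver demo_driver = {
	.ids = demo_ids,
	.probe = demo_probe,
	.remove = demo_remove,
	.receive = demo_receive,
};

/* module init would then call:
 *	ap_driver_register(&demo_driver, THIS_MODULE, "demo");
 * and module exit:
 *	ap_driver_unregister(&demo_driver);
 */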
+
+/**
+ * AP bus attributes.
+ */
+static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+}
+
+static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
+
+static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+}
+
+static ssize_t ap_config_time_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int time;
+
+ if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
+ return -EINVAL;
+ ap_config_time = time;
+ if (!timer_pending(&ap_config_timer) ||
+ !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+ }
+ return count;
+}
+
+static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
+
+static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+}
+
+static ssize_t ap_poll_thread_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int flag, rc;
+
+ if (sscanf(buf, "%d\n", &flag) != 1)
+ return -EINVAL;
+ if (flag) {
+ rc = ap_poll_thread_start();
+ if (rc)
+ return rc;
+ } else
+ ap_poll_thread_stop();
+ return count;
+}
+
+static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
+
+static struct bus_attribute *const ap_bus_attrs[] = {
+ &bus_attr_ap_domain,
+ &bus_attr_config_time,
+ &bus_attr_poll_thread,
+ NULL
+};
+
+/**
+ * Pick one of the 16 ap domains.
+ */
+static inline int ap_select_domain(void)
+{
+ int queue_depth, device_type, count, max_count, best_domain;
+ int rc, i, j;
+
+ /**
+ * We want to use a single domain. Either the one specified with
+ * the "domain=" parameter or the domain with the maximum number
+ * of devices.
+ */
+ if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
+ /* Domain has already been selected. */
+ return 0;
+ best_domain = -1;
+ max_count = 0;
+ for (i = 0; i < AP_DOMAINS; i++) {
+ count = 0;
+ for (j = 0; j < AP_DEVICES; j++) {
+ ap_qid_t qid = AP_MKQID(j, i);
+ rc = ap_query_queue(qid, &queue_depth, &device_type);
+ if (rc)
+ continue;
+ count++;
+ }
+ if (count > max_count) {
+ max_count = count;
+ best_domain = i;
+ }
+ }
+ if (best_domain >= 0) {
+ ap_domain_index = best_domain;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * Find the device type if query queue returned a device type of 0.
+ * @ap_dev: pointer to the AP device.
+ */
+static int ap_probe_device_type(struct ap_device *ap_dev)
+{
+ static unsigned char msg[] = {
+ 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
+ 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
+ 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
+ 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
+ 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
+ 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
+ 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
+ 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
+ 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
+ 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
+ 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
+ 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
+ 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
+ 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
+ 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
+ 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
+ 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
+ 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
+ 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
+ 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
+ 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
+ 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
+ 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
+ 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
+ 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
+ };
+ struct ap_queue_status status;
+ unsigned long long psmid;
+ char *reply;
+ int rc, i;
+
+ reply = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!reply) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
+ msg, sizeof(msg));
+ if (status.response_code != AP_RESPONSE_NORMAL) {
+ rc = -ENODEV;
+ goto out_free;
+ }
+
+ /* Wait for the test message to complete. */
+ for (i = 0; i < 6; i++) {
+ mdelay(300);
+ status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
+ if (status.response_code == AP_RESPONSE_NORMAL &&
+ psmid == 0x0102030405060708ULL)
+ break;
+ }
+ if (i < 6) {
+ /* Got an answer. */
+ if (reply[0] == 0x00 && reply[1] == 0x86)
+ ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
+ else
+ ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
+ rc = 0;
+ } else
+ rc = -ENODEV;
+
+out_free:
+ free_page((unsigned long) reply);
+out:
+ return rc;
+}
+
+/**
+ * Scan the ap bus for new devices.
+ */
+static int __ap_scan_bus(struct device *dev, void *data)
+{
+ return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
+}
+
+static void ap_device_release(struct device *dev)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ kfree(ap_dev);
+}
+
+static void ap_scan_bus(void *data)
+{
+ struct ap_device *ap_dev;
+ struct device *dev;
+ ap_qid_t qid;
+ int queue_depth, device_type;
+ int rc, i;
+
+ if (ap_select_domain() != 0)
+ return;
+ for (i = 0; i < AP_DEVICES; i++) {
+ qid = AP_MKQID(i, ap_domain_index);
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(unsigned long)qid,
+ __ap_scan_bus);
+ if (dev) {
+ put_device(dev);
+ continue;
+ }
+ rc = ap_query_queue(qid, &queue_depth, &device_type);
+ if (rc)
+ continue;
+ rc = ap_init_queue(qid);
+ if (rc)
+ continue;
+ ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
+ if (!ap_dev)
+ break;
+ ap_dev->qid = qid;
+ ap_dev->queue_depth = queue_depth;
+ spin_lock_init(&ap_dev->lock);
+ INIT_LIST_HEAD(&ap_dev->pendingq);
+ INIT_LIST_HEAD(&ap_dev->requestq);
+ if (device_type == 0)
+ ap_probe_device_type(ap_dev);
+ else
+ ap_dev->device_type = device_type;
+
+ ap_dev->device.bus = &ap_bus_type;
+ ap_dev->device.parent = ap_root_device;
+ snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
+ AP_QID_DEVICE(ap_dev->qid));
+ ap_dev->device.release = ap_device_release;
+ rc = device_register(&ap_dev->device);
+ if (rc) {
+ kfree(ap_dev);
+ continue;
+ }
+ /* Add device attributes. */
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ap_dev_attr_group);
+ if (rc)
+ device_unregister(&ap_dev->device);
+ }
+}
+
+static void
+ap_config_timeout(unsigned long ptr)
+{
+ queue_work(ap_work_queue, &ap_config_work);
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+}
+
+/**
+ * Set up the timer to run the poll tasklet
+ */
+static inline void ap_schedule_poll_timer(void)
+{
+ if (timer_pending(&ap_poll_timer))
+ return;
+ mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
+}
+
+/**
+ * Receive pending reply messages from an AP device.
+ * @ap_dev: pointer to the AP device
+ * @flags: pointer to control flags, bit 2^0 is set if another poll is
+ * required, bit 2^1 is set if the poll timer needs to get armed
+ * Returns 0 if the device is still present, -ENODEV if not.
+ */
+static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+
+ if (ap_dev->queue_count <= 0)
+ return 0;
+ status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
+ ap_dev->reply->message, ap_dev->reply->length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ atomic_dec(&ap_poll_requests);
+ ap_dev->queue_count--;
+ list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
+ if (ap_msg->psmid != ap_dev->reply->psmid)
+ continue;
+ list_del_init(&ap_msg->list);
+ ap_dev->pendingq_count--;
+ ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
+ break;
+ }
+ if (ap_dev->queue_count > 0)
+ *flags |= 1;
+ break;
+ case AP_RESPONSE_NO_PENDING_REPLY:
+ if (status.queue_empty) {
+ /* The card shouldn't forget requests but who knows. */
+ ap_dev->queue_count = 0;
+ list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
+ ap_dev->requestq_count += ap_dev->pendingq_count;
+ ap_dev->pendingq_count = 0;
+ } else
+ *flags |= 2;
+ break;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * Send messages from the request queue to an AP device.
+ * @ap_dev: pointer to the AP device
+ * @flags: pointer to control flags, bit 2^0 is set if another poll is
+ * required, bit 2^1 is set if the poll timer needs to get armed
+ * Returns 0 if the device is still present, -ENODEV if not.
+ */
+static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
+{
+ struct ap_queue_status status;
+ struct ap_message *ap_msg;
+
+ if (ap_dev->requestq_count <= 0 ||
+ ap_dev->queue_count >= ap_dev->queue_depth)
+ return 0;
+ /* Start the next request on the queue. */
+ ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
+ status = __ap_send(ap_dev->qid, ap_msg->psmid,
+ ap_msg->message, ap_msg->length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ atomic_inc(&ap_poll_requests);
+ ap_dev->queue_count++;
+ list_move_tail(&ap_msg->list, &ap_dev->pendingq);
+ ap_dev->requestq_count--;
+ ap_dev->pendingq_count++;
+ if (ap_dev->queue_count < ap_dev->queue_depth &&
+ ap_dev->requestq_count > 0)
+ *flags |= 1;
+ *flags |= 2;
+ break;
+ case AP_RESPONSE_Q_FULL:
+ *flags |= 2;
+ break;
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ return -EINVAL;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/**
+ * Poll AP device for pending replies and send new messages. If either
+ * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
+ * @ap_dev: pointer to the bus device
+ * @flags: pointer to control flags, bit 2^0 is set if another poll is
+ * required, bit 2^1 is set if the poll timer needs to get armed
+ * Returns 0.
+ */
+static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
+{
+ int rc;
+
+ rc = ap_poll_read(ap_dev, flags);
+ if (rc)
+ return rc;
+ return ap_poll_write(ap_dev, flags);
+}
+
+/**
+ * Queue a message to a device.
+ * @ap_dev: pointer to the AP device
+ * @ap_msg: the message to be queued
+ */
+static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
+{
+ struct ap_queue_status status;
+
+ if (list_empty(&ap_dev->requestq) &&
+ ap_dev->queue_count < ap_dev->queue_depth) {
+ status = __ap_send(ap_dev->qid, ap_msg->psmid,
+ ap_msg->message, ap_msg->length);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ list_add_tail(&ap_msg->list, &ap_dev->pendingq);
+ atomic_inc(&ap_poll_requests);
+ ap_dev->pendingq_count++;
+ ap_dev->queue_count++;
+ ap_dev->total_request_count++;
+ break;
+ case AP_RESPONSE_Q_FULL:
+ list_add_tail(&ap_msg->list, &ap_dev->requestq);
+ ap_dev->requestq_count++;
+ ap_dev->total_request_count++;
+ return -EBUSY;
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
+ return -EINVAL;
+ default: /* Device is gone. */
+ ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+ return -ENODEV;
+ }
+ } else {
+ list_add_tail(&ap_msg->list, &ap_dev->requestq);
+ ap_dev->requestq_count++;
+ ap_dev->total_request_count++;
+ return -EBUSY;
+ }
+ ap_schedule_poll_timer();
+ return 0;
+}
+
+void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_bh(&ap_dev->lock);
+ if (!ap_dev->unregistered) {
+ /* Make room on the queue by polling for finished requests. */
+ rc = ap_poll_queue(ap_dev, &flags);
+ if (!rc)
+ rc = __ap_queue_message(ap_dev, ap_msg);
+ if (!rc)
+ wake_up(&ap_poll_wait);
+ } else {
+ ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+ rc = 0;
+ }
+ spin_unlock_bh(&ap_dev->lock);
+ if (rc == -ENODEV)
+ device_unregister(&ap_dev->device);
+}
+EXPORT_SYMBOL(ap_queue_message);
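
From a driver's point of view submission is asynchronous: fill in an ap_message, call ap_queue_message(), and the reply (or an ERR_PTR-encoded error) arrives later through the driver's receive() callback. A hedged sketch with hypothetical names; note that the ap_message must stay alive until receive() has run:

/* Hypothetical submit helper; a real driver embeds the ap_message in
 * a longer-lived request structure rather than a stack variable. */
static void demo_submit(struct ap_device *ap_dev,
			struct ap_message *ap_msg,
			void *buf, size_t len)
{
	ap_msg->message = buf;			/* request built by caller */
	ap_msg->length = len;
	ap_msg->psmid = 0x0102030405060708ULL;	/* unique per request */
	ap_msg->private = NULL;
	INIT_LIST_HEAD(&ap_msg->list);

	ap_queue_message(ap_dev, ap_msg);
	/* completion arrives via the ap_driver receive() callback;
	 * ap_cancel_message(ap_dev, ap_msg) would withdraw it again. */
}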
+
+/**
+ * Cancel a crypto request. This is done by removing the request
+ * from the device's pendingq or requestq queue. Note that the
+ * request stays on the AP queue. When it finishes the message
+ * reply will be discarded because the psmid can't be found.
+ * @ap_dev: AP device that has the message queued
+ * @ap_msg: the message that is to be removed
+ */
+void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
+{
+ struct ap_message *tmp;
+
+ spin_lock_bh(&ap_dev->lock);
+ if (!list_empty(&ap_msg->list)) {
+ list_for_each_entry(tmp, &ap_dev->pendingq, list)
+ if (tmp->psmid == ap_msg->psmid) {
+ ap_dev->pendingq_count--;
+ goto found;
+ }
+ ap_dev->requestq_count--;
+ found:
+ list_del_init(&ap_msg->list);
+ }
+ spin_unlock_bh(&ap_dev->lock);
+}
+EXPORT_SYMBOL(ap_cancel_message);
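
The cancel path leans on a standard list idiom: because queued entries are removed with list_del_init(), a message's own list_head stays self-linked once it is off the queues, so list_empty() doubles as an "is it still queued" test. A tiny sketch:

/* Sketch: after list_del_init() the node is self-linked, so checking
 * its own list_head answers "is this message still queued?". */
static int msg_is_queued(struct ap_message *ap_msg)
{
	return !list_empty(&ap_msg->list);
}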
+
+/**
+ * AP receive polling for finished AP requests
+ */
+static void ap_poll_timeout(unsigned long unused)
+{
+ tasklet_schedule(&ap_tasklet);
+}
+
+/**
+ * Poll all AP devices on the bus in a round robin fashion. Continue
+ * polling until bit 2^0 of the control flags is not set. If bit 2^1
+ * of the control flags has been set arm the poll timer.
+ */
+static int __ap_poll_all(struct device *dev, void *data)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ int rc;
+
+ spin_lock(&ap_dev->lock);
+ if (!ap_dev->unregistered) {
+ rc = ap_poll_queue(to_ap_dev(dev), (unsigned long *) data);
+ } else
+ rc = 0;
+ spin_unlock(&ap_dev->lock);
+ if (rc)
+ device_unregister(&ap_dev->device);
+ return 0;
+}
+
+static void ap_poll_all(unsigned long dummy)
+{
+ unsigned long flags;
+
+ do {
+ flags = 0;
+ bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
+ } while (flags & 1);
+ if (flags & 2)
+ ap_schedule_poll_timer();
+}
+
+/**
+ * AP bus poll thread. The purpose of this thread is to poll for
+ * finished requests in a loop if there is a "free" cpu - that is
+ * a cpu that doesn't have anything better to do. The polling stops
+ * as soon as there is another task or if all messages have been
+ * delivered.
+ */
+static int ap_poll_thread(void *data)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ int requests;
+
+ set_user_nice(current, -20);
+ while (1) {
+ if (need_resched()) {
+ schedule();
+ continue;
+ }
+ add_wait_queue(&ap_poll_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ requests = atomic_read(&ap_poll_requests);
+ if (requests <= 0)
+ schedule();
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ap_poll_wait, &wait);
+
+ local_bh_disable();
+ flags = 0;
+ bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
+ local_bh_enable();
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ap_poll_wait, &wait);
+ return 0;
+}
+
+static int ap_poll_thread_start(void)
+{
+ int rc;
+
+ mutex_lock(&ap_poll_thread_mutex);
+ if (!ap_poll_kthread) {
+ ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
+ rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
+ if (rc)
+ ap_poll_kthread = NULL;
+ } else
+ rc = 0;
+ mutex_unlock(&ap_poll_thread_mutex);
+ return rc;
+}
+
+static void ap_poll_thread_stop(void)
+{
+ mutex_lock(&ap_poll_thread_mutex);
+ if (ap_poll_kthread) {
+ kthread_stop(ap_poll_kthread);
+ ap_poll_kthread = NULL;
+ }
+ mutex_unlock(&ap_poll_thread_mutex);
+}
+
+/**
+ * The module initialization code.
+ */
+int __init ap_module_init(void)
+{
+ int rc, i;
+
+ if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
+ printk(KERN_WARNING "Invalid param: domain = %d. "
+ " Not loading.\n", ap_domain_index);
+ return -EINVAL;
+ }
+ if (ap_instructions_available() != 0) {
+ printk(KERN_WARNING "AP instructions not installed.\n");
+ return -ENODEV;
+ }
+
+ /* Create /sys/bus/ap. */
+ rc = bus_register(&ap_bus_type);
+ if (rc)
+ goto out;
+ for (i = 0; ap_bus_attrs[i]; i++) {
+ rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
+ if (rc)
+ goto out_bus;
+ }
+
+ /* Create /sys/devices/ap. */
+ ap_root_device = s390_root_dev_register("ap");
+ rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
+ if (rc)
+ goto out_bus;
+
+ ap_work_queue = create_singlethread_workqueue("kapwork");
+ if (!ap_work_queue) {
+ rc = -ENOMEM;
+ goto out_root;
+ }
+
+ if (ap_select_domain() == 0)
+ ap_scan_bus(NULL);
+
+ /* Setup the ap bus rescan timer. */
+ init_timer(&ap_config_timer);
+ ap_config_timer.function = ap_config_timeout;
+ ap_config_timer.data = 0;
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+
+ /* Start the low priority AP bus poll thread. */
+ if (ap_thread_flag) {
+ rc = ap_poll_thread_start();
+ if (rc)
+ goto out_work;
+ }
+
+ return 0;
+
+out_work:
+ del_timer_sync(&ap_config_timer);
+ del_timer_sync(&ap_poll_timer);
+ destroy_workqueue(ap_work_queue);
+out_root:
+ s390_root_dev_unregister(ap_root_device);
+out_bus:
+ while (i--)
+ bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+ bus_unregister(&ap_bus_type);
+out:
+ return rc;
+}
+
+static int __ap_match_all(struct device *dev, void *data)
+{
+ return 1;
+}
+
+/**
+ * The module termination code.
+ */
+void ap_module_exit(void)
+{
+ int i;
+ struct device *dev;
+
+ ap_poll_thread_stop();
+ del_timer_sync(&ap_config_timer);
+ del_timer_sync(&ap_poll_timer);
+ destroy_workqueue(ap_work_queue);
+ s390_root_dev_unregister(ap_root_device);
+ while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
+ __ap_match_all))) {
+ device_unregister(dev);
+ put_device(dev);
+ }
+ for (i = 0; ap_bus_attrs[i]; i++)
+ bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+ bus_unregister(&ap_bus_type);
+}
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+module_init(ap_module_init);
+module_exit(ap_module_exit);
+#endif
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 00000000000..83b69c01cd6
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,158 @@
+/*
+ * linux/drivers/s390/crypto/ap_bus.h
+ *
+ * Copyright (C) 2006 IBM Corporation
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * Adjunct processor bus header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _AP_BUS_H_
+#define _AP_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+
+#define AP_DEVICES 64 /* Number of AP devices. */
+#define AP_DOMAINS 16 /* Number of AP domains. */
+#define AP_MAX_RESET 90 /* Maximum number of resets. */
+#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
+#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
+
+extern int ap_domain_index;
+
+/**
+ * The ap_qid_t identifier of an ap queue. It contains a
+ * 6 bit device index and a 4 bit queue index (domain).
+ */
+typedef unsigned int ap_qid_t;
+
+#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
+#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
+#define AP_QID_QUEUE(_qid) ((_qid) & 15)
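
The packing is easy to verify by hand: AP_MKQID(3, 5) is (3 << 8) | 5 == 0x0305, and the accessors invert it. A tiny userspace-style check (assert.h assumed purely for illustration):

#include <assert.h>

static void qid_layout_check(void)
{
	ap_qid_t qid = AP_MKQID(3, 5);

	assert(qid == 0x0305);
	assert(AP_QID_DEVICE(qid) == 3);
	assert(AP_QID_QUEUE(qid) == 5);
}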
+
+/**
+ * The ap queue status word is returned by all three AP functions
+ * (PQAP, NQAP and DQAP). There's a set of flags in the first
+ * byte, followed by a 1 byte response code.
+ */
+struct ap_queue_status {
+ unsigned int queue_empty : 1;
+ unsigned int replies_waiting : 1;
+ unsigned int queue_full : 1;
+ unsigned int pad1 : 5;
+ unsigned int response_code : 8;
+ unsigned int pad2 : 16;
+};
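
On s390 (big-endian) the first declared bitfield occupies the most significant bits, so queue_empty is bit 31 of the raw status word and response_code is its second byte; queue_empty=1 with response_code 0x10 corresponds to the raw value 0x80100000. A small sketch decoding a raw word under that layout assumption:

/* Sketch: decoding a raw 32-bit status word by hand, assuming the
 * big-endian bitfield layout of struct ap_queue_status above. */
static unsigned int raw_response_code(unsigned int raw)
{
	return (raw >> 16) & 0xff;	/* second byte */
}

static unsigned int raw_queue_empty(unsigned int raw)
{
	return (raw >> 31) & 1;		/* most significant bit */
}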
+
+#define AP_RESPONSE_NORMAL 0x00
+#define AP_RESPONSE_Q_NOT_AVAIL 0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
+#define AP_RESPONSE_DECONFIGURED 0x03
+#define AP_RESPONSE_CHECKSTOPPED 0x04
+#define AP_RESPONSE_BUSY 0x05
+#define AP_RESPONSE_Q_FULL 0x10
+#define AP_RESPONSE_NO_PENDING_REPLY 0x10
+#define AP_RESPONSE_INDEX_TOO_BIG 0x11
+#define AP_RESPONSE_NO_FIRST_PART 0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+
+/**
+ * Known device types
+ */
+#define AP_DEVICE_TYPE_PCICC 3
+#define AP_DEVICE_TYPE_PCICA 4
+#define AP_DEVICE_TYPE_PCIXCC 5
+#define AP_DEVICE_TYPE_CEX2A 6
+#define AP_DEVICE_TYPE_CEX2C 7
+
+struct ap_device;
+struct ap_message;
+
+struct ap_driver {
+ struct device_driver driver;
+ struct ap_device_id *ids;
+
+ int (*probe)(struct ap_device *);
+ void (*remove)(struct ap_device *);
+ /* receive is called from tasklet context */
+ void (*receive)(struct ap_device *, struct ap_message *,
+ struct ap_message *);
+};
+
+#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
+
+int ap_driver_register(struct ap_driver *, struct module *, char *);
+void ap_driver_unregister(struct ap_driver *);
+
+struct ap_device {
+ struct device device;
+ struct ap_driver *drv; /* Pointer to AP device driver. */
+ spinlock_t lock; /* Per device lock. */
+
+ ap_qid_t qid; /* AP queue id. */
+ int queue_depth; /* AP queue depth.*/
+ int device_type; /* AP device type. */
+ int unregistered; /* marks AP device as unregistered */
+
+ int queue_count; /* # messages currently on AP queue. */
+
+ struct list_head pendingq; /* List of messages sent to AP queue. */
+ int pendingq_count; /* # requests on pendingq list. */
+ struct list_head requestq; /* List of messages yet to be sent. */
+ int requestq_count; /* # requests on requestq list. */
+ int total_request_count; /* # requests ever for this AP device. */
+
+ struct ap_message *reply; /* Per device reply message. */
+
+ void *private; /* ap driver private pointer. */
+};
+
+#define to_ap_dev(x) container_of((x), struct ap_device, device)
+
+struct ap_message {
+ struct list_head list; /* Request queueing. */
+ unsigned long long psmid; /* Message id. */
+ void *message; /* Pointer to message buffer. */
+ size_t length; /* Message length. */
+
+ void *private; /* ap driver private pointer. */
+};
+
+#define AP_DEVICE(dt) \
+ .dev_type=(dt), \
+ .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
+
+/**
+ * Note: don't use ap_send/ap_recv after using ap_queue_message
+ * for the first time. Otherwise the ap message queue will get
+ * confused.
+ */
+int ap_send(ap_qid_t, unsigned long long, void *, size_t);
+int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
+
+void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
+void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
+void ap_flush_queue(struct ap_device *ap_dev);
+
+int ap_module_init(void);
+void ap_module_exit(void);
+
+#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
deleted file mode 100644
index dbbcda3c846..00000000000
--- a/drivers/s390/crypto/z90common.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * linux/drivers/s390/crypto/z90common.h
- *
- * z90crypt 1.3.3
- *
- * Copyright (C) 2001, 2005 IBM Corporation
- * Author(s): Robert Burroughs (burrough@us.ibm.com)
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _Z90COMMON_H_
-#define _Z90COMMON_H_
-
-
-#define RESPBUFFSIZE 256
-#define PCI_FUNC_KEY_DECRYPT 0x5044
-#define PCI_FUNC_KEY_ENCRYPT 0x504B
-extern int ext_bitlens;
-
-enum devstat {
- DEV_GONE,
- DEV_ONLINE,
- DEV_QUEUE_FULL,
- DEV_EMPTY,
- DEV_NO_WORK,
- DEV_BAD_MESSAGE,
- DEV_TSQ_EXCEPTION,
- DEV_RSQ_EXCEPTION,
- DEV_SEN_EXCEPTION,
- DEV_REC_EXCEPTION
-};
-
-enum hdstat {
- HD_NOT_THERE,
- HD_BUSY,
- HD_DECONFIGURED,
- HD_CHECKSTOPPED,
- HD_ONLINE,
- HD_TSQ_EXCEPTION
-};
-
-#define Z90C_NO_DEVICES 1
-#define Z90C_AMBIGUOUS_DOMAIN 2
-#define Z90C_INCORRECT_DOMAIN 3
-#define ENOTINIT 4
-
-#define SEN_BUSY 7
-#define SEN_USER_ERROR 8
-#define SEN_QUEUE_FULL 11
-#define SEN_NOT_AVAIL 16
-#define SEN_PAD_ERROR 17
-#define SEN_RETRY 18
-#define SEN_RELEASED 24
-
-#define REC_EMPTY 4
-#define REC_BUSY 6
-#define REC_OPERAND_INV 8
-#define REC_OPERAND_SIZE 9
-#define REC_EVEN_MOD 10
-#define REC_NO_WORK 11
-#define REC_HARDWAR_ERR 12
-#define REC_NO_RESPONSE 13
-#define REC_RETRY_DEV 14
-#define REC_USER_GONE 15
-#define REC_BAD_MESSAGE 16
-#define REC_INVALID_PAD 17
-#define REC_USE_PCICA 18
-
-#define WRONG_DEVICE_TYPE 20
-
-#define REC_FATAL_ERROR 32
-#define SEN_FATAL_ERROR 33
-#define TSQ_FATAL_ERROR 34
-#define RSQ_FATAL_ERROR 35
-
-#define Z90CRYPT_NUM_TYPES 6
-#define PCICA 0
-#define PCICC 1
-#define PCIXCC_MCL2 2
-#define PCIXCC_MCL3 3
-#define CEX2C 4
-#define CEX2A 5
-#define NILDEV -1
-#define ANYDEV -1
-#define PCIXCC_UNK -2
-
-enum hdevice_type {
- PCICC_HW = 3,
- PCICA_HW = 4,
- PCIXCC_HW = 5,
- CEX2A_HW = 6,
- CEX2C_HW = 7
-};
-
-struct CPRBX {
- unsigned short cprb_len;
- unsigned char cprb_ver_id;
- unsigned char pad_000[3];
- unsigned char func_id[2];
- unsigned char cprb_flags[4];
- unsigned int req_parml;
- unsigned int req_datal;
- unsigned int rpl_msgbl;
- unsigned int rpld_parml;
- unsigned int rpl_datal;
- unsigned int rpld_datal;
- unsigned int req_extbl;
- unsigned char pad_001[4];
- unsigned int rpld_extbl;
- unsigned char req_parmb[16];
- unsigned char req_datab[16];
- unsigned char rpl_parmb[16];
- unsigned char rpl_datab[16];
- unsigned char req_extb[16];
- unsigned char rpl_extb[16];
- unsigned short ccp_rtcode;
- unsigned short ccp_rscode;
- unsigned int mac_data_len;
- unsigned char logon_id[8];
- unsigned char mac_value[8];
- unsigned char mac_content_flgs;
- unsigned char pad_002;
- unsigned short domain;
- unsigned char pad_003[12];
- unsigned char pad_004[36];
-};
-
-#ifndef DEV_NAME
-#define DEV_NAME "z90crypt"
-#endif
-#define PRINTK(fmt, args...) \
- printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
-#define PRINTKN(fmt, args...) \
- printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
-#define PRINTKW(fmt, args...) \
- printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
-#define PRINTKC(fmt, args...) \
- printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
-
-#ifdef Z90CRYPT_DEBUG
-#define PDEBUG(fmt, args...) \
- printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
-#else
-#define PDEBUG(fmt, args...) do {} while (0)
-#endif
-
-#define UMIN(a,b) ((a) < (b) ? (a) : (b))
-#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
-
-#endif
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
deleted file mode 100644
index 0ca1d126ccb..00000000000
--- a/drivers/s390/crypto/z90crypt.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * linux/drivers/s390/crypto/z90crypt.h
- *
- * z90crypt 1.3.3 (kernel-private header)
- *
- * Copyright (C) 2001, 2005 IBM Corporation
- * Author(s): Robert Burroughs (burrough@us.ibm.com)
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _Z90CRYPT_H_
-#define _Z90CRYPT_H_
-
-#include <asm/z90crypt.h>
-
-/**
- * local errno definitions
- */
-#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL
-#define EWORKPEND 130 // user issues ioctl while another pending
-#define ERELEASED 131 // user released while ioctl pending
-#define EQUIESCE 132 // z90crypt quiescing (no more work allowed)
-#define ETIMEOUT 133 // request timed out
-#define EUNKNOWN 134 // some unrecognized error occured (retry may succeed)
-#define EGETBUFF 135 // Error getting buffer or hardware lacks capability
- // (retry in software)
-
-/**
- * DEPRECATED STRUCTURES
- */
-
-/**
- * This structure is DEPRECATED and the corresponding ioctl() has been
- * replaced with individual ioctl()s for each piece of data!
- * This structure will NOT survive past version 1.3.1, so switch to the
- * new ioctl()s.
- */
-#define MASK_LENGTH 64 // mask length
-struct ica_z90_status {
- int totalcount;
- int leedslitecount; // PCICA
- int leeds2count; // PCICC
- // int PCIXCCCount; is not in struct for backward compatibility
- int requestqWaitCount;
- int pendingqWaitCount;
- int totalOpenCount;
- int cryptoDomain;
- // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
- // 5=CEX2C
- unsigned char status[MASK_LENGTH];
- // qdepth: # work elements waiting for each device
- unsigned char qdepth[MASK_LENGTH];
-};
-
-#endif /* _Z90CRYPT_H_ */
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
deleted file mode 100644
index be60795f4a7..00000000000
--- a/drivers/s390/crypto/z90hardware.c
+++ /dev/null
@@ -1,2531 +0,0 @@
-/*
- * linux/drivers/s390/crypto/z90hardware.c
- *
- * z90crypt 1.3.3
- *
- * Copyright (C) 2001, 2005 IBM Corporation
- * Author(s): Robert Burroughs (burrough@us.ibm.com)
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <asm/uaccess.h>
-#include <linux/compiler.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include "z90crypt.h"
-#include "z90common.h"
-
-struct cca_token_hdr {
- unsigned char token_identifier;
- unsigned char version;
- unsigned short token_length;
- unsigned char reserved[4];
-};
-
-#define CCA_TKN_HDR_ID_EXT 0x1E
-
-struct cca_private_ext_ME_sec {
- unsigned char section_identifier;
- unsigned char version;
- unsigned short section_length;
- unsigned char private_key_hash[20];
- unsigned char reserved1[4];
- unsigned char key_format;
- unsigned char reserved2;
- unsigned char key_name_hash[20];
- unsigned char key_use_flags[4];
- unsigned char reserved3[6];
- unsigned char reserved4[24];
- unsigned char confounder[24];
- unsigned char exponent[128];
- unsigned char modulus[128];
-};
-
-#define CCA_PVT_USAGE_ALL 0x80
-
-struct cca_public_sec {
- unsigned char section_identifier;
- unsigned char version;
- unsigned short section_length;
- unsigned char reserved[2];
- unsigned short exponent_len;
- unsigned short modulus_bit_len;
- unsigned short modulus_byte_len;
- unsigned char exponent[3];
-};
-
-struct cca_private_ext_ME {
- struct cca_token_hdr pvtMEHdr;
- struct cca_private_ext_ME_sec pvtMESec;
- struct cca_public_sec pubMESec;
-};
-
-struct cca_public_key {
- struct cca_token_hdr pubHdr;
- struct cca_public_sec pubSec;
-};
-
-struct cca_pvt_ext_CRT_sec {
- unsigned char section_identifier;
- unsigned char version;
- unsigned short section_length;
- unsigned char private_key_hash[20];
- unsigned char reserved1[4];
- unsigned char key_format;
- unsigned char reserved2;
- unsigned char key_name_hash[20];
- unsigned char key_use_flags[4];
- unsigned short p_len;
- unsigned short q_len;
- unsigned short dp_len;
- unsigned short dq_len;
- unsigned short u_len;
- unsigned short mod_len;
- unsigned char reserved3[4];
- unsigned short pad_len;
- unsigned char reserved4[52];
- unsigned char confounder[8];
-};
-
-#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
-#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
-
-struct cca_private_ext_CRT {
- struct cca_token_hdr pvtCrtHdr;
- struct cca_pvt_ext_CRT_sec pvtCrtSec;
- struct cca_public_sec pubCrtSec;
-};
-
-struct ap_status_word {
- unsigned char q_stat_flags;
- unsigned char response_code;
- unsigned char reserved[2];
-};
-
-#define AP_Q_STATUS_EMPTY 0x80
-#define AP_Q_STATUS_REPLIES_WAITING 0x40
-#define AP_Q_STATUS_ARRAY_FULL 0x20
-
-#define AP_RESPONSE_NORMAL 0x00
-#define AP_RESPONSE_Q_NOT_AVAIL 0x01
-#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
-#define AP_RESPONSE_DECONFIGURED 0x03
-#define AP_RESPONSE_CHECKSTOPPED 0x04
-#define AP_RESPONSE_BUSY 0x05
-#define AP_RESPONSE_Q_FULL 0x10
-#define AP_RESPONSE_NO_PENDING_REPLY 0x10
-#define AP_RESPONSE_INDEX_TOO_BIG 0x11
-#define AP_RESPONSE_NO_FIRST_PART 0x13
-#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
-
-#define AP_MAX_CDX_BITL 4
-#define AP_RQID_RESERVED_BITL 4
-#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)
-
-struct type4_hdr {
- unsigned char reserved1;
- unsigned char msg_type_code;
- unsigned short msg_len;
- unsigned char request_code;
- unsigned char msg_fmt;
- unsigned short reserved2;
-};
-
-#define TYPE4_TYPE_CODE 0x04
-#define TYPE4_REQU_CODE 0x40
-
-#define TYPE4_SME_LEN 0x0188
-#define TYPE4_LME_LEN 0x0308
-#define TYPE4_SCR_LEN 0x01E0
-#define TYPE4_LCR_LEN 0x03A0
-
-#define TYPE4_SME_FMT 0x00
-#define TYPE4_LME_FMT 0x10
-#define TYPE4_SCR_FMT 0x40
-#define TYPE4_LCR_FMT 0x50
-
-struct type4_sme {
- struct type4_hdr header;
- unsigned char message[128];
- unsigned char exponent[128];
- unsigned char modulus[128];
-};
-
-struct type4_lme {
- struct type4_hdr header;
- unsigned char message[256];
- unsigned char exponent[256];
- unsigned char modulus[256];
-};
-
-struct type4_scr {
- struct type4_hdr header;
- unsigned char message[128];
- unsigned char dp[72];
- unsigned char dq[64];
- unsigned char p[72];
- unsigned char q[64];
- unsigned char u[72];
-};
-
-struct type4_lcr {
- struct type4_hdr header;
- unsigned char message[256];
- unsigned char dp[136];
- unsigned char dq[128];
- unsigned char p[136];
- unsigned char q[128];
- unsigned char u[136];
-};
-
-union type4_msg {
- struct type4_sme sme;
- struct type4_lme lme;
- struct type4_scr scr;
- struct type4_lcr lcr;
-};
-
-struct type84_hdr {
- unsigned char reserved1;
- unsigned char code;
- unsigned short len;
- unsigned char reserved2[4];
-};
-
-#define TYPE84_RSP_CODE 0x84
-
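-/*
- * Type 6 request header: wraps a CPRB for the coprocessor cards.  The
- * ToCardLen/FromCardLen pairs describe up to four data segments in
- * each direction.
- */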
-struct type6_hdr {
- unsigned char reserved1;
- unsigned char type;
- unsigned char reserved2[2];
- unsigned char right[4];
- unsigned char reserved3[2];
- unsigned char reserved4[2];
- unsigned char apfs[4];
- unsigned int offset1;
- unsigned int offset2;
- unsigned int offset3;
- unsigned int offset4;
- unsigned char agent_id[16];
- unsigned char rqid[2];
- unsigned char reserved5[2];
- unsigned char function_code[2];
- unsigned char reserved6[2];
- unsigned int ToCardLen1;
- unsigned int ToCardLen2;
- unsigned int ToCardLen3;
- unsigned int ToCardLen4;
- unsigned int FromCardLen1;
- unsigned int FromCardLen2;
- unsigned int FromCardLen3;
- unsigned int FromCardLen4;
-};
-
-struct CPRB {
- unsigned char cprb_len[2];
- unsigned char cprb_ver_id;
- unsigned char pad_000;
- unsigned char srpi_rtcode[4];
- unsigned char srpi_verb;
- unsigned char flags;
- unsigned char func_id[2];
- unsigned char checkpoint_flag;
- unsigned char resv2;
- unsigned char req_parml[2];
- unsigned char req_parmp[4];
- unsigned char req_datal[4];
- unsigned char req_datap[4];
- unsigned char rpl_parml[2];
- unsigned char pad_001[2];
- unsigned char rpl_parmp[4];
- unsigned char rpl_datal[4];
- unsigned char rpl_datap[4];
- unsigned char ccp_rscode[2];
- unsigned char ccp_rtcode[2];
- unsigned char repd_parml[2];
- unsigned char mac_data_len[2];
- unsigned char repd_datal[4];
- unsigned char req_pc[2];
- unsigned char res_origin[8];
- unsigned char mac_value[8];
- unsigned char logon_id[8];
- unsigned char usage_domain[2];
- unsigned char resv3[18];
- unsigned char svr_namel[2];
- unsigned char svr_name[8];
-};
-
-struct type6_msg {
- struct type6_hdr header;
- struct CPRB CPRB;
-};
-
-struct type86_hdr {
- unsigned char reserved1;
- unsigned char type;
- unsigned char format;
- unsigned char reserved2;
- unsigned char reply_code;
- unsigned char reserved3[3];
-};
-
-#define TYPE86_RSP_CODE 0x86
-#define TYPE86_FMT2 0x02
-
-struct type86_fmt2_msg {
- struct type86_hdr header;
- unsigned char reserved[4];
- unsigned char apfs[4];
- unsigned int count1;
- unsigned int offset1;
- unsigned int count2;
- unsigned int offset2;
- unsigned int count3;
- unsigned int offset3;
- unsigned int count4;
- unsigned int offset4;
-};
-
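-/*
- * Prebuilt type 6 headers: static_type6_hdr is used with the
- * version-1 CPRB (agent id "CCA-APPL"), static_type6_hdrX with the
- * CPRBX (agent id "CA").  Both default to function code "PD"
- * (0x50, 0x44); the encrypt paths overwrite it with "PK".
- */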
-static struct type6_hdr static_type6_hdr = {
- 0x00,
- 0x06,
- {0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00},
- {0x00,0x00},
- {0x00,0x00,0x00,0x00},
- 0x00000058,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
- 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
- {0x00,0x00},
- {0x00,0x00},
- {0x50,0x44},
- {0x00,0x00},
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-static struct type6_hdr static_type6_hdrX = {
- 0x00,
- 0x06,
- {0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00},
- {0x00,0x00},
- {0x00,0x00,0x00,0x00},
- 0x00000058,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00},
- {0x00,0x00},
- {0x50,0x44},
- {0x00,0x00},
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
-};
-
-static struct CPRB static_cprb = {
- {0x70,0x00},
- 0x41,
- 0x00,
- {0x00,0x00,0x00,0x00},
- 0x00,
- 0x00,
- {0x54,0x32},
- 0x01,
- 0x00,
- {0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00},
- {0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00},
- {0x00,0x00},
- {0x00,0x00},
- {0x00,0x00},
- {0x00,0x00,0x00,0x00},
- {0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00},
- {0x08,0x00},
- {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20}
-};
-
-struct function_and_rules_block {
- unsigned char function_code[2];
- unsigned char ulen[2];
- unsigned char only_rule[8];
-};
-
-static struct function_and_rules_block static_pkd_function_and_rules = {
- {0x50,0x44},
- {0x0A,0x00},
- {'P','K','C','S','-','1','.','2'}
-};
-
-static struct function_and_rules_block static_pke_function_and_rules = {
- {0x50,0x4B},
- {0x0A,0x00},
- {'P','K','C','S','-','1','.','2'}
-};
-
-struct T6_keyBlock_hdr {
- unsigned char blen[2];
- unsigned char ulen[2];
- unsigned char flags[2];
-};
-
-static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = {
- {0x89,0x01},
- {0x87,0x01},
- {0x00}
-};
-
-static struct CPRBX static_cprbx = {
- 0x00DC,
- 0x02,
- {0x00,0x00,0x00},
- {0x54,0x32},
- {0x00,0x00,0x00,0x00},
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- {0x00,0x00,0x00,0x00},
- 0x00000000,
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- 0x0000,
- 0x0000,
- 0x00000000,
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- 0x00,
- 0x00,
- 0x0000,
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
-};
-
-static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = {
- {0x50,0x44},
- {0x00,0x0A},
- {'P','K','C','S','-','1','.','2'}
-};
-
-static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = {
- {0x50,0x4B},
- {0x00,0x0A},
- {'Z','E','R','O','-','P','A','D'}
-};
-
-static struct function_and_rules_block static_pkd_function_and_rulesX = {
- {0x50,0x44},
- {0x00,0x0A},
- {'Z','E','R','O','-','P','A','D'}
-};
-
-static struct function_and_rules_block static_pke_function_and_rulesX = {
- {0x50,0x4B},
- {0x00,0x0A},
- {'M','R','P',' ',' ',' ',' ',' '}
-};
-
-static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
-
-struct T6_keyBlock_hdrX {
- unsigned short blen;
- unsigned short ulen;
- unsigned char flags[2];
-};
-
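-/*
- * 256 fixed non-zero filler bytes in the style of PKCS #1 block type 2
- * padding; pad_msg() below copies from here when rebuilding a padded
- * message.
- */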
-static unsigned char static_pad[256] = {
-0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
-0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
-0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
-0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
-0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
-0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
-0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
-0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
-0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
-0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
-0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
-0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
-0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
-0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
-0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
-0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
-};
-
-static struct cca_private_ext_ME static_pvt_me_key = {
- {
- 0x1E,
- 0x00,
- 0x0183,
- {0x00,0x00,0x00,0x00}
- },
-
- {
- 0x02,
- 0x00,
- 0x016C,
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00},
- 0x00,
- 0x00,
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00},
- {0x80,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
- {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
- },
-
- {
- 0x04,
- 0x00,
- 0x000F,
- {0x00,0x00},
- 0x0003,
- 0x0000,
- 0x0000,
- {0x01,0x00,0x01}
- }
-};
-
-static struct cca_public_key static_public_key = {
- {
- 0x1E,
- 0x00,
- 0x0000,
- {0x00,0x00,0x00,0x00}
- },
-
- {
- 0x04,
- 0x00,
- 0x0000,
- {0x00,0x00},
- 0x0000,
- 0x0000,
- 0x0000,
- {0x01,0x00,0x01}
- }
-};
-
-#define FIXED_TYPE6_ME_LEN 0x0000025F
-
-#define FIXED_TYPE6_ME_EN_LEN 0x000000F0
-
-#define FIXED_TYPE6_ME_LENX 0x000002CB
-
-#define FIXED_TYPE6_ME_EN_LENX 0x0000015C
-
-static struct cca_public_sec static_cca_pub_sec = {
- 0x04,
- 0x00,
- 0x000f,
- {0x00,0x00},
- 0x0003,
- 0x0000,
- 0x0000,
- {0x01,0x00,0x01}
-};
-
-#define FIXED_TYPE6_CR_LEN 0x00000177
-
-#define FIXED_TYPE6_CR_LENX 0x000001E3
-
-#define MAX_RESPONSE_SIZE 0x00000710
-
-#define MAX_RESPONSEX_SIZE 0x0000077C
-
-#define RESPONSE_CPRB_SIZE 0x000006B8
-#define RESPONSE_CPRBX_SIZE 0x00000724
-
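-/*
- * Type 50 request: clear-key RSA format used by CEX2A accelerators.
- * MEB1/MEB2 carry mod-expo operands, CRB1/CRB2 carry CRT operands.
- */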
-struct type50_hdr {
- u8 reserved1;
- u8 msg_type_code;
- u16 msg_len;
- u8 reserved2;
- u8 ignored;
- u16 reserved3;
-};
-
-#define TYPE50_TYPE_CODE 0x50
-
-#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
-#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
-#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
-#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
-
-#define TYPE50_MEB1_FMT 0x0001
-#define TYPE50_MEB2_FMT 0x0002
-#define TYPE50_CRB1_FMT 0x0011
-#define TYPE50_CRB2_FMT 0x0012
-
-struct type50_meb1_msg {
- struct type50_hdr header;
- u16 keyblock_type;
- u8 reserved[6];
- u8 exponent[128];
- u8 modulus[128];
- u8 message[128];
-};
-
-struct type50_meb2_msg {
- struct type50_hdr header;
- u16 keyblock_type;
- u8 reserved[6];
- u8 exponent[256];
- u8 modulus[256];
- u8 message[256];
-};
-
-struct type50_crb1_msg {
- struct type50_hdr header;
- u16 keyblock_type;
- u8 reserved[6];
- u8 p[64];
- u8 q[64];
- u8 dp[64];
- u8 dq[64];
- u8 u[64];
- u8 message[128];
-};
-
-struct type50_crb2_msg {
- struct type50_hdr header;
- u16 keyblock_type;
- u8 reserved[6];
- u8 p[128];
- u8 q[128];
- u8 dp[128];
- u8 dq[128];
- u8 u[128];
- u8 message[256];
-};
-
-union type50_msg {
- struct type50_meb1_msg meb1;
- struct type50_meb2_msg meb2;
- struct type50_crb1_msg crb1;
- struct type50_crb2_msg crb2;
-};
-
-struct type80_hdr {
- u8 reserved1;
- u8 type;
- u16 len;
- u8 code;
- u8 reserved2[3];
- u8 reserved3[8];
-};
-
-#define TYPE80_RSP_CODE 0x80
-
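-/* Common layout of the type 82/88 error replies; the driver keys off
- * the type and reply_code fields. */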
-struct error_hdr {
- unsigned char reserved1;
- unsigned char type;
- unsigned char reserved2[2];
- unsigned char reply_code;
- unsigned char reserved3[3];
-};
-
-#define TYPE82_RSP_CODE 0x82
-#define TYPE88_RSP_CODE 0x88
-
-#define REP82_ERROR_MACHINE_FAILURE 0x10
-#define REP82_ERROR_PREEMPT_FAILURE 0x12
-#define REP82_ERROR_CHECKPT_FAILURE 0x14
-#define REP82_ERROR_MESSAGE_TYPE 0x20
-#define REP82_ERROR_INVALID_COMM_CD 0x21
-#define REP82_ERROR_INVALID_MSG_LEN 0x23
-#define REP82_ERROR_RESERVD_FIELD 0x24
-#define REP82_ERROR_FORMAT_FIELD 0x29
-#define REP82_ERROR_INVALID_COMMAND 0x30
-#define REP82_ERROR_MALFORMED_MSG 0x40
-#define REP82_ERROR_RESERVED_FIELDO 0x50
-#define REP82_ERROR_WORD_ALIGNMENT 0x60
-#define REP82_ERROR_MESSAGE_LENGTH 0x80
-#define REP82_ERROR_OPERAND_INVALID 0x82
-#define REP82_ERROR_OPERAND_SIZE 0x84
-#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
-#define REP82_ERROR_RESERVED_FIELD 0x88
-#define REP82_ERROR_TRANSPORT_FAIL 0x90
-#define REP82_ERROR_PACKET_TRUNCATED 0xA0
-#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
-
-#define REP88_ERROR_MODULE_FAILURE 0x10
-#define REP88_ERROR_MODULE_TIMEOUT 0x11
-#define REP88_ERROR_MODULE_NOTINIT 0x13
-#define REP88_ERROR_MODULE_NOTAVAIL 0x14
-#define REP88_ERROR_MODULE_DISABLED 0x15
-#define REP88_ERROR_MODULE_IN_DIAGN 0x17
-#define REP88_ERROR_FASTPATH_DISABLD 0x19
-#define REP88_ERROR_MESSAGE_TYPE 0x20
-#define REP88_ERROR_MESSAGE_MALFORMD 0x22
-#define REP88_ERROR_MESSAGE_LENGTH 0x23
-#define REP88_ERROR_RESERVED_FIELD 0x24
-#define REP88_ERROR_KEY_TYPE 0x34
-#define REP88_ERROR_INVALID_KEY 0x82
-#define REP88_ERROR_OPERAND 0x84
-#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
-
-#define CALLER_HEADER 12
-
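-/*
- * Low-level AP queue primitives.  The hard-coded .long opcodes are AP
- * instructions the assembler of the day had no mnemonics for:
- * 0xb2af is PQAP (testq issues the test function, resetq sets
- * function code 1 to reset the queue), 0xb2ad is NQAP (enqueue a
- * request) and 0xb2ae is DQAP (dequeue a reply).  Each helper returns
- * the condition code and fills in the AP status word.
- */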
-static inline int
-testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
-{
- int ccode;
-
- asm volatile
-#ifdef CONFIG_64BIT
- (" llgfr 0,%4 \n"
- " slgr 1,1 \n"
- " lgr 2,1 \n"
- "0: .long 0xb2af0000 \n"
- "1: ipm %0 \n"
- " srl %0,28 \n"
- " iihh %0,0 \n"
- " iihl %0,0 \n"
- " lgr %1,1 \n"
- " lgr %3,2 \n"
- " srl %3,24 \n"
- " sll 2,24 \n"
- " srl 2,24 \n"
- " lgr %2,2 \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h5 \n"
- " jg 2b \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 8 \n"
- " .quad 0b,3b \n"
- " .quad 1b,3b \n"
- ".previous"
- :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
- :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
- :"cc","0","1","2","memory");
-#else
- (" lr 0,%4 \n"
- " slr 1,1 \n"
- " lr 2,1 \n"
- "0: .long 0xb2af0000 \n"
- "1: ipm %0 \n"
- " srl %0,28 \n"
- " lr %1,1 \n"
- " lr %3,2 \n"
- " srl %3,24 \n"
- " sll 2,24 \n"
- " srl 2,24 \n"
- " lr %2,2 \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h5 \n"
- " bras 1,4f \n"
- " .long 2b \n"
- "4: \n"
- " l 1,0(1) \n"
- " br 1 \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 4 \n"
- " .long 0b,3b \n"
- " .long 1b,3b \n"
- ".previous"
- :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
- :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
- :"cc","0","1","2","memory");
-#endif
- return ccode;
-}
-
-static inline int
-resetq(int q_nr, struct ap_status_word *stat_p)
-{
- int ccode;
-
- asm volatile
-#ifdef CONFIG_64BIT
- (" llgfr 0,%2 \n"
- " lghi 1,1 \n"
- " sll 1,24 \n"
- " or 0,1 \n"
- " slgr 1,1 \n"
- " lgr 2,1 \n"
- "0: .long 0xb2af0000 \n"
- "1: ipm %0 \n"
- " srl %0,28 \n"
- " iihh %0,0 \n"
- " iihl %0,0 \n"
- " lgr %1,1 \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h3 \n"
- " jg 2b \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 8 \n"
- " .quad 0b,3b \n"
- " .quad 1b,3b \n"
- ".previous"
- :"=d" (ccode),"=d" (*stat_p)
- :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
- :"cc","0","1","2","memory");
-#else
- (" lr 0,%2 \n"
- " lhi 1,1 \n"
- " sll 1,24 \n"
- " or 0,1 \n"
- " slr 1,1 \n"
- " lr 2,1 \n"
- "0: .long 0xb2af0000 \n"
- "1: ipm %0 \n"
- " srl %0,28 \n"
- " lr %1,1 \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h3 \n"
- " bras 1,4f \n"
- " .long 2b \n"
- "4: \n"
- " l 1,0(1) \n"
- " br 1 \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 4 \n"
- " .long 0b,3b \n"
- " .long 1b,3b \n"
- ".previous"
- :"=d" (ccode),"=d" (*stat_p)
- :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
- :"cc","0","1","2","memory");
-#endif
- return ccode;
-}
-
-static inline int
-sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
-{
- int ccode;
-
- asm volatile
-#ifdef CONFIG_64BIT
- (" lgr 6,%3 \n"
- " llgfr 7,%2 \n"
- " llgt 0,0(6) \n"
- " lghi 1,64 \n"
- " sll 1,24 \n"
- " or 0,1 \n"
- " la 6,4(6) \n"
- " llgt 2,0(6) \n"
- " llgt 3,4(6) \n"
- " la 6,8(6) \n"
- " slr 1,1 \n"
- "0: .long 0xb2ad0026 \n"
- "1: brc 2,0b \n"
- " ipm %0 \n"
- " srl %0,28 \n"
- " iihh %0,0 \n"
- " iihl %0,0 \n"
- " lgr %1,1 \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h4 \n"
- " jg 2b \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 8 \n"
- " .quad 0b,3b \n"
- " .quad 1b,3b \n"
- ".previous"
- :"=d" (ccode),"=d" (*stat)
- :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
- :"cc","0","1","2","3","6","7","memory");
-#else
- (" lr 6,%3 \n"
- " lr 7,%2 \n"
- " l 0,0(6) \n"
- " lhi 1,64 \n"
- " sll 1,24 \n"
- " or 0,1 \n"
- " la 6,4(6) \n"
- " l 2,0(6) \n"
- " l 3,4(6) \n"
- " la 6,8(6) \n"
- " slr 1,1 \n"
- "0: .long 0xb2ad0026 \n"
- "1: brc 2,0b \n"
- " ipm %0 \n"
- " srl %0,28 \n"
- " lr %1,1 \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h4 \n"
- " bras 1,4f \n"
- " .long 2b \n"
- "4: \n"
- " l 1,0(1) \n"
- " br 1 \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 4 \n"
- " .long 0b,3b \n"
- " .long 1b,3b \n"
- ".previous"
- :"=d" (ccode),"=d" (*stat)
- :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
- :"cc","0","1","2","3","6","7","memory");
-#endif
- return ccode;
-}
-
-static inline int
-rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
- struct ap_status_word *st)
-{
- int ccode;
-
- asm volatile
-#ifdef CONFIG_64BIT
- (" llgfr 0,%2 \n"
- " lgr 3,%4 \n"
- " lgr 6,%3 \n"
- " llgfr 7,%5 \n"
- " lghi 1,128 \n"
- " sll 1,24 \n"
- " or 0,1 \n"
- " slgr 1,1 \n"
- " lgr 2,1 \n"
- " lgr 4,1 \n"
- " lgr 5,1 \n"
- "0: .long 0xb2ae0046 \n"
- "1: brc 2,0b \n"
- " brc 4,0b \n"
- " ipm %0 \n"
- " srl %0,28 \n"
- " iihh %0,0 \n"
- " iihl %0,0 \n"
- " lgr %1,1 \n"
- " st 4,0(3) \n"
- " st 5,4(3) \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h6 \n"
- " jg 2b \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 8 \n"
- " .quad 0b,3b \n"
- " .quad 1b,3b \n"
- ".previous"
- :"=d"(ccode),"=d"(*st)
- :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
- :"cc","0","1","2","3","4","5","6","7","memory");
-#else
- (" lr 0,%2 \n"
- " lr 3,%4 \n"
- " lr 6,%3 \n"
- " lr 7,%5 \n"
- " lhi 1,128 \n"
- " sll 1,24 \n"
- " or 0,1 \n"
- " slr 1,1 \n"
- " lr 2,1 \n"
- " lr 4,1 \n"
- " lr 5,1 \n"
- "0: .long 0xb2ae0046 \n"
- "1: brc 2,0b \n"
- " brc 4,0b \n"
- " ipm %0 \n"
- " srl %0,28 \n"
- " lr %1,1 \n"
- " st 4,0(3) \n"
- " st 5,4(3) \n"
- "2: \n"
- ".section .fixup,\"ax\" \n"
- "3: \n"
- " lhi %0,%h6 \n"
- " bras 1,4f \n"
- " .long 2b \n"
- "4: \n"
- " l 1,0(1) \n"
- " br 1 \n"
- ".previous \n"
- ".section __ex_table,\"a\" \n"
- " .align 4 \n"
- " .long 0b,3b \n"
- " .long 1b,3b \n"
- ".previous"
- :"=d"(ccode),"=d"(*st)
- :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
- :"cc","0","1","2","3","4","5","6","7","memory");
-#endif
- return ccode;
-}
-
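-/*
- * CPRB length fields are little-endian even though s390 is
- * big-endian; these helpers move 16-bit values between an int and
- * that byte order.
- */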
-static inline void
-itoLe2(int *i_p, unsigned char *lechars)
-{
- *lechars = *((unsigned char *) i_p + sizeof(int) - 1);
- *(lechars + 1) = *((unsigned char *) i_p + sizeof(int) - 2);
-}
-
-static inline void
-le2toI(unsigned char *lechars, int *i_p)
-{
- unsigned char *ic_p;
- *i_p = 0;
- ic_p = (unsigned char *) i_p;
- *(ic_p + 2) = *(lechars + 1);
- *(ic_p + 3) = *(lechars);
-}
-
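-/* A buffer counts as "empty" if it equals a run of zero bytes inside
- * static_pvt_me_key (offset 60 starts a long all-zero stretch). */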
-static inline int
-is_empty(unsigned char *ptr, int len)
-{
- return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len);
-}
-
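-/*
- * Poll a device's AP queue up to resetNr times to classify it as
- * online, busy, deconfigured, checkstopped or absent, reporting queue
- * depth and hardware type on success.
- */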
-enum hdstat
-query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
-{
- int q_nr, i, t_depth, t_dev_type;
- enum devstat ccode;
- struct ap_status_word stat_word;
- enum hdstat stat;
- int break_out;
-
- q_nr = (deviceNr << SKIP_BITL) + cdx;
- stat = HD_BUSY;
- ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
- PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code);
- break_out = 0;
- for (i = 0; i < resetNr; i++) {
- if (ccode > 3) {
- PRINTKC("Exception testing device %d\n", i);
- return HD_TSQ_EXCEPTION;
- }
- switch (ccode) {
- case 0:
- PDEBUG("t_dev_type %d\n", t_dev_type);
- break_out = 1;
- stat = HD_ONLINE;
- *q_depth = t_depth + 1;
- switch (t_dev_type) {
- case PCICA_HW:
- *dev_type = PCICA;
- break;
- case PCICC_HW:
- *dev_type = PCICC;
- break;
- case PCIXCC_HW:
- *dev_type = PCIXCC_UNK;
- break;
- case CEX2C_HW:
- *dev_type = CEX2C;
- break;
- case CEX2A_HW:
- *dev_type = CEX2A;
- break;
- default:
- *dev_type = NILDEV;
- break;
- }
- PDEBUG("available device %d: Q depth = %d, dev "
- "type = %d, stat = %02X%02X%02X%02X\n",
- deviceNr, *q_depth, *dev_type,
- stat_word.q_stat_flags,
- stat_word.response_code,
- stat_word.reserved[0],
- stat_word.reserved[1]);
- break;
- case 3:
- switch (stat_word.response_code) {
- case AP_RESPONSE_NORMAL:
- stat = HD_ONLINE;
- break_out = 1;
- *q_depth = t_depth + 1;
- *dev_type = t_dev_type;
- PDEBUG("cc3, available device "
- "%d: Q depth = %d, dev "
- "type = %d, stat = "
- "%02X%02X%02X%02X\n",
- deviceNr, *q_depth,
- *dev_type,
- stat_word.q_stat_flags,
- stat_word.response_code,
- stat_word.reserved[0],
- stat_word.reserved[1]);
- break;
- case AP_RESPONSE_Q_NOT_AVAIL:
- stat = HD_NOT_THERE;
- break_out = 1;
- break;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- PDEBUG("device %d in reset\n",
- deviceNr);
- break;
- case AP_RESPONSE_DECONFIGURED:
- stat = HD_DECONFIGURED;
- break_out = 1;
- break;
- case AP_RESPONSE_CHECKSTOPPED:
- stat = HD_CHECKSTOPPED;
- break_out = 1;
- break;
- case AP_RESPONSE_BUSY:
- PDEBUG("device %d busy\n",
- deviceNr);
- break;
- default:
- break;
- }
- break;
- default:
- stat = HD_NOT_THERE;
- break_out = 1;
- break;
- }
- if (break_out)
- break;
-
- udelay(5);
-
- ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
- }
- return stat;
-}
-
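-/* Issue a queue reset, then poll with testq until the queue reports
- * empty or resetNr attempts are exhausted. */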
-enum devstat
-reset_device(int deviceNr, int cdx, int resetNr)
-{
- int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i;
- struct ap_status_word stat_word;
- enum devstat stat;
- int break_out;
-
- q_nr = (deviceNr << SKIP_BITL) + cdx;
- stat = DEV_GONE;
- ccode = resetq(q_nr, &stat_word);
- if (ccode > 3)
- return DEV_RSQ_EXCEPTION;
-
- break_out = 0;
- for (i = 0; i < resetNr; i++) {
- switch (ccode) {
- case 0:
- stat = DEV_ONLINE;
- if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
- break_out = 1;
- break;
- case 3:
- switch (stat_word.response_code) {
- case AP_RESPONSE_NORMAL:
- stat = DEV_ONLINE;
- if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
- break_out = 1;
- break;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- stat = DEV_GONE;
- break_out = 1;
- break;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
- default:
- break;
- }
- break;
- default:
- stat = DEV_GONE;
- break_out = 1;
- break;
- }
- if (break_out == 1)
- break;
- udelay(5);
-
- ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word);
- if (ccode > 3) {
- stat = DEV_TSQ_EXCEPTION;
- break;
- }
- }
- PDEBUG("Number of testq's needed for reset: %d\n", i);
-
- if (i >= resetNr) {
- stat = DEV_GONE;
- }
-
- return stat;
-}
-
-#ifdef DEBUG_HYDRA_MSGS
-static inline void
-print_buffer(unsigned char *buffer, int bufflen)
-{
- int i;
- for (i = 0; i < bufflen; i += 16) {
- PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X "
- "%02X%02X%02X%02X %02X%02X%02X%02X\n", i,
- buffer[i+0], buffer[i+1], buffer[i+2], buffer[i+3],
- buffer[i+4], buffer[i+5], buffer[i+6], buffer[i+7],
- buffer[i+8], buffer[i+9], buffer[i+10], buffer[i+11],
- buffer[i+12], buffer[i+13], buffer[i+14], buffer[i+15]);
- }
-}
-#endif
-
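-/* Write the queue number into the caller header and enqueue the
- * request; condition codes and response codes map to device status. */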
-enum devstat
-send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
-{
- struct ap_status_word stat_word;
- enum devstat stat;
- int ccode;
- u32 *q_nr_p = (u32 *)msg_ext;
-
- *q_nr_p = (dev_nr << SKIP_BITL) + cdx;
- PDEBUG("msg_len passed to sen: %d\n", msg_len);
- PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
- msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
- stat = DEV_GONE;
-
-#ifdef DEBUG_HYDRA_MSGS
- PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X "
- "%02X%02X%02X%02X\n",
- msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3],
- msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7],
- msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]);
- print_buffer(msg_ext+CALLER_HEADER, msg_len);
-#endif
-
- ccode = sen(msg_len, msg_ext, &stat_word);
- if (ccode > 3)
- return DEV_SEN_EXCEPTION;
-
- PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n",
- ccode, stat_word.q_stat_flags, stat_word.response_code,
- stat_word.reserved[0], stat_word.reserved[1]);
- switch (ccode) {
- case 0:
- stat = DEV_ONLINE;
- break;
- case 1:
- stat = DEV_GONE;
- break;
- case 3:
- switch (stat_word.response_code) {
- case AP_RESPONSE_NORMAL:
- stat = DEV_ONLINE;
- break;
- case AP_RESPONSE_Q_FULL:
- stat = DEV_QUEUE_FULL;
- break;
- default:
- stat = DEV_GONE;
- break;
- }
- break;
- default:
- stat = DEV_GONE;
- break;
- }
-
- return stat;
-}
-
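-/* Dequeue a reply for this device; DEV_EMPTY vs. DEV_NO_WORK
- * distinguishes an empty queue from replies pending for other
- * requests. */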
-enum devstat
-receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp,
- unsigned char *psmid)
-{
- int ccode;
- struct ap_status_word stat_word;
- enum devstat stat;
-
- memset(resp, 0x00, 8);
-
- ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid,
- &stat_word);
- if (ccode > 3)
- return DEV_REC_EXCEPTION;
-
- PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n",
- ccode, stat_word.q_stat_flags, stat_word.response_code,
- stat_word.reserved[0], stat_word.reserved[1]);
-
- stat = DEV_GONE;
- switch (ccode) {
- case 0:
- stat = DEV_ONLINE;
-#ifdef DEBUG_HYDRA_MSGS
- print_buffer(resp, resplen);
-#endif
- break;
- case 3:
- switch (stat_word.response_code) {
- case AP_RESPONSE_NORMAL:
- stat = DEV_ONLINE;
- break;
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
- stat = DEV_EMPTY;
- else
- stat = DEV_NO_WORK;
- break;
- case AP_RESPONSE_INDEX_TOO_BIG:
- case AP_RESPONSE_NO_FIRST_PART:
- case AP_RESPONSE_MESSAGE_TOO_BIG:
- stat = DEV_BAD_MESSAGE;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- return stat;
-}
-
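-/* Rebuild a PKCS #1 block type 2 prefix (0x00, 0x02, non-zero
- * padding, 0x00) over the leading zero bytes, drawing the padding
- * from static_pad. */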
-static inline int
-pad_msg(unsigned char *buffer, int totalLength, int msgLength)
-{
- int pad_len;
-
- for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++)
- if (buffer[pad_len] != 0x00)
- break;
- pad_len -= 3;
- if (pad_len < 8)
- return SEN_PAD_ERROR;
-
- buffer[0] = 0x00;
- buffer[1] = 0x02;
-
- memcpy(buffer+2, static_pad, pad_len);
-
- buffer[pad_len + 2] = 0x00;
-
- return 0;
-}
-
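-/* True for the well-known public exponents 3 and 65537 (0x010001). */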
-static inline int
-is_common_public_key(unsigned char *key, int len)
-{
- int i;
-
- for (i = 0; i < len; i++)
- if (key[i])
- break;
- key += i;
- len -= i;
- if (((len == 1) && (key[0] == 3)) ||
- ((len == 3) && (key[0] == 1) && (key[1] == 0) && (key[2] == 1)))
- return 1;
-
- return 0;
-}
-
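-/*
- * The converters below translate user-space ica_rsa_modexpo(_crt)
- * requests into card-specific wire formats.  Operands are copied
- * right-justified into fixed-width fields, so leading bytes remain
- * zero.
- */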
-static int
-ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
- union type4_msg *z90cMsg_p)
-{
- int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
- unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
- union type4_msg *tmp_type4_msg;
-
- mod_len = icaMex_p->inputdatalength;
-
- msg_size = ((mod_len <= 128) ? TYPE4_SME_LEN : TYPE4_LME_LEN) +
- CALLER_HEADER;
-
- memset(z90cMsg_p, 0, msg_size);
-
- tmp_type4_msg = (union type4_msg *)
- ((unsigned char *) z90cMsg_p + CALLER_HEADER);
-
- tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE;
- tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE;
-
- if (mod_len <= 128) {
- tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT;
- tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN;
- mod_tgt = tmp_type4_msg->sme.modulus;
- mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus);
- exp_tgt = tmp_type4_msg->sme.exponent;
- exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent);
- inp_tgt = tmp_type4_msg->sme.message;
- inp_tgt_len = sizeof(tmp_type4_msg->sme.message);
- } else {
- tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT;
- tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN;
- mod_tgt = tmp_type4_msg->lme.modulus;
- mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus);
- exp_tgt = tmp_type4_msg->lme.exponent;
- exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent);
- inp_tgt = tmp_type4_msg->lme.message;
- inp_tgt_len = sizeof(tmp_type4_msg->lme.message);
- }
-
- mod_tgt += (mod_tgt_len - mod_len);
- if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
- return SEN_RELEASED;
- if (is_empty(mod_tgt, mod_len))
- return SEN_USER_ERROR;
- exp_tgt += (exp_tgt_len - mod_len);
- if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
- return SEN_RELEASED;
- if (is_empty(exp_tgt, mod_len))
- return SEN_USER_ERROR;
- inp_tgt += (inp_tgt_len - mod_len);
- if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
- return SEN_RELEASED;
- if (is_empty(inp_tgt, mod_len))
- return SEN_USER_ERROR;
-
- *z90cMsg_l_p = msg_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
- int *z90cMsg_l_p, union type4_msg *z90cMsg_p)
-{
- int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
- dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len;
- unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt;
- union type4_msg *tmp_type4_msg;
-
- mod_len = icaMsg_p->inputdatalength;
- short_len = mod_len / 2;
- long_len = mod_len / 2 + 8;
-
- tmp_size = ((mod_len <= 128) ? TYPE4_SCR_LEN : TYPE4_LCR_LEN) +
- CALLER_HEADER;
-
- memset(z90cMsg_p, 0, tmp_size);
-
- tmp_type4_msg = (union type4_msg *)
- ((unsigned char *) z90cMsg_p + CALLER_HEADER);
-
- tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE;
- tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE;
- if (mod_len <= 128) {
- tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT;
- tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN;
- p_tgt = tmp_type4_msg->scr.p;
- p_tgt_len = sizeof(tmp_type4_msg->scr.p);
- q_tgt = tmp_type4_msg->scr.q;
- q_tgt_len = sizeof(tmp_type4_msg->scr.q);
- dp_tgt = tmp_type4_msg->scr.dp;
- dp_tgt_len = sizeof(tmp_type4_msg->scr.dp);
- dq_tgt = tmp_type4_msg->scr.dq;
- dq_tgt_len = sizeof(tmp_type4_msg->scr.dq);
- u_tgt = tmp_type4_msg->scr.u;
- u_tgt_len = sizeof(tmp_type4_msg->scr.u);
- inp_tgt = tmp_type4_msg->scr.message;
- inp_tgt_len = sizeof(tmp_type4_msg->scr.message);
- } else {
- tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT;
- tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN;
- p_tgt = tmp_type4_msg->lcr.p;
- p_tgt_len = sizeof(tmp_type4_msg->lcr.p);
- q_tgt = tmp_type4_msg->lcr.q;
- q_tgt_len = sizeof(tmp_type4_msg->lcr.q);
- dp_tgt = tmp_type4_msg->lcr.dp;
- dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp);
- dq_tgt = tmp_type4_msg->lcr.dq;
- dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq);
- u_tgt = tmp_type4_msg->lcr.u;
- u_tgt_len = sizeof(tmp_type4_msg->lcr.u);
- inp_tgt = tmp_type4_msg->lcr.message;
- inp_tgt_len = sizeof(tmp_type4_msg->lcr.message);
- }
-
- p_tgt += (p_tgt_len - long_len);
- if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len))
- return SEN_RELEASED;
- if (is_empty(p_tgt, long_len))
- return SEN_USER_ERROR;
- q_tgt += (q_tgt_len - short_len);
- if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
- return SEN_RELEASED;
- if (is_empty(q_tgt, short_len))
- return SEN_USER_ERROR;
- dp_tgt += (dp_tgt_len - long_len);
- if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len))
- return SEN_RELEASED;
- if (is_empty(dp_tgt, long_len))
- return SEN_USER_ERROR;
- dq_tgt += (dq_tgt_len - short_len);
- if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
- return SEN_RELEASED;
- if (is_empty(dq_tgt, short_len))
- return SEN_USER_ERROR;
- u_tgt += (u_tgt_len - long_len);
- if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len))
- return SEN_RELEASED;
- if (is_empty(u_tgt, long_len))
- return SEN_USER_ERROR;
- inp_tgt += (inp_tgt_len - mod_len);
- if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
- return SEN_RELEASED;
- if (is_empty(inp_tgt, mod_len))
- return SEN_USER_ERROR;
-
- *z90cMsg_l_p = tmp_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
- int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
-{
- int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
- unsigned char *temp;
- struct type6_hdr *tp6Hdr_p;
- struct CPRB *cprb_p;
- struct cca_private_ext_ME *key_p;
- static int deprecated_msg_count = 0;
-
- mod_len = icaMsg_p->inputdatalength;
- tmp_size = FIXED_TYPE6_ME_LEN + mod_len;
- total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
- parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
- tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
-
- memset(z90cMsg_p, 0, tmp_size);
-
- temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
- memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
- tp6Hdr_p = (struct type6_hdr *)temp;
- tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
- tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
-
- temp += sizeof(struct type6_hdr);
- memcpy(temp, &static_cprb, sizeof(struct CPRB));
- cprb_p = (struct CPRB *) temp;
- cprb_p->usage_domain[0]= (unsigned char)cdx;
- itoLe2(&parmBlock_l, cprb_p->req_parml);
- itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
-
- temp += sizeof(struct CPRB);
- memcpy(temp, &static_pkd_function_and_rules,
- sizeof(struct function_and_rules_block));
-
- temp += sizeof(struct function_and_rules_block);
- vud_len = 2 + icaMsg_p->inputdatalength;
- itoLe2(&vud_len, temp);
-
- temp += 2;
- if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
- return SEN_RELEASED;
- if (is_empty(temp, mod_len))
- return SEN_USER_ERROR;
-
- temp += mod_len;
- memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr));
-
- temp += sizeof(struct T6_keyBlock_hdr);
- memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME));
- key_p = (struct cca_private_ext_ME *)temp;
- temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent)
- - mod_len;
- if (copy_from_user(temp, icaMsg_p->b_key, mod_len))
- return SEN_RELEASED;
- if (is_empty(temp, mod_len))
- return SEN_USER_ERROR;
-
- if (is_common_public_key(temp, mod_len)) {
- if (deprecated_msg_count < 20) {
- PRINTK("Common public key used for modex decrypt\n");
- deprecated_msg_count++;
- if (deprecated_msg_count == 20)
- PRINTK("No longer issuing messages about common"
- " public key for modex decrypt.\n");
- }
- return SEN_NOT_AVAIL;
- }
-
- temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus)
- - mod_len;
- if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
- return SEN_RELEASED;
- if (is_empty(temp, mod_len))
- return SEN_USER_ERROR;
-
- key_p->pubMESec.modulus_bit_len = 8 * mod_len;
-
- *z90cMsg_l_p = tmp_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
- int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
-{
- int mod_len, vud_len, exp_len, key_len;
- int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i;
- unsigned char *temp_exp, *exp_p, *temp;
- struct type6_hdr *tp6Hdr_p;
- struct CPRB *cprb_p;
- struct cca_public_key *key_p;
- struct T6_keyBlock_hdr *keyb_p;
-
- temp_exp = kmalloc(256, GFP_KERNEL);
- if (!temp_exp)
- return EGETBUFF;
- mod_len = icaMsg_p->inputdatalength;
- if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
- kfree(temp_exp);
- return SEN_RELEASED;
- }
- if (is_empty(temp_exp, mod_len)) {
- kfree(temp_exp);
- return SEN_USER_ERROR;
- }
-
- exp_p = temp_exp;
- for (i = 0; i < mod_len; i++)
- if (exp_p[i])
- break;
- if (i >= mod_len) {
- kfree(temp_exp);
- return SEN_USER_ERROR;
- }
-
- exp_len = mod_len - i;
- exp_p += i;
-
- PDEBUG("exp_len after computation: %08x\n", exp_len);
- tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len;
- total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
- parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
- tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
-
- vud_len = 2 + mod_len;
- memset(z90cMsg_p, 0, tmp_size);
-
- temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
- memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
- tp6Hdr_p = (struct type6_hdr *)temp;
- tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
- tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
- memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
- sizeof(static_PKE_function_code));
- temp += sizeof(struct type6_hdr);
- memcpy(temp, &static_cprb, sizeof(struct CPRB));
- cprb_p = (struct CPRB *) temp;
- cprb_p->usage_domain[0]= (unsigned char)cdx;
- itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
- temp += sizeof(struct CPRB);
- memcpy(temp, &static_pke_function_and_rules,
- sizeof(struct function_and_rules_block));
- temp += sizeof(struct function_and_rules_block);
- temp += 2;
- if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) {
- kfree(temp_exp);
- return SEN_RELEASED;
- }
- if (is_empty(temp, mod_len)) {
- kfree(temp_exp);
- return SEN_USER_ERROR;
- }
- if ((temp[0] != 0x00) || (temp[1] != 0x02)) {
- kfree(temp_exp);
- return SEN_NOT_AVAIL;
- }
- for (i = 2; i < mod_len; i++)
- if (temp[i] == 0x00)
- break;
- if ((i < 9) || (i > (mod_len - 2))) {
- kfree(temp_exp);
- return SEN_NOT_AVAIL;
- }
- pad_len = i + 1;
- vud_len = mod_len - pad_len;
- memmove(temp, temp+pad_len, vud_len);
- temp -= 2;
- vud_len += 2;
- itoLe2(&vud_len, temp);
- temp += (vud_len);
- keyb_p = (struct T6_keyBlock_hdr *)temp;
- temp += sizeof(struct T6_keyBlock_hdr);
- memcpy(temp, &static_public_key, sizeof(static_public_key));
- key_p = (struct cca_public_key *)temp;
- temp = key_p->pubSec.exponent;
- memcpy(temp, exp_p, exp_len);
- kfree(temp_exp);
- temp += exp_len;
- if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
- return SEN_RELEASED;
- if (is_empty(temp, mod_len))
- return SEN_USER_ERROR;
- key_p->pubSec.modulus_bit_len = 8 * mod_len;
- key_p->pubSec.modulus_byte_len = mod_len;
- key_p->pubSec.exponent_len = exp_len;
- key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
- key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
- key_p->pubHdr.token_length = key_len;
- key_len += 4;
- itoLe2(&key_len, keyb_p->ulen);
- key_len += 2;
- itoLe2(&key_len, keyb_p->blen);
- parmBlock_l -= pad_len;
- itoLe2(&parmBlock_l, cprb_p->req_parml);
- *z90cMsg_l_p = tmp_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
- int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
-{
- int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
- int long_len, pad_len, keyPartsLen, tmp_l;
- unsigned char *tgt_p, *temp;
- struct type6_hdr *tp6Hdr_p;
- struct CPRB *cprb_p;
- struct cca_token_hdr *keyHdr_p;
- struct cca_pvt_ext_CRT_sec *pvtSec_p;
- struct cca_public_sec *pubSec_p;
-
- mod_len = icaMsg_p->inputdatalength;
- short_len = mod_len / 2;
- long_len = 8 + short_len;
- keyPartsLen = 3 * long_len + 2 * short_len;
- pad_len = (8 - (keyPartsLen % 8)) % 8;
- keyPartsLen += pad_len + mod_len;
- tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len;
- total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
- parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
- vud_len = 2 + mod_len;
- tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
-
- memset(z90cMsg_p, 0, tmp_size);
- tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
- memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr));
- tp6Hdr_p = (struct type6_hdr *)tgt_p;
- tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
- tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
- tgt_p += sizeof(struct type6_hdr);
- cprb_p = (struct CPRB *) tgt_p;
- memcpy(tgt_p, &static_cprb, sizeof(struct CPRB));
- cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3);
- itoLe2(&parmBlock_l, cprb_p->req_parml);
- memcpy(cprb_p->rpl_parml, cprb_p->req_parml,
- sizeof(cprb_p->req_parml));
- tgt_p += sizeof(struct CPRB);
- memcpy(tgt_p, &static_pkd_function_and_rules,
- sizeof(struct function_and_rules_block));
- tgt_p += sizeof(struct function_and_rules_block);
- itoLe2(&vud_len, tgt_p);
- tgt_p += 2;
- if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, mod_len))
- return SEN_USER_ERROR;
- tgt_p += mod_len;
- tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
- sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
- itoLe2(&tmp_l, tgt_p);
- temp = tgt_p + 2;
- tmp_l -= 2;
- itoLe2(&tmp_l, temp);
- tgt_p += sizeof(struct T6_keyBlock_hdr);
- keyHdr_p = (struct cca_token_hdr *)tgt_p;
- keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
- tmp_l -= 4;
- keyHdr_p->token_length = tmp_l;
- tgt_p += sizeof(struct cca_token_hdr);
- pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
- pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
- pvtSec_p->section_length =
- sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
- pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
- pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
- pvtSec_p->p_len = long_len;
- pvtSec_p->q_len = short_len;
- pvtSec_p->dp_len = long_len;
- pvtSec_p->dq_len = short_len;
- pvtSec_p->u_len = long_len;
- pvtSec_p->mod_len = mod_len;
- pvtSec_p->pad_len = pad_len;
- tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
- if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, long_len))
- return SEN_USER_ERROR;
- tgt_p += long_len;
- if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, short_len))
- return SEN_USER_ERROR;
- tgt_p += short_len;
- if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, long_len))
- return SEN_USER_ERROR;
- tgt_p += long_len;
- if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, short_len))
- return SEN_USER_ERROR;
- tgt_p += short_len;
- if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, long_len))
- return SEN_USER_ERROR;
- tgt_p += long_len;
- tgt_p += pad_len;
- memset(tgt_p, 0xFF, mod_len);
- tgt_p += mod_len;
- memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
- pubSec_p = (struct cca_public_sec *) tgt_p;
- pubSec_p->modulus_bit_len = 8 * mod_len;
- *z90cMsg_l_p = tmp_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx,
- int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
- int dev_type)
-{
- int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
- int key_len, i;
- unsigned char *temp_exp, *tgt_p, *temp, *exp_p;
- struct type6_hdr *tp6Hdr_p;
- struct CPRBX *cprbx_p;
- struct cca_public_key *key_p;
- struct T6_keyBlock_hdrX *keyb_p;
-
- temp_exp = kmalloc(256, GFP_KERNEL);
- if (!temp_exp)
- return EGETBUFF;
- mod_len = icaMsg_p->inputdatalength;
- if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
- kfree(temp_exp);
- return SEN_RELEASED;
- }
- if (is_empty(temp_exp, mod_len)) {
- kfree(temp_exp);
- return SEN_USER_ERROR;
- }
- exp_p = temp_exp;
- for (i = 0; i < mod_len; i++)
- if (exp_p[i])
- break;
- if (i >= mod_len) {
- kfree(temp_exp);
- return SEN_USER_ERROR;
- }
- exp_len = mod_len - i;
- exp_p += i;
- PDEBUG("exp_len after computation: %08x\n", exp_len);
- tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len;
- total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
- parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
- tmp_size = tmp_size + CALLER_HEADER;
- vud_len = 2 + mod_len;
- memset(z90cMsg_p, 0, tmp_size);
- tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
- memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
- tp6Hdr_p = (struct type6_hdr *)tgt_p;
- tp6Hdr_p->ToCardLen1 = total_CPRB_len;
- tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
- memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
- sizeof(static_PKE_function_code));
- tgt_p += sizeof(struct type6_hdr);
- memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
- cprbx_p = (struct CPRBX *) tgt_p;
- cprbx_p->domain = (unsigned short)cdx;
- cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE;
- tgt_p += sizeof(struct CPRBX);
- if (dev_type == PCIXCC_MCL2)
- memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2,
- sizeof(struct function_and_rules_block));
- else
- memcpy(tgt_p, &static_pke_function_and_rulesX,
- sizeof(struct function_and_rules_block));
- tgt_p += sizeof(struct function_and_rules_block);
-
- tgt_p += 2;
- if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) {
- kfree(temp_exp);
- return SEN_RELEASED;
- }
- if (is_empty(tgt_p, mod_len)) {
- kfree(temp_exp);
- return SEN_USER_ERROR;
- }
- tgt_p -= 2;
- *((short *)tgt_p) = (short) vud_len;
- tgt_p += vud_len;
- keyb_p = (struct T6_keyBlock_hdrX *)tgt_p;
- tgt_p += sizeof(struct T6_keyBlock_hdrX);
- memcpy(tgt_p, &static_public_key, sizeof(static_public_key));
- key_p = (struct cca_public_key *)tgt_p;
- temp = key_p->pubSec.exponent;
- memcpy(temp, exp_p, exp_len);
- kfree(temp_exp);
- temp += exp_len;
- if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
- return SEN_RELEASED;
- if (is_empty(temp, mod_len))
- return SEN_USER_ERROR;
- key_p->pubSec.modulus_bit_len = 8 * mod_len;
- key_p->pubSec.modulus_byte_len = mod_len;
- key_p->pubSec.exponent_len = exp_len;
- key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
- key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
- key_p->pubHdr.token_length = key_len;
- key_len += 4;
- keyb_p->ulen = (unsigned short)key_len;
- key_len += 2;
- keyb_p->blen = (unsigned short)key_len;
- cprbx_p->req_parml = parmBlock_l;
- *z90cMsg_l_p = tmp_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
- int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
- int dev_type)
-{
- int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
- int long_len, pad_len, keyPartsLen, tmp_l;
- unsigned char *tgt_p, *temp;
- struct type6_hdr *tp6Hdr_p;
- struct CPRBX *cprbx_p;
- struct cca_token_hdr *keyHdr_p;
- struct cca_pvt_ext_CRT_sec *pvtSec_p;
- struct cca_public_sec *pubSec_p;
-
- mod_len = icaMsg_p->inputdatalength;
- short_len = mod_len / 2;
- long_len = 8 + short_len;
- keyPartsLen = 3 * long_len + 2 * short_len;
- pad_len = (8 - (keyPartsLen % 8)) % 8;
- keyPartsLen += pad_len + mod_len;
- tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len;
- total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
- parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
- vud_len = 2 + mod_len;
- tmp_size = tmp_size + CALLER_HEADER;
- memset(z90cMsg_p, 0, tmp_size);
- tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
- memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
- tp6Hdr_p = (struct type6_hdr *)tgt_p;
- tp6Hdr_p->ToCardLen1 = total_CPRB_len;
- tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
- tgt_p += sizeof(struct type6_hdr);
- cprbx_p = (struct CPRBX *) tgt_p;
- memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
- cprbx_p->domain = (unsigned short)cdx;
- cprbx_p->req_parml = parmBlock_l;
- cprbx_p->rpl_msgbl = parmBlock_l;
- tgt_p += sizeof(struct CPRBX);
- if (dev_type == PCIXCC_MCL2)
- memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2,
- sizeof(struct function_and_rules_block));
- else
- memcpy(tgt_p, &static_pkd_function_and_rulesX,
- sizeof(struct function_and_rules_block));
- tgt_p += sizeof(struct function_and_rules_block);
- *((short *)tgt_p) = (short) vud_len;
- tgt_p += 2;
- if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, mod_len))
- return SEN_USER_ERROR;
- tgt_p += mod_len;
- tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
- sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
- *((short *)tgt_p) = (short) tmp_l;
- temp = tgt_p + 2;
- tmp_l -= 2;
- *((short *)temp) = (short) tmp_l;
- tgt_p += sizeof(struct T6_keyBlock_hdr);
- keyHdr_p = (struct cca_token_hdr *)tgt_p;
- keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
- tmp_l -= 4;
- keyHdr_p->token_length = tmp_l;
- tgt_p += sizeof(struct cca_token_hdr);
- pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
- pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
- pvtSec_p->section_length =
- sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
- pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
- pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
- pvtSec_p->p_len = long_len;
- pvtSec_p->q_len = short_len;
- pvtSec_p->dp_len = long_len;
- pvtSec_p->dq_len = short_len;
- pvtSec_p->u_len = long_len;
- pvtSec_p->mod_len = mod_len;
- pvtSec_p->pad_len = pad_len;
- tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
- if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, long_len))
- return SEN_USER_ERROR;
- tgt_p += long_len;
- if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, short_len))
- return SEN_USER_ERROR;
- tgt_p += short_len;
- if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, long_len))
- return SEN_USER_ERROR;
- tgt_p += long_len;
- if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, short_len))
- return SEN_USER_ERROR;
- tgt_p += short_len;
- if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
- return SEN_RELEASED;
- if (is_empty(tgt_p, long_len))
- return SEN_USER_ERROR;
- tgt_p += long_len;
- tgt_p += pad_len;
- memset(tgt_p, 0xFF, mod_len);
- tgt_p += mod_len;
- memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
- pubSec_p = (struct cca_public_sec *) tgt_p;
- pubSec_p->modulus_bit_len = 8 * mod_len;
- *z90cMsg_l_p = tmp_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
- union type50_msg *z90cMsg_p)
-{
- int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
- unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
- union type50_msg *tmp_type50_msg;
-
- mod_len = icaMex_p->inputdatalength;
-
- msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
- CALLER_HEADER;
-
- memset(z90cMsg_p, 0, msg_size);
-
- tmp_type50_msg = (union type50_msg *)
- ((unsigned char *) z90cMsg_p + CALLER_HEADER);
-
- tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
-
- if (mod_len <= 128) {
- tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
- tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
- mod_tgt = tmp_type50_msg->meb1.modulus;
- mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
- exp_tgt = tmp_type50_msg->meb1.exponent;
- exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
- inp_tgt = tmp_type50_msg->meb1.message;
- inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
- } else {
- tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
- tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
- mod_tgt = tmp_type50_msg->meb2.modulus;
- mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
- exp_tgt = tmp_type50_msg->meb2.exponent;
- exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
- inp_tgt = tmp_type50_msg->meb2.message;
- inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
- }
-
- mod_tgt += (mod_tgt_len - mod_len);
- if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
- return SEN_RELEASED;
- if (is_empty(mod_tgt, mod_len))
- return SEN_USER_ERROR;
- exp_tgt += (exp_tgt_len - mod_len);
- if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
- return SEN_RELEASED;
- if (is_empty(exp_tgt, mod_len))
- return SEN_USER_ERROR;
- inp_tgt += (inp_tgt_len - mod_len);
- if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
- return SEN_RELEASED;
- if (is_empty(inp_tgt, mod_len))
- return SEN_USER_ERROR;
-
- *z90cMsg_l_p = msg_size - CALLER_HEADER;
-
- return 0;
-}
-
-static int
-ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
- int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
-{
- int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
- dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
- unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
- temp[8];
- union type50_msg *tmp_type50_msg;
-
- mod_len = icaMsg_p->inputdatalength;
- short_len = mod_len / 2;
- long_len = mod_len / 2 + 8;
- long_offset = 0;
-
- if (long_len > 128) {
- memset(temp, 0x00, sizeof(temp));
- if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
- return SEN_RELEASED;
- if (!is_empty(temp, 8))
- return SEN_NOT_AVAIL;
- if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
- return SEN_RELEASED;
- if (!is_empty(temp, 8))
- return SEN_NOT_AVAIL;
- if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
- return SEN_RELEASED;
- if (!is_empty(temp, 8))
- return SEN_NOT_AVAIL;
- long_offset = long_len - 128;
- long_len = 128;
- }
-
- tmp_size = ((long_len <= 64) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
- CALLER_HEADER;
-
- memset(z90cMsg_p, 0, tmp_size);
-
- tmp_type50_msg = (union type50_msg *)
- ((unsigned char *) z90cMsg_p + CALLER_HEADER);
-
- tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
- if (long_len <= 64) {
- tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
- tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
- p_tgt = tmp_type50_msg->crb1.p;
- p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
- q_tgt = tmp_type50_msg->crb1.q;
- q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
- dp_tgt = tmp_type50_msg->crb1.dp;
- dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
- dq_tgt = tmp_type50_msg->crb1.dq;
- dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
- u_tgt = tmp_type50_msg->crb1.u;
- u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
- inp_tgt = tmp_type50_msg->crb1.message;
- inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
- } else {
- tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
- tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
- p_tgt = tmp_type50_msg->crb2.p;
- p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
- q_tgt = tmp_type50_msg->crb2.q;
- q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
- dp_tgt = tmp_type50_msg->crb2.dp;
- dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
- dq_tgt = tmp_type50_msg->crb2.dq;
- dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
- u_tgt = tmp_type50_msg->crb2.u;
- u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
- inp_tgt = tmp_type50_msg->crb2.message;
- inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
- }
-
- p_tgt += (p_tgt_len - long_len);
- if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
- return SEN_RELEASED;
- if (is_empty(p_tgt, long_len))
- return SEN_USER_ERROR;
- q_tgt += (q_tgt_len - short_len);
- if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
- return SEN_RELEASED;
- if (is_empty(q_tgt, short_len))
- return SEN_USER_ERROR;
- dp_tgt += (dp_tgt_len - long_len);
- if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
- return SEN_RELEASED;
- if (is_empty(dp_tgt, long_len))
- return SEN_USER_ERROR;
- dq_tgt += (dq_tgt_len - short_len);
- if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
- return SEN_RELEASED;
- if (is_empty(dq_tgt, short_len))
- return SEN_USER_ERROR;
- u_tgt += (u_tgt_len - long_len);
- if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
- return SEN_RELEASED;
- if (is_empty(u_tgt, long_len))
- return SEN_USER_ERROR;
- inp_tgt += (inp_tgt_len - mod_len);
- if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
- return SEN_RELEASED;
- if (is_empty(inp_tgt, mod_len))
- return SEN_USER_ERROR;
-
- *z90cMsg_l_p = tmp_size - CALLER_HEADER;
-
- return 0;
-}
-
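-/* Dispatch on device type and request kind (mod-expo vs. CRT,
- * encrypt vs. decrypt) to the matching converter above. */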
-int
-convert_request(unsigned char *buffer, int func, unsigned short function,
- int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
-{
- if (dev_type == PCICA) {
- if (func == ICARSACRT)
- return ICACRT_msg_to_type4CRT_msg(
- (struct ica_rsa_modexpo_crt *) buffer,
- msg_l_p, (union type4_msg *) msg_p);
- else
- return ICAMEX_msg_to_type4MEX_msg(
- (struct ica_rsa_modexpo *) buffer,
- msg_l_p, (union type4_msg *) msg_p);
- }
- if (dev_type == PCICC) {
- if (func == ICARSACRT)
- return ICACRT_msg_to_type6CRT_msg(
- (struct ica_rsa_modexpo_crt *) buffer,
- cdx, msg_l_p, (struct type6_msg *)msg_p);
- if (function == PCI_FUNC_KEY_ENCRYPT)
- return ICAMEX_msg_to_type6MEX_en_msg(
- (struct ica_rsa_modexpo *) buffer,
- cdx, msg_l_p, (struct type6_msg *) msg_p);
- else
- return ICAMEX_msg_to_type6MEX_de_msg(
- (struct ica_rsa_modexpo *) buffer,
- cdx, msg_l_p, (struct type6_msg *) msg_p);
- }
- if ((dev_type == PCIXCC_MCL2) ||
- (dev_type == PCIXCC_MCL3) ||
- (dev_type == CEX2C)) {
- if (func == ICARSACRT)
- return ICACRT_msg_to_type6CRT_msgX(
- (struct ica_rsa_modexpo_crt *) buffer,
- cdx, msg_l_p, (struct type6_msg *) msg_p,
- dev_type);
- else
- return ICAMEX_msg_to_type6MEX_msgX(
- (struct ica_rsa_modexpo *) buffer,
- cdx, msg_l_p, (struct type6_msg *) msg_p,
- dev_type);
- }
- if (dev_type == CEX2A) {
- if (func == ICARSACRT)
- return ICACRT_msg_to_type50CRT_msg(
- (struct ica_rsa_modexpo_crt *) buffer,
- msg_l_p, (union type50_msg *) msg_p);
- else
- return ICAMEX_msg_to_type50MEX_msg(
- (struct ica_rsa_modexpo *) buffer,
- msg_l_p, (union type50_msg *) msg_p);
- }
-
- return 0;
-}
-
-int ext_bitlens_msg_count = 0;
-static inline void
-unset_ext_bitlens(void)
-{
- if (!ext_bitlens_msg_count) {
- PRINTK("Unable to use coprocessors for extended bitlengths. "
- "Using PCICAs/CEX2As (if present) for extended "
- "bitlengths. This is not an error.\n");
- ext_bitlens_msg_count++;
- }
- ext_bitlens = 0;
-}
-
-int
-convert_response(unsigned char *response, unsigned char *buffer,
- int *respbufflen_p, unsigned char *resp_buff)
-{
- struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
- struct error_hdr *errh_p = (struct error_hdr *) response;
- struct type80_hdr *t80h_p = (struct type80_hdr *) response;
- struct type84_hdr *t84h_p = (struct type84_hdr *) response;
- struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
- int reply_code, service_rc, service_rs, src_l;
- unsigned char *src_p, *tgt_p;
- struct CPRB *cprb_p;
- struct CPRBX *cprbx_p;
-
- src_p = 0;
- reply_code = 0;
- service_rc = 0;
- service_rs = 0;
- src_l = 0;
- switch (errh_p->type) {
- case TYPE82_RSP_CODE:
- case TYPE88_RSP_CODE:
- reply_code = errh_p->reply_code;
- src_p = (unsigned char *)errh_p;
- PRINTK("Hardware error: Type %02X Message Header: "
- "%02x%02x%02x%02x%02x%02x%02x%02x\n",
- errh_p->type,
- src_p[0], src_p[1], src_p[2], src_p[3],
- src_p[4], src_p[5], src_p[6], src_p[7]);
- break;
- case TYPE80_RSP_CODE:
- src_l = icaMsg_p->outputdatalength;
- src_p = response + (int)t80h_p->len - src_l;
- break;
- case TYPE84_RSP_CODE:
- src_l = icaMsg_p->outputdatalength;
- src_p = response + (int)t84h_p->len - src_l;
- break;
- case TYPE86_RSP_CODE:
- reply_code = t86m_p->header.reply_code;
- if (reply_code != 0)
- break;
- cprb_p = (struct CPRB *)
- (response + sizeof(struct type86_fmt2_msg));
- cprbx_p = (struct CPRBX *) cprb_p;
- if (cprb_p->cprb_ver_id != 0x02) {
- le2toI(cprb_p->ccp_rtcode, &service_rc);
- if (service_rc != 0) {
- le2toI(cprb_p->ccp_rscode, &service_rs);
- if ((service_rc == 8) && (service_rs == 66))
- PDEBUG("Bad block format on PCICC\n");
- else if ((service_rc == 8) && (service_rs == 65))
- PDEBUG("Probably an even modulus on "
- "PCICC\n");
- else if ((service_rc == 8) && (service_rs == 770)) {
- PDEBUG("Invalid key length on PCICC\n");
- unset_ext_bitlens();
- return REC_USE_PCICA;
- }
- else if ((service_rc == 8) && (service_rs == 783)) {
-					PDEBUG("Extended bitlengths not enabled "
-						"on PCICC\n");
- unset_ext_bitlens();
- return REC_USE_PCICA;
- }
- else
- PRINTK("service rc/rs (PCICC): %d/%d\n",
- service_rc, service_rs);
- return REC_OPERAND_INV;
- }
- src_p = (unsigned char *)cprb_p + sizeof(struct CPRB);
- src_p += 4;
- le2toI(src_p, &src_l);
- src_l -= 2;
- src_p += 2;
- } else {
- service_rc = (int)cprbx_p->ccp_rtcode;
- if (service_rc != 0) {
- service_rs = (int) cprbx_p->ccp_rscode;
- if ((service_rc == 8) && (service_rs == 66))
- PDEBUG("Bad block format on PCIXCC\n");
- else if ((service_rc == 8) && (service_rs == 65))
- PDEBUG("Probably an even modulus on "
- "PCIXCC\n");
- else if ((service_rc == 8) && (service_rs == 770)) {
- PDEBUG("Invalid key length on PCIXCC\n");
- unset_ext_bitlens();
- return REC_USE_PCICA;
- }
- else if ((service_rc == 8) && (service_rs == 783)) {
-					PDEBUG("Extended bitlengths not enabled "
-						"on PCIXCC\n");
- unset_ext_bitlens();
- return REC_USE_PCICA;
- }
- else
- PRINTK("service rc/rs (PCIXCC): %d/%d\n",
- service_rc, service_rs);
- return REC_OPERAND_INV;
- }
- src_p = (unsigned char *)
- cprbx_p + sizeof(struct CPRBX);
- src_p += 4;
- src_l = (int)(*((short *) src_p));
- src_l -= 2;
- src_p += 2;
- }
- break;
- default:
- src_p = (unsigned char *)errh_p;
- PRINTK("Unrecognized Message Header: "
- "%02x%02x%02x%02x%02x%02x%02x%02x\n",
- src_p[0], src_p[1], src_p[2], src_p[3],
- src_p[4], src_p[5], src_p[6], src_p[7]);
- return REC_BAD_MESSAGE;
- }
-
- if (reply_code)
- switch (reply_code) {
- case REP82_ERROR_MACHINE_FAILURE:
- if (errh_p->type == TYPE82_RSP_CODE)
- PRINTKW("Machine check failure\n");
- else
- PRINTKW("Module failure\n");
- return REC_HARDWAR_ERR;
- case REP82_ERROR_OPERAND_INVALID:
- return REC_OPERAND_INV;
- case REP88_ERROR_MESSAGE_MALFORMD:
- PRINTKW("Message malformed\n");
- return REC_OPERAND_INV;
- case REP82_ERROR_OPERAND_SIZE:
- return REC_OPERAND_SIZE;
- case REP82_ERROR_EVEN_MOD_IN_OPND:
- return REC_EVEN_MOD;
- case REP82_ERROR_MESSAGE_TYPE:
- return WRONG_DEVICE_TYPE;
- case REP82_ERROR_TRANSPORT_FAIL:
- PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
- t86m_p->apfs[0], t86m_p->apfs[1],
- t86m_p->apfs[2], t86m_p->apfs[3]);
- return REC_HARDWAR_ERR;
- default:
- PRINTKW("reply code = %d\n", reply_code);
- return REC_HARDWAR_ERR;
- }
-
- if (service_rc != 0)
- return REC_OPERAND_INV;
-
- if ((src_l > icaMsg_p->outputdatalength) ||
- (src_l > RESPBUFFSIZE) ||
- (src_l <= 0))
- return REC_OPERAND_SIZE;
-
- PDEBUG("Length returned = %d\n", src_l);
- tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
- memcpy(tgt_p, src_p, src_l);
- if ((errh_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
- memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
- if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
- return REC_INVALID_PAD;
- }
- *respbufflen_p = icaMsg_p->outputdatalength;
- if (*respbufflen_p == 0)
- PRINTK("Zero *respbufflen_p\n");
-
- return 0;
-}
-
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
deleted file mode 100644
index b2f20ab8431..00000000000
--- a/drivers/s390/crypto/z90main.c
+++ /dev/null
@@ -1,3379 +0,0 @@
-/*
- * linux/drivers/s390/crypto/z90main.c
- *
- * z90crypt 1.3.3
- *
- * Copyright (C) 2001, 2005 IBM Corporation
- * Author(s): Robert Burroughs (burrough@us.ibm.com)
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <asm/uaccess.h> // copy_(from|to)_user
-#include <linux/compat.h>
-#include <linux/compiler.h>
-#include <linux/delay.h> // mdelay
-#include <linux/init.h>
-#include <linux/interrupt.h> // for tasklets
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/proc_fs.h>
-#include <linux/syscalls.h>
-#include "z90crypt.h"
-#include "z90common.h"
-
-/**
- * Defaults that may be modified.
- */
-
-/**
- * You can specify a different minor at compile time.
- */
-#ifndef Z90CRYPT_MINOR
-#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
-#endif
-
-/**
- * You can specify a different domain at compile time or on the insmod
- * command line.
- */
-#ifndef DOMAIN_INDEX
-#define DOMAIN_INDEX -1
-#endif
-
-/**
- * This is the name under which the device is registered in /proc/modules.
- */
-#define REG_NAME "z90crypt"
-
-/**
- * Cleanup should run every CLEANUPTIME seconds and should clean up requests
- * older than CLEANUPTIME seconds in the past.
- */
-#ifndef CLEANUPTIME
-#define CLEANUPTIME 15
-#endif
-
-/**
- * Config should run every CONFIGTIME seconds
- */
-#ifndef CONFIGTIME
-#define CONFIGTIME 30
-#endif
-
-/**
- * The first execution of the config task should take place
- * immediately after initialization
- */
-#ifndef INITIAL_CONFIGTIME
-#define INITIAL_CONFIGTIME 1
-#endif
-
-/**
- * Reader should run every READERTIME milliseconds
- * With the 100Hz patch for s390, z90crypt can lock the system solid while
- * under heavy load. We'll try to avoid that.
- */
-#ifndef READERTIME
-#if HZ > 1000
-#define READERTIME 2
-#else
-#define READERTIME 10
-#endif
-#endif
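-
-/**
- * Note (illustrative, not in the original source): READERTIME is in
- * milliseconds, so z90crypt_init_module() below arms the reader timer
- * as
- *
- *	reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
- *
- * i.e. roughly every 2ms when HZ > 1000 and every 10ms otherwise.
- */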
-
-/**
- * turn long device array index into device pointer
- */
-#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
-
-/**
- * turn short device array index into long device array index
- */
-#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
-
-/**
- * turn short device array index into device pointer
- */
-#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
-
-/**
- * Status for a work-element
- */
-#define STAT_DEFAULT 0x00 // request has not been processed
-
-#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
- // else, device is determined each write
-#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
- // before being sent to the hardware.
-#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
-// 0x20 // UNUSED state
-#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
-#define STAT_NOWORK 0x00 // bits off: no work on any queue
-#define STAT_RDWRMASK 0x30 // mask for bits 5-4
-
-/**
- * Macros to check the status RDWRMASK
- */
-#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
-#define SET_RDWRMASK(statbyte, newval) \
- {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
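-
-/**
- * Illustrative helper, not part of the original driver: the two macros
- * above are used together to step a work element's status byte through
- * its read/write states, e.g. from "written" to "read pending" once a
- * response has arrived.
- */
-static inline void
-example_mark_read_pending(unsigned char *statbyte_p)
-{
-	if (CHK_RDWRMASK(*statbyte_p) == STAT_WRITTEN)
-		SET_RDWRMASK(*statbyte_p, STAT_READPEND);
-}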
-
-/**
- * Audit Trail. Progress of a Work element
- * audit[0]: Unless noted otherwise, these bits are all set by the process
- */
-#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
-#define FP_BUFFREQ 0x40 // Low Level buffer requested
-#define FP_BUFFGOT 0x20 // Low Level buffer obtained
-#define FP_SENT 0x10 // Work element sent to a crypto device
- // (may be set by process or by reader task)
-#define FP_PENDING 0x08 // Work element placed on pending queue
- // (may be set by process or by reader task)
-#define FP_REQUEST 0x04 // Work element placed on request queue
-#define FP_ASLEEP 0x02 // Work element about to sleep
-#define FP_AWAKE 0x01 // Work element has been awakened
-
-/**
- * audit[1]: These bits are set by the reader task and/or the cleanup task
- */
-#define FP_NOTPENDING 0x80 // Work element removed from pending queue
-#define FP_AWAKENING 0x40 // Caller about to be awakened
-#define FP_TIMEDOUT 0x20 // Caller timed out
-#define FP_RESPSIZESET 0x10 // Response size copied to work element
-#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
-#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
-#define FP_REMREQUEST 0x02 // Work element removed from request queue
-#define FP_SIGNALED 0x01 // Work element was awakened by a signal
-
-/**
- * audit[2]: unused
- */
-
-/**
- * state of the file handle in private_data.status
- */
-#define STAT_OPEN 0
-#define STAT_CLOSED 1
-
-/**
- * PID() expands to the process ID of the current process
- */
-#define PID() (current->pid)
-
-/**
- * Selected Constants. The number of APs and the number of devices
- */
-#ifndef Z90CRYPT_NUM_APS
-#define Z90CRYPT_NUM_APS 64
-#endif
-#ifndef Z90CRYPT_NUM_DEVS
-#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
-#endif
-
-/**
- * Buffer size for receiving responses. The maximum Response Size
- * is actually the maximum request size, since in an error condition
- * the request itself may be returned unchanged.
- */
-#define MAX_RESPONSE_SIZE 0x0000077C
-
-/**
- * A count and status-byte mask
- */
-struct status {
- int st_count; // # of enabled devices
- int disabled_count; // # of disabled devices
- int user_disabled_count; // # of devices disabled via proc fs
- unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
-};
-
-/**
- * The array of device indexes is a mechanism for fast indexing into
- * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
- * installed, device_index[0] is 3, device_index[1] is 9, and
- * device_index[2] is 47.
- */
-struct device_x {
- int device_index[Z90CRYPT_NUM_DEVS];
-};
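-
-/**
- * Worked example (values from the comment above, for illustration
- * only): with APs 3, 9 and 47 installed, the installed devices can be
- * visited by indirecting through the short index:
- *
- *	int i;
- *	for (i = 0; i < 3; i++)
- *		PDEBUG("AP %d\n", SHRT2LONG(i));	// prints 3, 9, 47
- */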
-
-/**
- * All devices are arranged in a single array: 64 APs
- */
-struct device {
- int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
- // PCIXCC_MCL3, CEX2C, CEX2A
- enum devstat dev_stat; // current device status
- int dev_self_x; // Index in array
- int disabled; // Set when device is in error
- int user_disabled; // Set when device is disabled by user
- int dev_q_depth; // q depth
- unsigned char * dev_resp_p; // Response buffer address
- int dev_resp_l; // Response Buffer length
- int dev_caller_count; // Number of callers
- int dev_total_req_cnt; // # requests for device since load
- struct list_head dev_caller_list; // List of callers
-};
-
-/**
- * There's a struct status and a struct device_x for each device type.
- */
-struct hdware_block {
- struct status hdware_mask;
- struct status type_mask[Z90CRYPT_NUM_TYPES];
- struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
- unsigned char device_type_array[Z90CRYPT_NUM_APS];
-};
-
-/**
- * z90crypt is the topmost data structure in the hierarchy.
- */
-struct z90crypt {
- int max_count; // Nr of possible crypto devices
- struct status mask;
- int q_depth_array[Z90CRYPT_NUM_DEVS];
- int dev_type_array[Z90CRYPT_NUM_DEVS];
- struct device_x overall_device_x; // array device indexes
- struct device * device_p[Z90CRYPT_NUM_DEVS];
- int terminating;
-	int domain_established;	// TRUE: domain has been found
- int cdx; // Crypto Domain Index
- int len; // Length of this data structure
- struct hdware_block *hdware_info;
-};
-
-/**
- * An array of these structures is pointed to from dev_caller
- * The length of the array depends on the device type. For APs,
- * there are 8.
- *
- * The caller buffer is allocated to the user at OPEN. At WRITE,
- * it contains the request; at READ, the response. The function
- * send_to_crypto_device converts the request to device-dependent
- * form and uses the caller's OPEN-allocated buffer for the response.
- *
- * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p,
- * which points to it), see the discussion in z90hardware.c.
- * Search for "extended request message block".
- */
-struct caller {
- int caller_buf_l; // length of original request
- unsigned char * caller_buf_p; // Original request on WRITE
- int caller_dev_dep_req_l; // len device dependent request
- unsigned char * caller_dev_dep_req_p; // Device dependent form
- unsigned char caller_id[8]; // caller-supplied message id
- struct list_head caller_liste;
- unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
-};
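-
-/**
- * Layout sketch, inferred from init_work_element() and
- * get_crypto_request_buffer() below (for illustration only): each
- * request lives in a single zeroed page, with the caller structure
- * placed immediately after the work element and the copied-in user
- * input after that:
- *
- *	we_p (page start):  struct work_element
- *	we_p->requestptr:   struct caller
- *	input copy:         (unsigned char *)we_p +
- *	                        sizeof(struct work_element) +
- *	                        sizeof(struct caller)
- */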
-
-/**
- * Function prototypes from z90hardware.c
- */
-enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
- int *dev_type);
-enum devstat reset_device(int deviceNr, int cdx, int resetNr);
-enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
-enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
- unsigned char *resp, unsigned char *psmid);
-int convert_request(unsigned char *buffer, int func, unsigned short function,
- int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
-int convert_response(unsigned char *response, unsigned char *buffer,
- int *respbufflen_p, unsigned char *resp_buff);
-
-/**
- * Low level function prototypes
- */
-static int create_z90crypt(int *cdx_p);
-static int refresh_z90crypt(int *cdx_p);
-static int find_crypto_devices(struct status *deviceMask);
-static int create_crypto_device(int index);
-static int destroy_crypto_device(int index);
-static void destroy_z90crypt(void);
-static int refresh_index_array(struct status *status_str,
- struct device_x *index_array);
-static int probe_device_type(struct device *devPtr);
-static int probe_PCIXCC_type(struct device *devPtr);
-
-/**
- * proc fs definitions
- */
-static struct proc_dir_entry *z90crypt_entry;
-
-/**
- * data structures
- */
-
-/**
- * work_element.priv_data points back to this structure
- */
-struct priv_data {
- pid_t opener_pid;
- unsigned char status; // 0: open 1: closed
-};
-
-/**
- * A work element is allocated for each request
- */
-struct work_element {
- struct priv_data *priv_data;
- pid_t pid;
- int devindex; // index of device processing this w_e
- // (If request did not specify device,
- // -1 until placed onto a queue)
- int devtype;
- struct list_head liste; // used for requestq and pendingq
- char buffer[128]; // local copy of user request
- int buff_size; // size of the buffer for the request
- char resp_buff[RESPBUFFSIZE];
- int resp_buff_size;
- char __user * resp_addr; // address of response in user space
- unsigned int funccode; // function code of request
- wait_queue_head_t waitq;
- unsigned long requestsent; // time at which the request was sent
- atomic_t alarmrung; // wake-up signal
- unsigned char caller_id[8]; // pid + counter, for this w_e
- unsigned char status[1]; // bits to mark status of the request
- unsigned char audit[3]; // record of work element's progress
- unsigned char * requestptr; // address of request buffer
- int retcode; // return code of request
-};
-
-/**
- * High level function prototypes
- */
-static int z90crypt_open(struct inode *, struct file *);
-static int z90crypt_release(struct inode *, struct file *);
-static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t z90crypt_write(struct file *, const char __user *,
- size_t, loff_t *);
-static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-static void z90crypt_reader_task(unsigned long);
-static void z90crypt_schedule_reader_task(unsigned long);
-static void z90crypt_config_task(unsigned long);
-static void z90crypt_cleanup_task(unsigned long);
-
-static int z90crypt_status(char *, char **, off_t, int, int *, void *);
-static int z90crypt_status_write(struct file *, const char __user *,
- unsigned long, void *);
-
-/**
- * Storage allocated at initialization and used throughout the life of
- * this insmod
- */
-static int domain = DOMAIN_INDEX;
-static struct z90crypt z90crypt;
-static int quiesce_z90crypt;
-static spinlock_t queuespinlock;
-static struct list_head request_list;
-static int requestq_count;
-static struct list_head pending_list;
-static int pendingq_count;
-
-static struct tasklet_struct reader_tasklet;
-static struct timer_list reader_timer;
-static struct timer_list config_timer;
-static struct timer_list cleanup_timer;
-static atomic_t total_open;
-static atomic_t z90crypt_step;
-
-static struct file_operations z90crypt_fops = {
- .owner = THIS_MODULE,
- .read = z90crypt_read,
- .write = z90crypt_write,
- .unlocked_ioctl = z90crypt_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = z90crypt_compat_ioctl,
-#endif
- .open = z90crypt_open,
- .release = z90crypt_release
-};
-
-static struct miscdevice z90crypt_misc_device = {
- .minor = Z90CRYPT_MINOR,
- .name = DEV_NAME,
- .fops = &z90crypt_fops,
-};
-
-/**
- * Documentation values.
- */
-MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman "
-	      "and Jochen Roehrig");
-MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2005 IBM Corporation");
-MODULE_LICENSE("GPL");
-module_param(domain, int, 0);
-MODULE_PARM_DESC(domain, "domain index for device");
-
-#ifdef CONFIG_COMPAT
-/**
- * ioctl32 conversion routines
- */
-struct ica_rsa_modexpo_32 { // For 32-bit callers
- compat_uptr_t inputdata;
- unsigned int inputdatalength;
- compat_uptr_t outputdata;
- unsigned int outputdatalength;
- compat_uptr_t b_key;
- compat_uptr_t n_modulus;
-};
-
-static long
-trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
- struct ica_rsa_modexpo_32 mex32k;
- struct ica_rsa_modexpo __user *mex64;
- long ret = 0;
- unsigned int i;
-
- if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
- return -EFAULT;
- mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
- if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
- return -EFAULT;
- if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
- return -EFAULT;
- if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
- __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
- __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
- __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
- __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
- __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
- return -EFAULT;
- ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
- if (!ret)
- if (__get_user(i, &mex64->outputdatalength) ||
- __put_user(i, &mex32u->outputdatalength))
- ret = -EFAULT;
- return ret;
-}
-
-struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
- compat_uptr_t inputdata;
- unsigned int inputdatalength;
- compat_uptr_t outputdata;
- unsigned int outputdatalength;
- compat_uptr_t bp_key;
- compat_uptr_t bq_key;
- compat_uptr_t np_prime;
- compat_uptr_t nq_prime;
- compat_uptr_t u_mult_inv;
-};
-
-static long
-trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
- struct ica_rsa_modexpo_crt_32 crt32k;
- struct ica_rsa_modexpo_crt __user *crt64;
- long ret = 0;
- unsigned int i;
-
- if (!access_ok(VERIFY_WRITE, crt32u,
- sizeof(struct ica_rsa_modexpo_crt_32)))
- return -EFAULT;
- crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
- if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
- return -EFAULT;
- if (copy_from_user(&crt32k, crt32u,
- sizeof(struct ica_rsa_modexpo_crt_32)))
- return -EFAULT;
- if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
- __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
- __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
- __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
- __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
- __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
- __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
- __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
- __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
- return -EFAULT;
- ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
- if (!ret)
- if (__get_user(i, &crt64->outputdatalength) ||
- __put_user(i, &crt32u->outputdatalength))
- ret = -EFAULT;
- return ret;
-}
-
-static long
-z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case ICAZ90STATUS:
- case Z90QUIESCE:
- case Z90STAT_TOTALCOUNT:
- case Z90STAT_PCICACOUNT:
- case Z90STAT_PCICCCOUNT:
- case Z90STAT_PCIXCCCOUNT:
- case Z90STAT_PCIXCCMCL2COUNT:
- case Z90STAT_PCIXCCMCL3COUNT:
- case Z90STAT_CEX2CCOUNT:
- case Z90STAT_REQUESTQ_COUNT:
- case Z90STAT_PENDINGQ_COUNT:
- case Z90STAT_TOTALOPEN_COUNT:
- case Z90STAT_DOMAIN_INDEX:
- case Z90STAT_STATUS_MASK:
- case Z90STAT_QDEPTH_MASK:
- case Z90STAT_PERDEV_REQCNT:
- return z90crypt_unlocked_ioctl(filp, cmd, arg);
- case ICARSAMODEXPO:
- return trans_modexpo32(filp, cmd, arg);
- case ICARSACRT:
- return trans_modexpo_crt32(filp, cmd, arg);
- default:
- return -ENOIOCTLCMD;
- }
-}
-#endif
-
-/**
- * The module initialization code.
- */
-static int __init
-z90crypt_init_module(void)
-{
- int result, nresult;
- struct proc_dir_entry *entry;
-
- PDEBUG("PID %d\n", PID());
-
- if ((domain < -1) || (domain > 15)) {
- PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
- return -EINVAL;
- }
-
- /* Register as misc device with given minor (or get a dynamic one). */
- result = misc_register(&z90crypt_misc_device);
- if (result < 0) {
-		PRINTKW("misc_register (minor %d) failed with %d\n",
-			z90crypt_misc_device.minor, result);
- return result;
- }
-
- PDEBUG("Registered " DEV_NAME " with result %d\n", result);
-
- result = create_z90crypt(&domain);
- if (result != 0) {
- PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
- domain, result);
- result = -ENOMEM;
- goto init_module_cleanup;
- }
-
-	PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
-		z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
-		__DATE__, __TIME__);
-	PDEBUG("create_z90crypt (domain index %d) successful.\n", domain);
-
- /* Initialize globals. */
- spin_lock_init(&queuespinlock);
-
- INIT_LIST_HEAD(&pending_list);
- pendingq_count = 0;
-
- INIT_LIST_HEAD(&request_list);
- requestq_count = 0;
-
- quiesce_z90crypt = 0;
-
- atomic_set(&total_open, 0);
- atomic_set(&z90crypt_step, 0);
-
- /* Set up the cleanup task. */
- init_timer(&cleanup_timer);
- cleanup_timer.function = z90crypt_cleanup_task;
- cleanup_timer.data = 0;
- cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
- add_timer(&cleanup_timer);
-
- /* Set up the proc file system */
- entry = create_proc_entry("driver/z90crypt", 0644, 0);
- if (entry) {
- entry->nlink = 1;
- entry->data = 0;
- entry->read_proc = z90crypt_status;
- entry->write_proc = z90crypt_status_write;
- }
- else
- PRINTK("Couldn't create z90crypt proc entry\n");
- z90crypt_entry = entry;
-
- /* Set up the configuration task. */
- init_timer(&config_timer);
- config_timer.function = z90crypt_config_task;
- config_timer.data = 0;
- config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
- add_timer(&config_timer);
-
- /* Set up the reader task */
- tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
- init_timer(&reader_timer);
- reader_timer.function = z90crypt_schedule_reader_task;
- reader_timer.data = 0;
- reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
- add_timer(&reader_timer);
-
- return 0; // success
-
-init_module_cleanup:
- if ((nresult = misc_deregister(&z90crypt_misc_device)))
- PRINTK("misc_deregister failed with %d.\n", nresult);
- else
- PDEBUG("misc_deregister successful.\n");
-
- return result; // failure
-}
-
-/**
- * The module termination code
- */
-static void __exit
-z90crypt_cleanup_module(void)
-{
- int nresult;
-
- PDEBUG("PID %d\n", PID());
-
- remove_proc_entry("driver/z90crypt", 0);
-
- if ((nresult = misc_deregister(&z90crypt_misc_device)))
- PRINTK("misc_deregister failed with %d.\n", nresult);
- else
- PDEBUG("misc_deregister successful.\n");
-
- /* Remove the tasks */
- tasklet_kill(&reader_tasklet);
- del_timer(&reader_timer);
- del_timer(&config_timer);
- del_timer(&cleanup_timer);
-
- destroy_z90crypt();
-
- PRINTKN("Unloaded.\n");
-}
-
-/**
- * Functions running under a process id
- *
- * The I/O functions:
- * z90crypt_open
- * z90crypt_release
- * z90crypt_read
- * z90crypt_write
- * z90crypt_unlocked_ioctl
- * z90crypt_status
- * z90crypt_status_write
- * disable_card
- * enable_card
- *
- * Helper functions:
- * z90crypt_rsa
- * z90crypt_prepare
- * z90crypt_send
- * z90crypt_process_results
- *
- */
-static int
-z90crypt_open(struct inode *inode, struct file *filp)
-{
- struct priv_data *private_data_p;
-
- if (quiesce_z90crypt)
- return -EQUIESCE;
-
- private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL);
- if (!private_data_p) {
-		PRINTK("Memory allocation failed\n");
- return -ENOMEM;
- }
-
- private_data_p->status = STAT_OPEN;
- private_data_p->opener_pid = PID();
- filp->private_data = private_data_p;
- atomic_inc(&total_open);
-
- return 0;
-}
-
-static int
-z90crypt_release(struct inode *inode, struct file *filp)
-{
- struct priv_data *private_data_p = filp->private_data;
-
- PDEBUG("PID %d (filp %p)\n", PID(), filp);
-
- private_data_p->status = STAT_CLOSED;
- memset(private_data_p, 0, sizeof(struct priv_data));
- kfree(private_data_p);
- atomic_dec(&total_open);
-
- return 0;
-}
-
-/*
- * There are two read functions; a compile-time option selects one:
- * without USE_GET_RANDOM_BYTES
- *   => read() always returns -EPERM;
- * otherwise
- *   => read() uses the get_random_bytes() kernel function
- */
-#ifndef USE_GET_RANDOM_BYTES
-/**
- * z90crypt_read will not be supported beyond z90crypt 1.3.1
- */
-static ssize_t
-z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
-{
- PDEBUG("filp %p (PID %d)\n", filp, PID());
- return -EPERM;
-}
-#else // we want to use get_random_bytes
-/**
- * read() just returns a string of random bytes. Since we have no way
- * to generate these cryptographically, we just execute get_random_bytes
- * for the length specified.
- */
-#include <linux/random.h>
-static ssize_t
-z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
-{
- unsigned char *temp_buff;
-
- PDEBUG("filp %p (PID %d)\n", filp, PID());
-
- if (quiesce_z90crypt)
- return -EQUIESCE;
-	if ((ssize_t) count < 0) {
-		PRINTK("Requested random byte count negative: %ld\n",
-		       (long) count);
-		return -EINVAL;
-	}
-	if (count > RESPBUFFSIZE) {
-		PDEBUG("count[%ld] > RESPBUFFSIZE", (long) count);
- return -EINVAL;
- }
- if (count == 0)
- return 0;
- temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
- if (!temp_buff) {
-		PRINTK("Memory allocation failed\n");
- return -ENOMEM;
- }
- get_random_bytes(temp_buff, count);
-
- if (copy_to_user(buf, temp_buff, count) != 0) {
- kfree(temp_buff);
- return -EFAULT;
- }
- kfree(temp_buff);
- return count;
-}
-#endif
-
-/**
- * Write is not allowed
- */
-static ssize_t
-z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
-{
- PDEBUG("filp %p (PID %d)\n", filp, PID());
- return -EPERM;
-}
-
-/**
- * New status functions
- */
-static inline int
-get_status_totalcount(void)
-{
- return z90crypt.hdware_info->hdware_mask.st_count;
-}
-
-static inline int
-get_status_PCICAcount(void)
-{
- return z90crypt.hdware_info->type_mask[PCICA].st_count;
-}
-
-static inline int
-get_status_PCICCcount(void)
-{
- return z90crypt.hdware_info->type_mask[PCICC].st_count;
-}
-
-static inline int
-get_status_PCIXCCcount(void)
-{
- return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
- z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
-}
-
-static inline int
-get_status_PCIXCCMCL2count(void)
-{
- return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
-}
-
-static inline int
-get_status_PCIXCCMCL3count(void)
-{
- return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
-}
-
-static inline int
-get_status_CEX2Ccount(void)
-{
- return z90crypt.hdware_info->type_mask[CEX2C].st_count;
-}
-
-static inline int
-get_status_CEX2Acount(void)
-{
- return z90crypt.hdware_info->type_mask[CEX2A].st_count;
-}
-
-static inline int
-get_status_requestq_count(void)
-{
- return requestq_count;
-}
-
-static inline int
-get_status_pendingq_count(void)
-{
- return pendingq_count;
-}
-
-static inline int
-get_status_totalopen_count(void)
-{
- return atomic_read(&total_open);
-}
-
-static inline int
-get_status_domain_index(void)
-{
- return z90crypt.cdx;
-}
-
-static inline unsigned char *
-get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
-{
- int i, ix;
-
- memcpy(status, z90crypt.hdware_info->device_type_array,
- Z90CRYPT_NUM_APS);
-
- for (i = 0; i < get_status_totalcount(); i++) {
- ix = SHRT2LONG(i);
- if (LONG2DEVPTR(ix)->user_disabled)
- status[ix] = 0x0d;
- }
-
- return status;
-}
-
-static inline unsigned char *
-get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
-{
- int i, ix;
-
- memset(qdepth, 0, Z90CRYPT_NUM_APS);
-
- for (i = 0; i < get_status_totalcount(); i++) {
- ix = SHRT2LONG(i);
- qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
- }
-
- return qdepth;
-}
-
-static inline unsigned int *
-get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
-{
- int i, ix;
-
- memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
-
- for (i = 0; i < get_status_totalcount(); i++) {
- ix = SHRT2LONG(i);
- reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
- }
-
- return reqcnt;
-}
-
-static inline void
-init_work_element(struct work_element *we_p,
- struct priv_data *priv_data, pid_t pid)
-{
- int step;
-
- we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
- /* Come up with a unique id for this caller. */
- step = atomic_inc_return(&z90crypt_step);
- memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
- memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
- we_p->pid = pid;
- we_p->priv_data = priv_data;
- we_p->status[0] = STAT_DEFAULT;
- we_p->audit[0] = 0x00;
- we_p->audit[1] = 0x00;
- we_p->audit[2] = 0x00;
- we_p->resp_buff_size = 0;
- we_p->retcode = 0;
- we_p->devindex = -1;
- we_p->devtype = -1;
- atomic_set(&we_p->alarmrung, 0);
- init_waitqueue_head(&we_p->waitq);
- INIT_LIST_HEAD(&(we_p->liste));
-}
-
-static inline int
-allocate_work_element(struct work_element **we_pp,
- struct priv_data *priv_data_p, pid_t pid)
-{
- struct work_element *we_p;
-
- we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
- if (!we_p)
- return -ENOMEM;
- init_work_element(we_p, priv_data_p, pid);
- *we_pp = we_p;
- return 0;
-}
-
-static inline void
-remove_device(struct device *device_p)
-{
- if (!device_p || (device_p->disabled != 0))
- return;
- device_p->disabled = 1;
- z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
- z90crypt.hdware_info->hdware_mask.disabled_count++;
-}
-
-/**
- * Bitlength limits for each card
- *
- * There are new MCLs which allow more bitlengths. See the table for details.
- * The MCL must be applied and the newer bitlengths enabled for these to work.
- *
- * Card Type Old limit New limit
- * PCICA ??-2048 same (the lower limit is less than 128 bit...)
- * PCICC 512-1024 512-2048
- * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
- * PCIXCC_MCL3 ----- 128-2048
- * CEX2C 512-2048 128-2048
- * CEX2A ??-2048 same (the lower limit is less than 128 bit...)
- *
- * ext_bitlens (extended bitlengths) is a global, since you should not apply an
- * MCL to just one card in a machine. We assume, at first, that all cards have
- * these capabilities.
- */
-int ext_bitlens = 1; // This is global
-#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
-#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
-#define PCICC_MIN_MOD_SIZE 64 // 512 bits
-#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
-#define MAX_MOD_SIZE 256 // 2048 bits
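-
-/**
- * The *_MOD_SIZE limits above are byte counts; multiply by 8 to get
- * the bitlengths quoted in the table, e.g.
- *
- *	PCIXCC_MIN_MOD_SIZE * 8 == 128 bits
- *	MAX_MOD_SIZE * 8 == 2048 bits
- */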
-
-static inline int
-select_device_type(int *dev_type_p, int bytelength)
-{
- static int count = 0;
- int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
- index_to_use;
- struct status *stat;
- if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
- (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
- (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
- (*dev_type_p != ANYDEV))
- return -1;
- if (*dev_type_p != ANYDEV) {
- stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
- if (stat->st_count >
- (stat->disabled_count + stat->user_disabled_count))
- return 0;
- return -1;
- }
-
- /**
- * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
- * speed.
- *
-	 * PCICA and CEX2A do NOT co-exist, so at most one of the two will
-	 * be present.
- */
- stat = &z90crypt.hdware_info->type_mask[PCICA];
- PCICA_avail = stat->st_count -
- (stat->disabled_count + stat->user_disabled_count);
- stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
- PCIXCC_MCL3_avail = stat->st_count -
- (stat->disabled_count + stat->user_disabled_count);
- stat = &z90crypt.hdware_info->type_mask[CEX2C];
- CEX2C_avail = stat->st_count -
- (stat->disabled_count + stat->user_disabled_count);
- stat = &z90crypt.hdware_info->type_mask[CEX2A];
- CEX2A_avail = stat->st_count -
- (stat->disabled_count + stat->user_disabled_count);
- if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
- /**
-		 * bitlength is a factor: PCICA and CEX2A are the most capable,
-		 * even with the new MCL for PCIXCC.
- */
- if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
- (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
- if (PCICA_avail) {
- *dev_type_p = PCICA;
- return 0;
- }
- if (CEX2A_avail) {
- *dev_type_p = CEX2A;
- return 0;
- }
- return -1;
- }
-
- index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
- CEX2C_avail + CEX2A_avail);
- if (index_to_use < PCICA_avail)
- *dev_type_p = PCICA;
- else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
- *dev_type_p = PCIXCC_MCL3;
- else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
- CEX2C_avail))
- *dev_type_p = CEX2C;
- else
- *dev_type_p = CEX2A;
- count++;
- return 0;
- }
-
- /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
- if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
- return -1;
- stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
- if (stat->st_count >
- (stat->disabled_count + stat->user_disabled_count)) {
- *dev_type_p = PCIXCC_MCL2;
- return 0;
- }
-
- /**
- * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
- * (if we don't have the MCL applied and the newer bitlengths enabled)
- * cannot go to a PCICC
- */
- if ((bytelength < PCICC_MIN_MOD_SIZE) ||
- (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
- return -1;
- }
- stat = &z90crypt.hdware_info->type_mask[PCICC];
- if (stat->st_count >
- (stat->disabled_count + stat->user_disabled_count)) {
- *dev_type_p = PCICC;
- return 0;
- }
-
- return -1;
-}
-
-/**
- * Try the selected number, then the selected type (can be ANYDEV)
- */
-static inline int
-select_device(int *dev_type_p, int *device_nr_p, int bytelength)
-{
- int i, indx, devTp, low_count, low_indx;
- struct device_x *index_p;
- struct device *dev_ptr;
-
- PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
- if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
- PDEBUG("trying index = %d\n", *device_nr_p);
- dev_ptr = z90crypt.device_p[*device_nr_p];
-
- if (dev_ptr &&
- (dev_ptr->dev_stat != DEV_GONE) &&
- (dev_ptr->disabled == 0) &&
- (dev_ptr->user_disabled == 0)) {
- PDEBUG("selected by number, index = %d\n",
- *device_nr_p);
- *dev_type_p = dev_ptr->dev_type;
- return *device_nr_p;
- }
- }
- *device_nr_p = -1;
- PDEBUG("trying type = %d\n", *dev_type_p);
- devTp = *dev_type_p;
- if (select_device_type(&devTp, bytelength) == -1) {
- PDEBUG("failed to select by type\n");
- return -1;
- }
- PDEBUG("selected type = %d\n", devTp);
- index_p = &z90crypt.hdware_info->type_x_addr[devTp];
- low_count = 0x0000FFFF;
- low_indx = -1;
- for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
- indx = index_p->device_index[i];
- dev_ptr = z90crypt.device_p[indx];
- if (dev_ptr &&
- (dev_ptr->dev_stat != DEV_GONE) &&
- (dev_ptr->disabled == 0) &&
- (dev_ptr->user_disabled == 0) &&
- (devTp == dev_ptr->dev_type) &&
- (low_count > dev_ptr->dev_caller_count)) {
- low_count = dev_ptr->dev_caller_count;
- low_indx = indx;
- }
- }
- *device_nr_p = low_indx;
- return low_indx;
-}
-
-static inline int
-send_to_crypto_device(struct work_element *we_p)
-{
- struct caller *caller_p;
- struct device *device_p;
- int dev_nr;
- int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
-
- if (!we_p->requestptr)
- return SEN_FATAL_ERROR;
- caller_p = (struct caller *)we_p->requestptr;
- dev_nr = we_p->devindex;
- if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
- if (z90crypt.hdware_info->hdware_mask.st_count != 0)
- return SEN_RETRY;
- else
- return SEN_NOT_AVAIL;
- }
- we_p->devindex = dev_nr;
- device_p = z90crypt.device_p[dev_nr];
- if (!device_p)
- return SEN_NOT_AVAIL;
- if (device_p->dev_type != we_p->devtype)
- return SEN_RETRY;
- if (device_p->dev_caller_count >= device_p->dev_q_depth)
- return SEN_QUEUE_FULL;
- PDEBUG("device number prior to send: %d\n", dev_nr);
- switch (send_to_AP(dev_nr, z90crypt.cdx,
- caller_p->caller_dev_dep_req_l,
- caller_p->caller_dev_dep_req_p)) {
- case DEV_SEN_EXCEPTION:
- PRINTKC("Exception during send to device %d\n", dev_nr);
- z90crypt.terminating = 1;
- return SEN_FATAL_ERROR;
- case DEV_GONE:
- PRINTK("Device %d not available\n", dev_nr);
- remove_device(device_p);
- return SEN_NOT_AVAIL;
- case DEV_EMPTY:
- return SEN_NOT_AVAIL;
- case DEV_NO_WORK:
- return SEN_FATAL_ERROR;
- case DEV_BAD_MESSAGE:
- return SEN_USER_ERROR;
- case DEV_QUEUE_FULL:
- return SEN_QUEUE_FULL;
- default:
- case DEV_ONLINE:
- break;
- }
- list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
- device_p->dev_caller_count++;
- return 0;
-}
-
-/**
- * Send puts the user's work on one of two queues:
- *   the pending queue if the send was successful, or
- *   the request queue if the send failed because the device was full or busy
- */
-static inline int
-z90crypt_send(struct work_element *we_p, const char *buf)
-{
- int rv;
-
- PDEBUG("PID %d\n", PID());
-
- if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
- PDEBUG("PID %d tried to send more work but has outstanding "
- "work.\n", PID());
- return -EWORKPEND;
- }
- we_p->devindex = -1; // Reset device number
- spin_lock_irq(&queuespinlock);
- rv = send_to_crypto_device(we_p);
- switch (rv) {
- case 0:
- we_p->requestsent = jiffies;
- we_p->audit[0] |= FP_SENT;
- list_add_tail(&we_p->liste, &pending_list);
- ++pendingq_count;
- we_p->audit[0] |= FP_PENDING;
- break;
- case SEN_BUSY:
- case SEN_QUEUE_FULL:
- rv = 0;
- we_p->devindex = -1; // any device will do
- we_p->requestsent = jiffies;
- list_add_tail(&we_p->liste, &request_list);
- ++requestq_count;
- we_p->audit[0] |= FP_REQUEST;
- break;
- case SEN_RETRY:
- rv = -ERESTARTSYS;
- break;
- case SEN_NOT_AVAIL:
- PRINTK("*** No devices available.\n");
- rv = we_p->retcode = -ENODEV;
- we_p->status[0] |= STAT_FAILED;
- break;
- case REC_OPERAND_INV:
- case REC_OPERAND_SIZE:
- case REC_EVEN_MOD:
- case REC_INVALID_PAD:
- rv = we_p->retcode = -EINVAL;
- we_p->status[0] |= STAT_FAILED;
- break;
- default:
- we_p->retcode = rv;
- we_p->status[0] |= STAT_FAILED;
- break;
- }
- if (rv != -ERESTARTSYS)
- SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
- spin_unlock_irq(&queuespinlock);
- if (rv == 0)
- tasklet_schedule(&reader_tasklet);
- return rv;
-}
-
-/**
- * process_results copies the completed results from kernel space back
- * to user space.
- */
-static inline int
-z90crypt_process_results(struct work_element *we_p, char __user *buf)
-{
- int rv;
-
- PDEBUG("we_p %p (PID %d)\n", we_p, PID());
-
- LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
- SET_RDWRMASK(we_p->status[0], STAT_READPEND);
-
- rv = 0;
- if (!we_p->buffer) {
- PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
- we_p, PID());
- rv = -ENOBUFF;
- }
-
- if (!rv)
- if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
- PDEBUG("copy_to_user failed: rv = %d\n", rv);
- rv = -EFAULT;
- }
-
- if (!rv)
- rv = we_p->retcode;
- if (!rv)
- if (we_p->resp_buff_size
- && copy_to_user(we_p->resp_addr, we_p->resp_buff,
- we_p->resp_buff_size))
- rv = -EFAULT;
-
- SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
- return rv;
-}
-
-static unsigned char NULL_psmid[8] =
-{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-/**
- * Used in device configuration functions
- */
-#define MAX_RESET 90
-
-/**
- * This is used only for PCICC support
- */
-static inline int
-is_PKCS11_padded(unsigned char *buffer, int length)
-{
- int i;
- if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
- return 0;
- for (i = 2; i < length; i++)
- if (buffer[i] != 0xFF)
- break;
- if ((i < 10) || (i == length))
- return 0;
- if (buffer[i] != 0x00)
- return 0;
- return 1;
-}
-
-/**
- * This is used only for PCICC support
- */
-static inline int
-is_PKCS12_padded(unsigned char *buffer, int length)
-{
- int i;
- if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
- return 0;
- for (i = 2; i < length; i++)
- if (buffer[i] == 0x00)
- break;
- if ((i < 10) || (i == length))
- return 0;
- if (buffer[i] != 0x00)
- return 0;
- return 1;
-}
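-
-/**
- * For reference (PKCS #1 v1.5, stated here for illustration): a
- * block-type-02 buffer, which is_PKCS12_padded() recognizes, looks
- * like
- *
- *	0x00 0x02 <at least 8 nonzero padding bytes> 0x00 <data>
- *
- * while block type 01 (is_PKCS11_padded()) uses 0xFF padding bytes
- * instead; hence both checks above insist on i >= 10.
- */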
-
-/**
- * builds struct caller and converts message from generic format to
- * device-dependent format
- * func is ICARSAMODEXPO or ICARSACRT
- * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
- */
-static inline int
-build_caller(struct work_element *we_p, short function)
-{
- int rv;
- struct caller *caller_p = (struct caller *)we_p->requestptr;
-
- if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
- (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
- (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
- return SEN_NOT_AVAIL;
-
- memcpy(caller_p->caller_id, we_p->caller_id,
- sizeof(caller_p->caller_id));
- caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
- caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
- caller_p->caller_buf_p = we_p->buffer;
- INIT_LIST_HEAD(&(caller_p->caller_liste));
-
- rv = convert_request(we_p->buffer, we_p->funccode, function,
- z90crypt.cdx, we_p->devtype,
- &caller_p->caller_dev_dep_req_l,
- caller_p->caller_dev_dep_req_p);
- if (rv) {
- if (rv == SEN_NOT_AVAIL)
-			PDEBUG("request can't be processed on available hardware\n");
- else
- PRINTK("Error from convert_request: %d\n", rv);
- }
- else
-		memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id, 8);
- return rv;
-}
-
-static inline void
-unbuild_caller(struct device *device_p, struct caller *caller_p)
-{
- if (!caller_p)
- return;
- if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
- if (!list_empty(&caller_p->caller_liste)) {
- list_del_init(&caller_p->caller_liste);
- device_p->dev_caller_count--;
- }
- memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
-}
-
-static inline int
-get_crypto_request_buffer(struct work_element *we_p)
-{
- struct ica_rsa_modexpo *mex_p;
- struct ica_rsa_modexpo_crt *crt_p;
- unsigned char *temp_buffer;
- short function;
- int rv;
-
- mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
- crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
-
- PDEBUG("device type input = %d\n", we_p->devtype);
-
- if (z90crypt.terminating)
- return REC_NO_RESPONSE;
- if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
- PRINTK("psmid zeroes\n");
- return SEN_FATAL_ERROR;
- }
- if (!we_p->buffer) {
- PRINTK("buffer pointer NULL\n");
- return SEN_USER_ERROR;
- }
- if (!we_p->requestptr) {
- PRINTK("caller pointer NULL\n");
- return SEN_USER_ERROR;
- }
-
- if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
- (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
- (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
- (we_p->devtype != ANYDEV)) {
- PRINTK("invalid device type\n");
- return SEN_USER_ERROR;
- }
-
- if ((mex_p->inputdatalength < 1) ||
- (mex_p->inputdatalength > MAX_MOD_SIZE)) {
- PRINTK("inputdatalength[%d] is not valid\n",
- mex_p->inputdatalength);
- return SEN_USER_ERROR;
- }
-
- if (mex_p->outputdatalength < mex_p->inputdatalength) {
- PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
- mex_p->outputdatalength, mex_p->inputdatalength);
- return SEN_USER_ERROR;
- }
-
- if (!mex_p->inputdata || !mex_p->outputdata) {
-		PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
-			mex_p->inputdata, mex_p->outputdata);
- return SEN_USER_ERROR;
- }
-
- /**
- * As long as outputdatalength is big enough, we can set the
- * outputdatalength equal to the inputdatalength, since that is the
- * number of bytes we will copy in any case
- */
- mex_p->outputdatalength = mex_p->inputdatalength;
-
- rv = 0;
- switch (we_p->funccode) {
- case ICARSAMODEXPO:
- if (!mex_p->b_key || !mex_p->n_modulus)
- rv = SEN_USER_ERROR;
- break;
- case ICARSACRT:
- if (!IS_EVEN(crt_p->inputdatalength)) {
- PRINTK("inputdatalength[%d] is odd, CRT form\n",
- crt_p->inputdatalength);
- rv = SEN_USER_ERROR;
- break;
- }
- if (!crt_p->bp_key ||
- !crt_p->bq_key ||
- !crt_p->np_prime ||
- !crt_p->nq_prime ||
- !crt_p->u_mult_inv) {
- PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
- crt_p->bp_key, crt_p->bq_key,
- crt_p->np_prime, crt_p->nq_prime,
- crt_p->u_mult_inv);
- rv = SEN_USER_ERROR;
- }
- break;
- default:
- PRINTK("bad func = %d\n", we_p->funccode);
- rv = SEN_USER_ERROR;
- break;
- }
- if (rv != 0)
- return rv;
-
- if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
- return SEN_NOT_AVAIL;
-
- temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
- sizeof(struct caller);
- if (copy_from_user(temp_buffer, mex_p->inputdata,
- mex_p->inputdatalength) != 0)
- return SEN_RELEASED;
-
- function = PCI_FUNC_KEY_ENCRYPT;
- switch (we_p->devtype) {
- /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
- case PCICA:
- case CEX2A:
- function = PCI_FUNC_KEY_ENCRYPT;
- break;
- /**
-	 * PCIXCC_MCL2 does all Mod-Expo forms with a simple RSA mod-expo
-	 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
-	 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a
-	 * simple RSA mod-expo operation.
- */
- case PCIXCC_MCL2:
- if (we_p->funccode == ICARSAMODEXPO)
- function = PCI_FUNC_KEY_ENCRYPT;
- else
- function = PCI_FUNC_KEY_DECRYPT;
- break;
- case PCIXCC_MCL3:
- case CEX2C:
- if (we_p->funccode == ICARSAMODEXPO)
- function = PCI_FUNC_KEY_ENCRYPT;
- else
- function = PCI_FUNC_KEY_DECRYPT;
- break;
- /**
- * PCICC does everything as a PKCS-1.2 format request
- */
- case PCICC:
-		/* PCICC cannot handle input that is PKCS #1.1 padded */
- if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
- return SEN_NOT_AVAIL;
- }
- if (we_p->funccode == ICARSAMODEXPO) {
- if (is_PKCS12_padded(temp_buffer,
- mex_p->inputdatalength))
- function = PCI_FUNC_KEY_ENCRYPT;
- else
- function = PCI_FUNC_KEY_DECRYPT;
- } else
- /* all CRT forms are decrypts */
- function = PCI_FUNC_KEY_DECRYPT;
- break;
- }
- PDEBUG("function: %04x\n", function);
- rv = build_caller(we_p, function);
- PDEBUG("rv from build_caller = %d\n", rv);
- return rv;
-}
-
-static inline int
-z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
- const char __user *buffer)
-{
- int rv;
-
- we_p->devindex = -1;
- if (funccode == ICARSAMODEXPO)
- we_p->buff_size = sizeof(struct ica_rsa_modexpo);
- else
- we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
-
- if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
- return -EFAULT;
-
- we_p->audit[0] |= FP_COPYFROM;
- SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
- we_p->funccode = funccode;
- we_p->devtype = -1;
- we_p->audit[0] |= FP_BUFFREQ;
- rv = get_crypto_request_buffer(we_p);
- switch (rv) {
- case 0:
- we_p->audit[0] |= FP_BUFFGOT;
- break;
- case SEN_USER_ERROR:
- rv = -EINVAL;
- break;
- case SEN_QUEUE_FULL:
- rv = 0;
- break;
- case SEN_RELEASED:
- rv = -EFAULT;
- break;
- case REC_NO_RESPONSE:
- rv = -ENODEV;
- break;
- case SEN_NOT_AVAIL:
- case EGETBUFF:
- rv = -EGETBUFF;
- break;
- default:
- PRINTK("rv = %d\n", rv);
- rv = -EGETBUFF;
- break;
- }
- if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
- SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
- return rv;
-}
-
-static inline void
-purge_work_element(struct work_element *we_p)
-{
- struct list_head *lptr;
-
- spin_lock_irq(&queuespinlock);
- list_for_each(lptr, &request_list) {
- if (lptr == &we_p->liste) {
- list_del_init(lptr);
- requestq_count--;
- break;
- }
- }
- list_for_each(lptr, &pending_list) {
- if (lptr == &we_p->liste) {
- list_del_init(lptr);
- pendingq_count--;
- break;
- }
- }
- spin_unlock_irq(&queuespinlock);
-}
-
-/**
- * Build the request and send it.
- */
-static inline int
-z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
- unsigned int cmd, unsigned long arg)
-{
- struct work_element *we_p;
- int rv;
-
- if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
- PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
- return rv;
- }
- if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
- PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
- if (!rv)
- if ((rv = z90crypt_send(we_p, (const char *)arg)))
- PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
- if (!rv) {
- we_p->audit[0] |= FP_ASLEEP;
- wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
- we_p->audit[0] |= FP_AWAKE;
- rv = we_p->retcode;
- }
- if (!rv)
- rv = z90crypt_process_results(we_p, (char __user *)arg);
-
- if ((we_p->status[0] & STAT_FAILED)) {
- switch (rv) {
- /**
- * EINVAL *after* receive is almost always a padding error or
- * length error issued by a coprocessor (not an accelerator).
- * We convert this return value to -EGETBUFF which should
- * trigger a fallback to software.
- */
- case -EINVAL:
- if ((we_p->devtype != PCICA) &&
- (we_p->devtype != CEX2A))
- rv = -EGETBUFF;
- break;
- case -ETIMEOUT:
- if (z90crypt.mask.st_count > 0)
- rv = -ERESTARTSYS; // retry with another
- else
- rv = -ENODEV; // no cards left
- /* fall through to clean up request queue */
- case -ERESTARTSYS:
- case -ERELEASED:
- switch (CHK_RDWRMASK(we_p->status[0])) {
- case STAT_WRITTEN:
- purge_work_element(we_p);
- break;
- case STAT_READPEND:
- case STAT_NOWORK:
- default:
- break;
- }
- break;
- default:
- we_p->status[0] ^= STAT_FAILED;
- break;
- }
- }
- free_page((long)we_p);
- return rv;
-}
-
-/**
- * This function is a little long, but it's really just one large switch
- * statement.
- */
-static long
-z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct priv_data *private_data_p = filp->private_data;
- unsigned char *status;
- unsigned char *qdepth;
- unsigned int *reqcnt;
- struct ica_z90_status *pstat;
- int ret, i, loopLim, tempstat;
- static int deprecated_msg_count1 = 0;
- static int deprecated_msg_count2 = 0;
-
- PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
- PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
- cmd,
- !_IOC_DIR(cmd) ? "NO"
- : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
- : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
- : "WR")),
- _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
-
- if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
- PRINTK("cmd 0x%08X contains bad magic\n", cmd);
- return -ENOTTY;
- }
-
- ret = 0;
- switch (cmd) {
- case ICARSAMODEXPO:
- case ICARSACRT:
- if (quiesce_z90crypt) {
- ret = -EQUIESCE;
- break;
- }
- ret = -ENODEV; // Default if no devices
- loopLim = z90crypt.hdware_info->hdware_mask.st_count -
- (z90crypt.hdware_info->hdware_mask.disabled_count +
- z90crypt.hdware_info->hdware_mask.user_disabled_count);
- for (i = 0; i < loopLim; i++) {
- ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
- if (ret != -ERESTARTSYS)
- break;
- }
- if (ret == -ERESTARTSYS)
- ret = -ENODEV;
- break;
-
- case Z90STAT_TOTALCOUNT:
- tempstat = get_status_totalcount();
-		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_PCICACOUNT:
- tempstat = get_status_PCICAcount();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_PCICCCOUNT:
- tempstat = get_status_PCICCcount();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_PCIXCCMCL2COUNT:
- tempstat = get_status_PCIXCCMCL2count();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_PCIXCCMCL3COUNT:
- tempstat = get_status_PCIXCCMCL3count();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_CEX2CCOUNT:
- tempstat = get_status_CEX2Ccount();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_CEX2ACOUNT:
- tempstat = get_status_CEX2Acount();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_REQUESTQ_COUNT:
- tempstat = get_status_requestq_count();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_PENDINGQ_COUNT:
- tempstat = get_status_pendingq_count();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_TOTALOPEN_COUNT:
- tempstat = get_status_totalopen_count();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_DOMAIN_INDEX:
- tempstat = get_status_domain_index();
- if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90STAT_STATUS_MASK:
- status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
- if (!status) {
- PRINTK("kmalloc for status failed!\n");
- ret = -ENOMEM;
- break;
- }
- get_status_status_mask(status);
- if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
- != 0)
- ret = -EFAULT;
- kfree(status);
- break;
-
- case Z90STAT_QDEPTH_MASK:
- qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
- if (!qdepth) {
- PRINTK("kmalloc for qdepth failed!\n");
- ret = -ENOMEM;
- break;
- }
- get_status_qdepth_mask(qdepth);
- if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
- ret = -EFAULT;
- kfree(qdepth);
- break;
-
- case Z90STAT_PERDEV_REQCNT:
- reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
- if (!reqcnt) {
- PRINTK("kmalloc for reqcnt failed!\n");
- ret = -ENOMEM;
- break;
- }
- get_status_perdevice_reqcnt(reqcnt);
- if (copy_to_user((char __user *) arg, reqcnt,
- Z90CRYPT_NUM_APS * sizeof(int)) != 0)
- ret = -EFAULT;
- kfree(reqcnt);
- break;
-
- /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
- case ICAZ90STATUS:
- if (deprecated_msg_count1 < 20) {
- PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
- deprecated_msg_count1++;
- if (deprecated_msg_count1 == 20)
- PRINTK("No longer issuing messages related to "
- "deprecated call to ICAZ90STATUS.\n");
- }
-
- pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
- if (!pstat) {
- PRINTK("kmalloc for pstat failed!\n");
- ret = -ENOMEM;
- break;
- }
-
- pstat->totalcount = get_status_totalcount();
- pstat->leedslitecount = get_status_PCICAcount();
- pstat->leeds2count = get_status_PCICCcount();
- pstat->requestqWaitCount = get_status_requestq_count();
- pstat->pendingqWaitCount = get_status_pendingq_count();
- pstat->totalOpenCount = get_status_totalopen_count();
- pstat->cryptoDomain = get_status_domain_index();
- get_status_status_mask(pstat->status);
- get_status_qdepth_mask(pstat->qdepth);
-
- if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
- sizeof(struct ica_z90_status)) != 0)
- ret = -EFAULT;
- kfree(pstat);
- break;
-
- /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
- case Z90STAT_PCIXCCCOUNT:
- if (deprecated_msg_count2 < 20) {
- PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
- deprecated_msg_count2++;
- if (deprecated_msg_count2 == 20)
- PRINTK("No longer issuing messages about depre"
- "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
- }
-
- tempstat = get_status_PCIXCCcount();
-		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
- ret = -EFAULT;
- break;
-
- case Z90QUIESCE:
- if (current->euid != 0) {
- PRINTK("QUIESCE fails: euid %d\n",
- current->euid);
- ret = -EACCES;
- } else {
- PRINTK("QUIESCE device from PID %d\n", PID());
- quiesce_z90crypt = 1;
- }
- break;
-
- default:
- /* user passed an invalid IOCTL number */
- PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
- ret = -ENOTTY;
- break;
- }
-
- return ret;
-}
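/*
 * Illustrative user-space sketch (not part of the patch): how an
 * application would drive the status ioctls handled above. The device
 * node name and a z90crypt.h header exporting the Z90STAT_* numbers
 * are assumptions, not taken from this file.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "z90crypt.h"		/* assumed to define Z90STAT_TOTALCOUNT */

int main(void)
{
	int fd, total = 0;

	fd = open("/dev/z90crypt", O_RDWR);	/* assumed node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Each count ioctl copies a single int back to user space. */
	if (ioctl(fd, Z90STAT_TOTALCOUNT, &total) == 0)
		printf("crypto devices online: %d\n", total);
	close(fd);
	return 0;
}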
-
-static inline int
-sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
-{
- int hl, i;
-
- hl = 0;
- for (i = 0; i < len; i++)
- hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
- hl += sprintf(outaddr+hl, " ");
-
- return hl;
-}
-
-static inline int
-sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
-{
- int hl, inl, c, cx;
-
- hl = sprintf(outaddr, " ");
- inl = 0;
- for (c = 0; c < (len / 16); c++) {
- hl += sprintcl(outaddr+hl, addr+inl, 16);
- inl += 16;
- }
-
- cx = len%16;
- if (cx) {
- hl += sprintcl(outaddr+hl, addr+inl, cx);
- inl += cx;
- }
-
- hl += sprintf(outaddr+hl, "\n");
-
- return hl;
-}
-
-static inline int
-sprinthx(unsigned char *title, unsigned char *outaddr,
- unsigned char *addr, unsigned int len)
-{
- int hl, inl, r, rx;
-
- hl = sprintf(outaddr, "\n%s\n", title);
- inl = 0;
- for (r = 0; r < (len / 64); r++) {
- hl += sprintrw(outaddr+hl, addr+inl, 64);
- inl += 64;
- }
- rx = len % 64;
- if (rx) {
- hl += sprintrw(outaddr+hl, addr+inl, rx);
- inl += rx;
- }
-
- hl += sprintf(outaddr+hl, "\n");
-
- return hl;
-}
-
-static inline int
-sprinthx4(unsigned char *title, unsigned char *outaddr,
- unsigned int *array, unsigned int len)
-{
- int hl, r;
-
- hl = sprintf(outaddr, "\n%s\n", title);
-
- for (r = 0; r < len; r++) {
- if ((r % 8) == 0)
- hl += sprintf(outaddr+hl, " ");
- hl += sprintf(outaddr+hl, "%08X ", array[r]);
- if ((r % 8) == 7)
- hl += sprintf(outaddr+hl, "\n");
- }
-
- hl += sprintf(outaddr+hl, "\n");
-
- return hl;
-}
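/*
 * Illustrative stand-alone sketch (not part of the patch) of the dump
 * format the helpers above produce: sprintcl() emits one hex digit per
 * byte ("%01x") with a space after each group, sprintrw() packs
 * 16-byte groups into 64-byte rows, and sprinthx() prepends the title.
 */
#include <stdio.h>

int main(void)
{
	unsigned char mask[5] = { 1, 2, 0, 3, 0 };
	char line[32];
	int hl = 0, i;

	for (i = 0; i < 5; i++)		/* the core of sprintcl() */
		hl += sprintf(line + hl, "%01x", mask[i]);
	printf("\nOnline devices\n    %s \n", line);	/* "    12030 " */
	return 0;
}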
-
-static int
-z90crypt_status(char *resp_buff, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- unsigned char *workarea;
- int len;
-
- /* resp_buff is a page. Use the right half for a work area */
- workarea = resp_buff+2000;
- len = 0;
- len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
- z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
- len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
- get_status_domain_index());
- len += sprintf(resp_buff+len, "Total device count: %d\n",
- get_status_totalcount());
- len += sprintf(resp_buff+len, "PCICA count: %d\n",
- get_status_PCICAcount());
- len += sprintf(resp_buff+len, "PCICC count: %d\n",
- get_status_PCICCcount());
- len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
- get_status_PCIXCCMCL2count());
- len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
- get_status_PCIXCCMCL3count());
- len += sprintf(resp_buff+len, "CEX2C count: %d\n",
- get_status_CEX2Ccount());
- len += sprintf(resp_buff+len, "CEX2A count: %d\n",
- get_status_CEX2Acount());
- len += sprintf(resp_buff+len, "requestq count: %d\n",
- get_status_requestq_count());
- len += sprintf(resp_buff+len, "pendingq count: %d\n",
- get_status_pendingq_count());
- len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
- get_status_totalopen_count());
- len += sprinthx(
- "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
- "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
- resp_buff+len,
- get_status_status_mask(workarea),
- Z90CRYPT_NUM_APS);
- len += sprinthx("Waiting work element counts",
- resp_buff+len,
- get_status_qdepth_mask(workarea),
- Z90CRYPT_NUM_APS);
- len += sprinthx4(
- "Per-device successfully completed request counts",
- resp_buff+len,
- get_status_perdevice_reqcnt((unsigned int *)workarea),
- Z90CRYPT_NUM_APS);
- *eof = 1;
- memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
- return len;
-}
-
-static inline void
-disable_card(int card_index)
-{
- struct device *devp;
-
- devp = LONG2DEVPTR(card_index);
- if (!devp || devp->user_disabled)
- return;
- devp->user_disabled = 1;
- z90crypt.hdware_info->hdware_mask.user_disabled_count++;
- if (devp->dev_type == -1)
- return;
- z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
-}
-
-static inline void
-enable_card(int card_index)
-{
- struct device *devp;
-
- devp = LONG2DEVPTR(card_index);
- if (!devp || !devp->user_disabled)
- return;
- devp->user_disabled = 0;
- z90crypt.hdware_info->hdware_mask.user_disabled_count--;
- if (devp->dev_type == -1)
- return;
- z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
-}
-
-static int
-z90crypt_status_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- int j, eol;
- unsigned char *lbuf, *ptr;
- unsigned int local_count;
-
-#define LBUFSIZE 1200
-	if (count <= 0)
-		return 0;
-
-	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
-	if (!lbuf) {
-		PRINTK("kmalloc failed!\n");
-		return 0;
-	}
-
-	local_count = UMIN((unsigned int)count, LBUFSIZE-1);
-
- if (copy_from_user(lbuf, buffer, local_count) != 0) {
- kfree(lbuf);
- return -EFAULT;
- }
-
- lbuf[local_count] = '\0';
-
- ptr = strstr(lbuf, "Online devices");
- if (ptr == 0) {
- PRINTK("Unable to parse data (missing \"Online devices\")\n");
- kfree(lbuf);
- return count;
- }
-
- ptr = strstr(ptr, "\n");
- if (ptr == 0) {
- PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
- kfree(lbuf);
- return count;
- }
- ptr++;
-
- if (strstr(ptr, "Waiting work element counts") == NULL) {
- PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
- kfree(lbuf);
- return count;
- }
-
- j = 0;
- eol = 0;
- while ((j < 64) && (*ptr != '\0')) {
- switch (*ptr) {
- case '\t':
- case ' ':
- break;
- case '\n':
- default:
- eol = 1;
- break;
- case '0': // no device
- case '1': // PCICA
- case '2': // PCICC
- case '3': // PCIXCC_MCL2
- case '4': // PCIXCC_MCL3
- case '5': // CEX2C
- case '6': // CEX2A
- j++;
- break;
- case 'd':
- case 'D':
- disable_card(j);
- j++;
- break;
- case 'e':
- case 'E':
- enable_card(j);
- j++;
- break;
- }
- if (eol)
- break;
- ptr++;
- }
-
- kfree(lbuf);
- return count;
-}
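/*
 * Illustrative user-space counterpart to the parser above (not part of
 * the patch; the proc path is an assumption). The handler only needs
 * the "Online devices" header, one command character per card slot
 * ('d'/'D' disable, 'e'/'E' enable, digits skip a slot) and the
 * "Waiting work element counts" trailer, so a minimal buffer works:
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	static const char cmd[] =
		"Online devices\n"
		"d\n"			/* disable card 0, stop at EOL */
		"Waiting work element counts\n";
	int fd = open("/proc/driver/z90crypt", O_WRONLY); /* assumed path */

	if (fd < 0)
		return 1;
	write(fd, cmd, sizeof(cmd) - 1);
	close(fd);
	return 0;
}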
-
-/**
- * Functions that run under a timer, with no process id
- *
- * The task functions:
- * z90crypt_reader_task
- * helper_send_work
- * helper_handle_work_element
- * helper_receive_rc
- * z90crypt_config_task
- * z90crypt_cleanup_task
- *
- * Helper functions:
- * z90crypt_schedule_reader_timer
- * z90crypt_schedule_reader_task
- * z90crypt_schedule_config_task
- * z90crypt_schedule_cleanup_task
- */
-static inline int
-receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
- unsigned char *buff, unsigned char __user **dest_p_p)
-{
- int dv, rv;
- struct device *dev_ptr;
- struct caller *caller_p;
- struct ica_rsa_modexpo *icaMsg_p;
- struct list_head *ptr, *tptr;
-
- memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
-
- if (z90crypt.terminating)
- return REC_FATAL_ERROR;
-
- caller_p = 0;
- dev_ptr = z90crypt.device_p[index];
- rv = 0;
- do {
- if (!dev_ptr || dev_ptr->disabled) {
- rv = REC_NO_WORK; // a disabled device can't return work
- break;
- }
- if (dev_ptr->dev_self_x != index) {
- PRINTKC("Corrupt dev ptr\n");
- z90crypt.terminating = 1;
- rv = REC_FATAL_ERROR;
- break;
- }
- if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
- dv = DEV_REC_EXCEPTION;
- PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
- dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
- } else {
- PDEBUG("Dequeue called for device %d\n", index);
- dv = receive_from_AP(index, z90crypt.cdx,
- dev_ptr->dev_resp_l,
- dev_ptr->dev_resp_p, psmid);
- }
- switch (dv) {
- case DEV_REC_EXCEPTION:
- rv = REC_FATAL_ERROR;
- z90crypt.terminating = 1;
- PRINTKC("Exception in receive from device %d\n",
- index);
- break;
- case DEV_ONLINE:
- rv = 0;
- break;
- case DEV_EMPTY:
- rv = REC_EMPTY;
- break;
- case DEV_NO_WORK:
- rv = REC_NO_WORK;
- break;
- case DEV_BAD_MESSAGE:
- case DEV_GONE:
- case REC_HARDWAR_ERR:
- default:
- rv = REC_NO_RESPONSE;
- break;
- }
- if (rv)
- break;
- if (dev_ptr->dev_caller_count <= 0) {
- rv = REC_USER_GONE;
- break;
- }
-
- list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
- caller_p = list_entry(ptr, struct caller, caller_liste);
- if (!memcmp(caller_p->caller_id, psmid,
- sizeof(caller_p->caller_id))) {
- if (!list_empty(&caller_p->caller_liste)) {
- list_del_init(ptr);
- dev_ptr->dev_caller_count--;
- break;
- }
- }
- caller_p = 0;
- }
- if (!caller_p) {
- PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
- "%02X%02X%02X in device list\n",
- psmid[0], psmid[1], psmid[2], psmid[3],
- psmid[4], psmid[5], psmid[6], psmid[7]);
- rv = REC_USER_GONE;
- break;
- }
-
- PDEBUG("caller_p after successful receive: %p\n", caller_p);
- rv = convert_response(dev_ptr->dev_resp_p,
- caller_p->caller_buf_p, buff_len_p, buff);
- switch (rv) {
- case REC_USE_PCICA:
- break;
- case REC_OPERAND_INV:
- case REC_OPERAND_SIZE:
- case REC_EVEN_MOD:
- case REC_INVALID_PAD:
- PDEBUG("device %d: 'user error' %d\n", index, rv);
- break;
- case WRONG_DEVICE_TYPE:
- case REC_HARDWAR_ERR:
- case REC_BAD_MESSAGE:
- PRINTKW("device %d: hardware error %d\n", index, rv);
- rv = REC_NO_RESPONSE;
- break;
- default:
- PDEBUG("device %d: rv = %d\n", index, rv);
- break;
- }
- } while (0);
-
- switch (rv) {
- case 0:
- PDEBUG("Successful receive from device %d\n", index);
- icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
- *dest_p_p = icaMsg_p->outputdata;
- if (*buff_len_p == 0)
- PRINTK("Zero *buff_len_p\n");
- break;
- case REC_NO_RESPONSE:
- PRINTKW("Removing device %d from availability\n", index);
- remove_device(dev_ptr);
- break;
- }
-
- if (caller_p)
- unbuild_caller(dev_ptr, caller_p);
-
- return rv;
-}
-
-static inline void
-helper_send_work(int index)
-{
- struct work_element *rq_p;
- int rv;
-
- if (list_empty(&request_list))
- return;
- requestq_count--;
- rq_p = list_entry(request_list.next, struct work_element, liste);
- list_del_init(&rq_p->liste);
- rq_p->audit[1] |= FP_REMREQUEST;
- if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
- rq_p->devindex = SHRT2LONG(index);
- rv = send_to_crypto_device(rq_p);
- if (rv == 0) {
- rq_p->requestsent = jiffies;
- rq_p->audit[0] |= FP_SENT;
- list_add_tail(&rq_p->liste, &pending_list);
- ++pendingq_count;
- rq_p->audit[0] |= FP_PENDING;
- } else {
- switch (rv) {
- case REC_OPERAND_INV:
- case REC_OPERAND_SIZE:
- case REC_EVEN_MOD:
- case REC_INVALID_PAD:
- rq_p->retcode = -EINVAL;
- break;
- case SEN_NOT_AVAIL:
- case SEN_RETRY:
- case REC_NO_RESPONSE:
- default:
- if (z90crypt.mask.st_count > 1)
-				rq_p->retcode = -ERESTARTSYS;
- else
- rq_p->retcode = -ENODEV;
- break;
- }
- rq_p->status[0] |= STAT_FAILED;
- rq_p->audit[1] |= FP_AWAKENING;
- atomic_set(&rq_p->alarmrung, 1);
- wake_up(&rq_p->waitq);
- }
- } else {
- if (z90crypt.mask.st_count > 1)
- rq_p->retcode = -ERESTARTSYS;
- else
- rq_p->retcode = -ENODEV;
- rq_p->status[0] |= STAT_FAILED;
- rq_p->audit[1] |= FP_AWAKENING;
- atomic_set(&rq_p->alarmrung, 1);
- wake_up(&rq_p->waitq);
- }
-}
-
-static inline void
-helper_handle_work_element(int index, unsigned char psmid[8], int rc,
- int buff_len, unsigned char *buff,
- unsigned char __user *resp_addr)
-{
- struct work_element *pq_p;
- struct list_head *lptr, *tptr;
-
- pq_p = 0;
- list_for_each_safe(lptr, tptr, &pending_list) {
- pq_p = list_entry(lptr, struct work_element, liste);
- if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
- list_del_init(lptr);
- pendingq_count--;
- pq_p->audit[1] |= FP_NOTPENDING;
- break;
- }
- pq_p = 0;
- }
-
- if (!pq_p) {
- PRINTK("device %d has work but no caller exists on pending Q\n",
- SHRT2LONG(index));
- return;
- }
-
- switch (rc) {
- case 0:
- pq_p->resp_buff_size = buff_len;
- pq_p->audit[1] |= FP_RESPSIZESET;
- if (buff_len) {
- pq_p->resp_addr = resp_addr;
- pq_p->audit[1] |= FP_RESPADDRCOPIED;
- memcpy(pq_p->resp_buff, buff, buff_len);
- pq_p->audit[1] |= FP_RESPBUFFCOPIED;
- }
- break;
- case REC_OPERAND_INV:
- case REC_OPERAND_SIZE:
- case REC_EVEN_MOD:
- case REC_INVALID_PAD:
- PDEBUG("-EINVAL after application error %d\n", rc);
- pq_p->retcode = -EINVAL;
- pq_p->status[0] |= STAT_FAILED;
- break;
- case REC_USE_PCICA:
- pq_p->retcode = -ERESTARTSYS;
- pq_p->status[0] |= STAT_FAILED;
- break;
- case REC_NO_RESPONSE:
- default:
- if (z90crypt.mask.st_count > 1)
- pq_p->retcode = -ERESTARTSYS;
- else
- pq_p->retcode = -ENODEV;
- pq_p->status[0] |= STAT_FAILED;
- break;
- }
- if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
- pq_p->audit[1] |= FP_AWAKENING;
- atomic_set(&pq_p->alarmrung, 1);
- wake_up(&pq_p->waitq);
- }
-}
-
-/**
- * return TRUE if the work element should be removed from the queue
- */
-static inline int
-helper_receive_rc(int index, int *rc_p)
-{
- switch (*rc_p) {
- case 0:
- case REC_OPERAND_INV:
- case REC_OPERAND_SIZE:
- case REC_EVEN_MOD:
- case REC_INVALID_PAD:
- case REC_USE_PCICA:
- break;
-
- case REC_BUSY:
- case REC_NO_WORK:
- case REC_EMPTY:
- case REC_RETRY_DEV:
- case REC_FATAL_ERROR:
- return 0;
-
- case REC_NO_RESPONSE:
- break;
-
- default:
- PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
- *rc_p, SHRT2LONG(index));
- *rc_p = REC_NO_RESPONSE;
- break;
- }
- return 1;
-}
-
-static inline void
-z90crypt_schedule_reader_timer(void)
-{
- if (timer_pending(&reader_timer))
- return;
- if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
- PRINTK("Timer pending while modifying reader timer\n");
-}
-
-static void
-z90crypt_reader_task(unsigned long ptr)
-{
- int workavail, index, rc, buff_len;
- unsigned char psmid[8];
- unsigned char __user *resp_addr;
- static unsigned char buff[1024];
-
- /**
- * we use workavail = 2 to ensure 2 passes with nothing dequeued before
- * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
- * loop, there is no work remaining on the queues.
- */
- resp_addr = 0;
- workavail = 2;
- buff_len = 0;
- while (workavail) {
- workavail--;
- rc = 0;
- spin_lock_irq(&queuespinlock);
- memset(buff, 0x00, sizeof(buff));
-
- /* Dequeue once from each device in round robin. */
- for (index = 0; index < z90crypt.mask.st_count; index++) {
- PDEBUG("About to receive.\n");
- rc = receive_from_crypto_device(SHRT2LONG(index),
- psmid,
- &buff_len,
- buff,
- &resp_addr);
- PDEBUG("Dequeued: rc = %d.\n", rc);
-
- if (helper_receive_rc(index, &rc)) {
- if (rc != REC_NO_RESPONSE) {
- helper_send_work(index);
- workavail = 2;
- }
-
- helper_handle_work_element(index, psmid, rc,
- buff_len, buff,
- resp_addr);
- }
-
- if (rc == REC_FATAL_ERROR)
- PRINTKW("REC_FATAL_ERROR from device %d!\n",
- SHRT2LONG(index));
- }
- spin_unlock_irq(&queuespinlock);
- }
-
- if (pendingq_count + requestq_count)
- z90crypt_schedule_reader_timer();
-}
-
-static inline void
-z90crypt_schedule_config_task(unsigned int expiration)
-{
- if (timer_pending(&config_timer))
- return;
- if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
- PRINTK("Timer pending while modifying config timer\n");
-}
-
-static void
-z90crypt_config_task(unsigned long ptr)
-{
- int rc;
-
- PDEBUG("jiffies %ld\n", jiffies);
-
- if ((rc = refresh_z90crypt(&z90crypt.cdx)))
- PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
- /* If return was fatal, don't bother reconfiguring */
- if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
- z90crypt_schedule_config_task(CONFIGTIME);
-}
-
-static inline void
-z90crypt_schedule_cleanup_task(void)
-{
- if (timer_pending(&cleanup_timer))
- return;
- if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
- PRINTK("Timer pending while modifying cleanup timer\n");
-}
-
-static inline void
-helper_drain_queues(void)
-{
- struct work_element *pq_p;
- struct list_head *lptr, *tptr;
-
- list_for_each_safe(lptr, tptr, &pending_list) {
- pq_p = list_entry(lptr, struct work_element, liste);
- pq_p->retcode = -ENODEV;
- pq_p->status[0] |= STAT_FAILED;
- unbuild_caller(LONG2DEVPTR(pq_p->devindex),
- (struct caller *)pq_p->requestptr);
- list_del_init(lptr);
- pendingq_count--;
- pq_p->audit[1] |= FP_NOTPENDING;
- pq_p->audit[1] |= FP_AWAKENING;
- atomic_set(&pq_p->alarmrung, 1);
- wake_up(&pq_p->waitq);
- }
-
- list_for_each_safe(lptr, tptr, &request_list) {
- pq_p = list_entry(lptr, struct work_element, liste);
- pq_p->retcode = -ENODEV;
- pq_p->status[0] |= STAT_FAILED;
- list_del_init(lptr);
- requestq_count--;
- pq_p->audit[1] |= FP_REMREQUEST;
- pq_p->audit[1] |= FP_AWAKENING;
- atomic_set(&pq_p->alarmrung, 1);
- wake_up(&pq_p->waitq);
- }
-}
-
-static inline void
-helper_timeout_requests(void)
-{
- struct work_element *pq_p;
- struct list_head *lptr, *tptr;
- long timelimit;
-
- timelimit = jiffies - (CLEANUPTIME * HZ);
- /* The list is in strict chronological order */
- list_for_each_safe(lptr, tptr, &pending_list) {
- pq_p = list_entry(lptr, struct work_element, liste);
- if (pq_p->requestsent >= timelimit)
- break;
- PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
- ((struct caller *)pq_p->requestptr)->caller_id[0],
- ((struct caller *)pq_p->requestptr)->caller_id[1],
- ((struct caller *)pq_p->requestptr)->caller_id[2],
- ((struct caller *)pq_p->requestptr)->caller_id[3],
- ((struct caller *)pq_p->requestptr)->caller_id[4],
- ((struct caller *)pq_p->requestptr)->caller_id[5],
- ((struct caller *)pq_p->requestptr)->caller_id[6],
- ((struct caller *)pq_p->requestptr)->caller_id[7]);
- pq_p->retcode = -ETIMEOUT;
- pq_p->status[0] |= STAT_FAILED;
- /* get this off any caller queue it may be on */
- unbuild_caller(LONG2DEVPTR(pq_p->devindex),
- (struct caller *) pq_p->requestptr);
- list_del_init(lptr);
- pendingq_count--;
- pq_p->audit[1] |= FP_TIMEDOUT;
- pq_p->audit[1] |= FP_NOTPENDING;
- pq_p->audit[1] |= FP_AWAKENING;
- atomic_set(&pq_p->alarmrung, 1);
- wake_up(&pq_p->waitq);
- }
-
- /**
- * If pending count is zero, items left on the request queue may
- * never be processed.
- */
- if (pendingq_count <= 0) {
- list_for_each_safe(lptr, tptr, &request_list) {
- pq_p = list_entry(lptr, struct work_element, liste);
- if (pq_p->requestsent >= timelimit)
- break;
- PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
- ((struct caller *)pq_p->requestptr)->caller_id[0],
- ((struct caller *)pq_p->requestptr)->caller_id[1],
- ((struct caller *)pq_p->requestptr)->caller_id[2],
- ((struct caller *)pq_p->requestptr)->caller_id[3],
- ((struct caller *)pq_p->requestptr)->caller_id[4],
- ((struct caller *)pq_p->requestptr)->caller_id[5],
- ((struct caller *)pq_p->requestptr)->caller_id[6],
- ((struct caller *)pq_p->requestptr)->caller_id[7]);
- pq_p->retcode = -ETIMEOUT;
- pq_p->status[0] |= STAT_FAILED;
- list_del_init(lptr);
- requestq_count--;
- pq_p->audit[1] |= FP_TIMEDOUT;
- pq_p->audit[1] |= FP_REMREQUEST;
- pq_p->audit[1] |= FP_AWAKENING;
- atomic_set(&pq_p->alarmrung, 1);
- wake_up(&pq_p->waitq);
- }
- }
-}
-
-static void
-z90crypt_cleanup_task(unsigned long ptr)
-{
- PDEBUG("jiffies %ld\n", jiffies);
- spin_lock_irq(&queuespinlock);
- if (z90crypt.mask.st_count <= 0) // no devices!
- helper_drain_queues();
- else
- helper_timeout_requests();
- spin_unlock_irq(&queuespinlock);
- z90crypt_schedule_cleanup_task();
-}
-
-static void
-z90crypt_schedule_reader_task(unsigned long ptr)
-{
- tasklet_schedule(&reader_tasklet);
-}
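/*
 * For reference, a sketch of how the pieces above are typically wired
 * together at init time; the real declarations and init code live
 * outside this hunk, so the helper name below is hypothetical. The
 * timer fires in interrupt context and merely schedules the tasklet,
 * which then runs z90crypt_reader_task() to do the actual dequeueing.
 */
#include <linux/timer.h>
#include <linux/interrupt.h>

static void example_init_reader(void)	/* hypothetical helper */
{
	/* reader_tasklet and reader_timer are file-scope in the driver */
	tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
	init_timer(&reader_timer);
	reader_timer.function = z90crypt_schedule_reader_task;
	reader_timer.data = 0;
	reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
	add_timer(&reader_timer);
}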
-
-/**
- * Lowlevel Functions:
- *
- * create_z90crypt: creates and initializes basic data structures
- * refresh_z90crypt: re-initializes basic data structures
- * find_crypto_devices: returns a count and mask of hardware status
- * create_crypto_device: builds the descriptor for a device
- * destroy_crypto_device: unallocates the descriptor for a device
- * destroy_z90crypt: drains all work, unallocates structs
- */
-
-/**
- * build the z90crypt root structure using the given domain index
- */
-static int
-create_z90crypt(int *cdx_p)
-{
- struct hdware_block *hdware_blk_p;
-
- memset(&z90crypt, 0x00, sizeof(struct z90crypt));
- z90crypt.domain_established = 0;
- z90crypt.len = sizeof(struct z90crypt);
- z90crypt.max_count = Z90CRYPT_NUM_DEVS;
- z90crypt.cdx = *cdx_p;
-
- hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC);
- if (!hdware_blk_p) {
- PDEBUG("kmalloc for hardware block failed\n");
- return ENOMEM;
- }
- z90crypt.hdware_info = hdware_blk_p;
-
- return 0;
-}
-
-static inline int
-helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
-{
- enum hdstat hd_stat;
- int q_depth, dev_type;
- int indx, chkdom, numdomains;
-
- q_depth = dev_type = numdomains = 0;
- for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
- for (indx = 0; indx < z90crypt.max_count; indx++) {
- hd_stat = HD_NOT_THERE;
- numdomains = 0;
- for (chkdom = 0; chkdom <= 15; chkdom++) {
- hd_stat = query_online(indx, chkdom, MAX_RESET,
- &q_depth, &dev_type);
- if (hd_stat == HD_TSQ_EXCEPTION) {
- z90crypt.terminating = 1;
- PRINTKC("exception taken!\n");
- break;
- }
- if (hd_stat == HD_ONLINE) {
- cdx_array[numdomains++] = chkdom;
- if (*cdx_p == chkdom) {
- *correct_cdx_found = 1;
- break;
- }
- }
- }
- if ((*correct_cdx_found == 1) || (numdomains != 0))
- break;
- if (z90crypt.terminating)
- break;
- }
- return numdomains;
-}
-
-static inline int
-probe_crypto_domain(int *cdx_p)
-{
- int cdx_array[16];
- char cdx_array_text[53], temp[5];
- int correct_cdx_found, numdomains;
-
- correct_cdx_found = 0;
- numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
-
- if (z90crypt.terminating)
- return TSQ_FATAL_ERROR;
-
- if (correct_cdx_found)
- return 0;
-
- if (numdomains == 0) {
- PRINTKW("Unable to find crypto domain: No devices found\n");
- return Z90C_NO_DEVICES;
- }
-
- if (numdomains == 1) {
- if (*cdx_p == -1) {
- *cdx_p = cdx_array[0];
- return 0;
- }
- PRINTKW("incorrect domain: specified = %d, found = %d\n",
- *cdx_p, cdx_array[0]);
- return Z90C_INCORRECT_DOMAIN;
- }
-
- numdomains--;
- sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
- while (numdomains) {
- numdomains--;
- sprintf(temp, ", %d", cdx_array[numdomains]);
- strcat(cdx_array_text, temp);
- }
-
- PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
- *cdx_p, cdx_array_text);
- return Z90C_AMBIGUOUS_DOMAIN;
-}
-
-static int
-refresh_z90crypt(int *cdx_p)
-{
- int i, j, indx, rv;
- static struct status local_mask;
- struct device *devPtr;
- unsigned char oldStat, newStat;
- int return_unchanged;
-
- if (z90crypt.len != sizeof(z90crypt))
- return ENOTINIT;
- if (z90crypt.terminating)
- return TSQ_FATAL_ERROR;
- rv = 0;
- if (!z90crypt.hdware_info->hdware_mask.st_count &&
- !z90crypt.domain_established) {
- rv = probe_crypto_domain(cdx_p);
- if (z90crypt.terminating)
- return TSQ_FATAL_ERROR;
- if (rv == Z90C_NO_DEVICES)
- return 0; // try later
- if (rv)
- return rv;
- z90crypt.cdx = *cdx_p;
- z90crypt.domain_established = 1;
- }
- rv = find_crypto_devices(&local_mask);
- if (rv) {
- PRINTK("find crypto devices returned %d\n", rv);
- return rv;
- }
- if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
- sizeof(struct status))) {
- return_unchanged = 1;
- for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
- /**
- * Check for disabled cards. If any device is marked
- * disabled, destroy it.
- */
- for (j = 0;
- j < z90crypt.hdware_info->type_mask[i].st_count;
- j++) {
- indx = z90crypt.hdware_info->type_x_addr[i].
- device_index[j];
- devPtr = z90crypt.device_p[indx];
- if (devPtr && devPtr->disabled) {
- local_mask.st_mask[indx] = HD_NOT_THERE;
- return_unchanged = 0;
- }
- }
- }
- if (return_unchanged == 1)
- return 0;
- }
-
- spin_lock_irq(&queuespinlock);
- for (i = 0; i < z90crypt.max_count; i++) {
- oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
- newStat = local_mask.st_mask[i];
- if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
- destroy_crypto_device(i);
- else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
- rv = create_crypto_device(i);
- if (rv >= REC_FATAL_ERROR)
- return rv;
- if (rv != 0) {
- local_mask.st_mask[i] = HD_NOT_THERE;
- local_mask.st_count--;
- }
- }
- }
- memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
- sizeof(local_mask.st_mask));
- z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
- z90crypt.hdware_info->hdware_mask.disabled_count =
- local_mask.disabled_count;
- refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
- for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
- refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
- &(z90crypt.hdware_info->type_x_addr[i]));
- spin_unlock_irq(&queuespinlock);
-
- return rv;
-}
-
-static int
-find_crypto_devices(struct status *deviceMask)
-{
- int i, q_depth, dev_type;
- enum hdstat hd_stat;
-
- deviceMask->st_count = 0;
- deviceMask->disabled_count = 0;
- deviceMask->user_disabled_count = 0;
-
- for (i = 0; i < z90crypt.max_count; i++) {
- hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
- &dev_type);
- if (hd_stat == HD_TSQ_EXCEPTION) {
- z90crypt.terminating = 1;
- PRINTKC("Exception during probe for crypto devices\n");
- return TSQ_FATAL_ERROR;
- }
- deviceMask->st_mask[i] = hd_stat;
- if (hd_stat == HD_ONLINE) {
- PDEBUG("Got an online crypto!: %d\n", i);
- PDEBUG("Got a queue depth of %d\n", q_depth);
- PDEBUG("Got a device type of %d\n", dev_type);
- if (q_depth <= 0)
- return TSQ_FATAL_ERROR;
- deviceMask->st_count++;
- z90crypt.q_depth_array[i] = q_depth;
- z90crypt.dev_type_array[i] = dev_type;
- }
- }
-
- return 0;
-}
-
-static int
-refresh_index_array(struct status *status_str, struct device_x *index_array)
-{
- int i, count;
- enum devstat stat;
-
- i = -1;
- count = 0;
- do {
- stat = status_str->st_mask[++i];
- if (stat == DEV_ONLINE)
- index_array->device_index[count++] = i;
- } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
-
- return count;
-}
-
-static int
-create_crypto_device(int index)
-{
- int rv, devstat, total_size;
- struct device *dev_ptr;
- struct status *type_str_p;
- int deviceType;
-
- dev_ptr = z90crypt.device_p[index];
- if (!dev_ptr) {
- total_size = sizeof(struct device) +
- z90crypt.q_depth_array[index] * sizeof(int);
-
- dev_ptr = kzalloc(total_size, GFP_ATOMIC);
- if (!dev_ptr) {
- PRINTK("kmalloc device %d failed\n", index);
- return ENOMEM;
- }
- dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
- if (!dev_ptr->dev_resp_p) {
- kfree(dev_ptr);
- PRINTK("kmalloc device %d rec buffer failed\n", index);
- return ENOMEM;
- }
- dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
- INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
- }
-
- devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
- if (devstat == DEV_RSQ_EXCEPTION) {
- PRINTK("exception during reset device %d\n", index);
- kfree(dev_ptr->dev_resp_p);
- kfree(dev_ptr);
- return RSQ_FATAL_ERROR;
- }
- if (devstat == DEV_ONLINE) {
- dev_ptr->dev_self_x = index;
- dev_ptr->dev_type = z90crypt.dev_type_array[index];
- if (dev_ptr->dev_type == NILDEV) {
- rv = probe_device_type(dev_ptr);
- if (rv) {
- PRINTK("rv = %d from probe_device_type %d\n",
- rv, index);
- kfree(dev_ptr->dev_resp_p);
- kfree(dev_ptr);
- return rv;
- }
- }
- if (dev_ptr->dev_type == PCIXCC_UNK) {
- rv = probe_PCIXCC_type(dev_ptr);
- if (rv) {
- PRINTK("rv = %d from probe_PCIXCC_type %d\n",
- rv, index);
- kfree(dev_ptr->dev_resp_p);
- kfree(dev_ptr);
- return rv;
- }
- }
- deviceType = dev_ptr->dev_type;
- z90crypt.dev_type_array[index] = deviceType;
- if (deviceType == PCICA)
- z90crypt.hdware_info->device_type_array[index] = 1;
- else if (deviceType == PCICC)
- z90crypt.hdware_info->device_type_array[index] = 2;
- else if (deviceType == PCIXCC_MCL2)
- z90crypt.hdware_info->device_type_array[index] = 3;
- else if (deviceType == PCIXCC_MCL3)
- z90crypt.hdware_info->device_type_array[index] = 4;
- else if (deviceType == CEX2C)
- z90crypt.hdware_info->device_type_array[index] = 5;
- else if (deviceType == CEX2A)
- z90crypt.hdware_info->device_type_array[index] = 6;
- else // No idea how this would happen.
- z90crypt.hdware_info->device_type_array[index] = -1;
- }
-
- /**
- * 'q_depth' returned by the hardware is one less than
- * the actual depth
- */
- dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
- dev_ptr->dev_type = z90crypt.dev_type_array[index];
- dev_ptr->dev_stat = devstat;
- dev_ptr->disabled = 0;
- z90crypt.device_p[index] = dev_ptr;
-
- if (devstat == DEV_ONLINE) {
- if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
- z90crypt.mask.st_mask[index] = DEV_ONLINE;
- z90crypt.mask.st_count++;
- }
- deviceType = dev_ptr->dev_type;
- type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
- if (type_str_p->st_mask[index] != DEV_ONLINE) {
- type_str_p->st_mask[index] = DEV_ONLINE;
- type_str_p->st_count++;
- }
- }
-
- return 0;
-}
-
-static int
-destroy_crypto_device(int index)
-{
- struct device *dev_ptr;
- int t, disabledFlag;
-
- dev_ptr = z90crypt.device_p[index];
-
- /* remember device type; get rid of device struct */
- if (dev_ptr) {
- disabledFlag = dev_ptr->disabled;
- t = dev_ptr->dev_type;
- kfree(dev_ptr->dev_resp_p);
- kfree(dev_ptr);
- } else {
- disabledFlag = 0;
- t = -1;
- }
- z90crypt.device_p[index] = 0;
-
- /* if the type is valid, remove the device from the type_mask */
- if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
- z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
- z90crypt.hdware_info->type_mask[t].st_count--;
- if (disabledFlag == 1)
- z90crypt.hdware_info->type_mask[t].disabled_count--;
- }
- if (z90crypt.mask.st_mask[index] != DEV_GONE) {
- z90crypt.mask.st_mask[index] = DEV_GONE;
- z90crypt.mask.st_count--;
- }
- z90crypt.hdware_info->device_type_array[index] = 0;
-
- return 0;
-}
-
-static void
-destroy_z90crypt(void)
-{
- int i;
-
- for (i = 0; i < z90crypt.max_count; i++)
- if (z90crypt.device_p[i])
- destroy_crypto_device(i);
- kfree(z90crypt.hdware_info);
- memset((void *)&z90crypt, 0, sizeof(z90crypt));
-}
-
-static unsigned char static_testmsg[384] = {
-0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
-0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
-0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
-0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
-0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
-0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
-0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
-0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
-0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
-0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
-0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
-0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
-0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
-0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
-0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
-};
-
-static int
-probe_device_type(struct device *devPtr)
-{
- int rv, dv, i, index, length;
- unsigned char psmid[8];
- static unsigned char loc_testmsg[sizeof(static_testmsg)];
-
- index = devPtr->dev_self_x;
- rv = 0;
- do {
- memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
- length = sizeof(static_testmsg) - 24;
- /* the -24 allows for the header */
- dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
- if (dv) {
- PDEBUG("dv returned by send during probe: %d\n", dv);
- if (dv == DEV_SEN_EXCEPTION) {
- rv = SEN_FATAL_ERROR;
- PRINTKC("exception in send to AP %d\n", index);
- break;
- }
- PDEBUG("return value from send_to_AP: %d\n", rv);
- switch (dv) {
- case DEV_GONE:
- PDEBUG("dev %d not available\n", index);
- rv = SEN_NOT_AVAIL;
- break;
- case DEV_ONLINE:
- rv = 0;
- break;
- case DEV_EMPTY:
- rv = SEN_NOT_AVAIL;
- break;
- case DEV_NO_WORK:
- rv = SEN_FATAL_ERROR;
- break;
- case DEV_BAD_MESSAGE:
- rv = SEN_USER_ERROR;
- break;
- case DEV_QUEUE_FULL:
- rv = SEN_QUEUE_FULL;
- break;
- default:
- PRINTK("unknown dv=%d for dev %d\n", dv, index);
- rv = SEN_NOT_AVAIL;
- break;
- }
- }
-
- if (rv)
- break;
-
- for (i = 0; i < 6; i++) {
- mdelay(300);
- dv = receive_from_AP(index, z90crypt.cdx,
- devPtr->dev_resp_l,
- devPtr->dev_resp_p, psmid);
- PDEBUG("dv returned by DQ = %d\n", dv);
- if (dv == DEV_REC_EXCEPTION) {
- rv = REC_FATAL_ERROR;
- PRINTKC("exception in dequeue %d\n",
- index);
- break;
- }
- switch (dv) {
- case DEV_ONLINE:
- rv = 0;
- break;
- case DEV_EMPTY:
- rv = REC_EMPTY;
- break;
- case DEV_NO_WORK:
- rv = REC_NO_WORK;
- break;
- case DEV_BAD_MESSAGE:
- case DEV_GONE:
- default:
- rv = REC_NO_RESPONSE;
- break;
- }
- if ((rv != 0) && (rv != REC_NO_WORK))
- break;
- if (rv == 0)
- break;
- }
- if (rv)
- break;
- rv = (devPtr->dev_resp_p[0] == 0x00) &&
- (devPtr->dev_resp_p[1] == 0x86);
- if (rv)
- devPtr->dev_type = PCICC;
- else
- devPtr->dev_type = PCICA;
- rv = 0;
- } while (0);
- /* In a general error case, the card is not marked online */
- return rv;
-}
-
-static unsigned char MCL3_testmsg[] = {
-0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
-0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
-0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
-0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
-0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
-0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
-0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
-0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
-0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
-0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
-0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
-0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
-0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
-0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
-0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
-0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
-};
-
-static int
-probe_PCIXCC_type(struct device *devPtr)
-{
- int rv, dv, i, index, length;
- unsigned char psmid[8];
- static unsigned char loc_testmsg[548];
- struct CPRBX *cprbx_p;
-
- index = devPtr->dev_self_x;
- rv = 0;
- do {
- memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
- length = sizeof(MCL3_testmsg) - 0x0C;
- dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
- if (dv) {
- PDEBUG("dv returned = %d\n", dv);
- if (dv == DEV_SEN_EXCEPTION) {
- rv = SEN_FATAL_ERROR;
- PRINTKC("exception in send to AP %d\n", index);
- break;
- }
- PDEBUG("return value from send_to_AP: %d\n", rv);
- switch (dv) {
- case DEV_GONE:
- PDEBUG("dev %d not available\n", index);
- rv = SEN_NOT_AVAIL;
- break;
- case DEV_ONLINE:
- rv = 0;
- break;
- case DEV_EMPTY:
- rv = SEN_NOT_AVAIL;
- break;
- case DEV_NO_WORK:
- rv = SEN_FATAL_ERROR;
- break;
- case DEV_BAD_MESSAGE:
- rv = SEN_USER_ERROR;
- break;
- case DEV_QUEUE_FULL:
- rv = SEN_QUEUE_FULL;
- break;
- default:
- PRINTK("unknown dv=%d for dev %d\n", dv, index);
- rv = SEN_NOT_AVAIL;
- break;
- }
- }
-
- if (rv)
- break;
-
- for (i = 0; i < 6; i++) {
- mdelay(300);
- dv = receive_from_AP(index, z90crypt.cdx,
- devPtr->dev_resp_l,
- devPtr->dev_resp_p, psmid);
- PDEBUG("dv returned by DQ = %d\n", dv);
- if (dv == DEV_REC_EXCEPTION) {
- rv = REC_FATAL_ERROR;
- PRINTKC("exception in dequeue %d\n",
- index);
- break;
- }
- switch (dv) {
- case DEV_ONLINE:
- rv = 0;
- break;
- case DEV_EMPTY:
- rv = REC_EMPTY;
- break;
- case DEV_NO_WORK:
- rv = REC_NO_WORK;
- break;
- case DEV_BAD_MESSAGE:
- case DEV_GONE:
- default:
- rv = REC_NO_RESPONSE;
- break;
- }
- if ((rv != 0) && (rv != REC_NO_WORK))
- break;
- if (rv == 0)
- break;
- }
- if (rv)
- break;
- cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
- if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
- devPtr->dev_type = PCIXCC_MCL2;
- PDEBUG("device %d is MCL2\n", index);
- } else {
- devPtr->dev_type = PCIXCC_MCL3;
- PDEBUG("device %d is MCL3\n", index);
- }
- } while (0);
- /* In a general error case, the card is not marked online */
- return rv;
-}
-
-module_init(z90crypt_init_module);
-module_exit(z90crypt_cleanup_module);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
new file mode 100644
index 00000000000..1edc10a7a6f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -0,0 +1,1091 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_api.c
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/compat.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "zcrypt_api.h"
+
+/**
+ * Module description.
+ */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
+ "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+static DEFINE_SPINLOCK(zcrypt_device_lock);
+static LIST_HEAD(zcrypt_device_list);
+static int zcrypt_device_count = 0;
+static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
+
+/**
+ * Device attributes common for all crypto devices.
+ */
+static ssize_t zcrypt_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zcrypt_device *zdev = to_ap_dev(dev)->private;
+ return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
+}
+
+static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
+
+static ssize_t zcrypt_online_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zcrypt_device *zdev = to_ap_dev(dev)->private;
+ return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
+}
+
+static ssize_t zcrypt_online_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zcrypt_device *zdev = to_ap_dev(dev)->private;
+ int online;
+
+ if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+ return -EINVAL;
+ zdev->online = online;
+ if (!online)
+ ap_flush_queue(zdev->ap_dev);
+ return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
+
+static struct attribute *zcrypt_device_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_online.attr,
+ NULL,
+};
+
+static struct attribute_group zcrypt_device_attr_group = {
+ .attrs = zcrypt_device_attrs,
+};
+
+/**
+ * Move the device towards the head of the device list.
+ * Must be called while holding the zcrypt device list lock.
+ * Note: cards with speed_rating of 0 are kept at the end of the list.
+ */
+static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
+{
+ struct zcrypt_device *tmp;
+ struct list_head *l;
+
+ if (zdev->speed_rating == 0)
+ return;
+ for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
+ tmp = list_entry(l, struct zcrypt_device, list);
+ if ((tmp->request_count + 1) * tmp->speed_rating <=
+ (zdev->request_count + 1) * zdev->speed_rating &&
+ tmp->speed_rating != 0)
+ break;
+ }
+ if (l == zdev->list.prev)
+ return;
+ /* Move zdev behind l */
+ list_del(&zdev->list);
+ list_add(&zdev->list, l);
+}
+
+/**
+ * Move the device towards the tail of the device list.
+ * Must be called while holding the zcrypt device list lock.
+ * Note: cards with speed_rating of 0 are kept at the end of the list.
+ */
+static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
+{
+ struct zcrypt_device *tmp;
+ struct list_head *l;
+
+ if (zdev->speed_rating == 0)
+ return;
+ for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
+ tmp = list_entry(l, struct zcrypt_device, list);
+ if ((tmp->request_count + 1) * tmp->speed_rating >
+ (zdev->request_count + 1) * zdev->speed_rating ||
+ tmp->speed_rating == 0)
+ break;
+ }
+ if (l == zdev->list.next)
+ return;
+ /* Move zdev before l */
+ list_del(&zdev->list);
+ list_add_tail(&zdev->list, l);
+}
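/*
 * Worked example of the ordering key used above (illustrative): the
 * list is kept sorted by (request_count + 1) * speed_rating, smallest
 * product first, so speed_rating acts as a cost and a fast and/or idle
 * card sits near the head where request routing tries it first:
 *
 *	card A: speed_rating 3, request_count 1 -> (1 + 1) * 3 = 6
 *	card B: speed_rating 5, request_count 0 -> (0 + 1) * 5 = 5
 *
 * The idle (nominally slower) card B is tried before the busy card A,
 * and cards with a speed_rating of 0 always stay at the tail.
 */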
+
+static void zcrypt_device_release(struct kref *kref)
+{
+ struct zcrypt_device *zdev =
+ container_of(kref, struct zcrypt_device, refcount);
+ zcrypt_device_free(zdev);
+}
+
+void zcrypt_device_get(struct zcrypt_device *zdev)
+{
+ kref_get(&zdev->refcount);
+}
+EXPORT_SYMBOL(zcrypt_device_get);
+
+int zcrypt_device_put(struct zcrypt_device *zdev)
+{
+ return kref_put(&zdev->refcount, zcrypt_device_release);
+}
+EXPORT_SYMBOL(zcrypt_device_put);
+
+struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
+{
+ struct zcrypt_device *zdev;
+
+ zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
+ if (!zdev)
+ return NULL;
+ zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+ if (!zdev->reply.message)
+ goto out_free;
+ zdev->reply.length = max_response_size;
+ spin_lock_init(&zdev->lock);
+ INIT_LIST_HEAD(&zdev->list);
+ return zdev;
+
+out_free:
+ kfree(zdev);
+ return NULL;
+}
+EXPORT_SYMBOL(zcrypt_device_alloc);
+
+void zcrypt_device_free(struct zcrypt_device *zdev)
+{
+ kfree(zdev->reply.message);
+ kfree(zdev);
+}
+EXPORT_SYMBOL(zcrypt_device_free);
+
+/**
+ * Register a crypto device.
+ */
+int zcrypt_device_register(struct zcrypt_device *zdev)
+{
+ int rc;
+
+ rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
+ &zcrypt_device_attr_group);
+ if (rc)
+ goto out;
+ get_device(&zdev->ap_dev->device);
+ kref_init(&zdev->refcount);
+ spin_lock_bh(&zcrypt_device_lock);
+ zdev->online = 1; /* New devices are online by default. */
+ list_add_tail(&zdev->list, &zcrypt_device_list);
+ __zcrypt_increase_preference(zdev);
+ zcrypt_device_count++;
+ spin_unlock_bh(&zcrypt_device_lock);
+out:
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_device_register);
+
+/**
+ * Unregister a crypto device.
+ */
+void zcrypt_device_unregister(struct zcrypt_device *zdev)
+{
+ spin_lock_bh(&zcrypt_device_lock);
+ zcrypt_device_count--;
+ list_del_init(&zdev->list);
+ spin_unlock_bh(&zcrypt_device_lock);
+ sysfs_remove_group(&zdev->ap_dev->device.kobj,
+ &zcrypt_device_attr_group);
+ put_device(&zdev->ap_dev->device);
+ zcrypt_device_put(zdev);
+}
+EXPORT_SYMBOL(zcrypt_device_unregister);
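/*
 * Sketch of the backend-driver side of the API exported above (the
 * probe/remove wrappers and the 8192-byte reply size are hypothetical;
 * only the zcrypt_* calls are from this file): allocate a device big
 * enough for the largest reply, hook it to its AP device, register it,
 * and drop it again on removal.
 */
static int example_probe(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev;
	int rc;

	zdev = zcrypt_device_alloc(8192);	/* max response size */
	if (!zdev)
		return -ENOMEM;
	zdev->ap_dev = ap_dev;
	ap_dev->private = zdev;
	/* ops, type_string, speed_rating, mod size limits set here */
	rc = zcrypt_device_register(zdev);
	if (rc)
		zcrypt_device_free(zdev);
	return rc;
}

static void example_remove(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev = ap_dev->private;

	zcrypt_device_unregister(zdev);
}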
+
+/**
+ * zcrypt_read is not supported beyond zcrypt 1.3.1.
+ */
+static ssize_t zcrypt_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ return -EPERM;
+}
+
+/**
+ * Write is not allowed.
+ */
+static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ return -EPERM;
+}
+
+/**
+ * Device open/close functions to count number of users.
+ */
+static int zcrypt_open(struct inode *inode, struct file *filp)
+{
+ atomic_inc(&zcrypt_open_count);
+ return 0;
+}
+
+static int zcrypt_release(struct inode *inode, struct file *filp)
+{
+ atomic_dec(&zcrypt_open_count);
+ return 0;
+}
+
+/**
+ * zcrypt ioctls.
+ */
+static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
+{
+ struct zcrypt_device *zdev;
+ int rc;
+
+ if (mex->outputdatalength < mex->inputdatalength)
+ return -EINVAL;
+ /**
+ * As long as outputdatalength is big enough, we can set the
+ * outputdatalength equal to the inputdatalength, since that is the
+ * number of bytes we will copy in any case
+ */
+ mex->outputdatalength = mex->inputdatalength;
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ if (!zdev->online ||
+ !zdev->ops->rsa_modexpo ||
+ zdev->min_mod_size > mex->inputdatalength ||
+ zdev->max_mod_size < mex->inputdatalength)
+ continue;
+ zcrypt_device_get(zdev);
+ get_device(&zdev->ap_dev->device);
+ zdev->request_count++;
+ __zcrypt_decrease_preference(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+ rc = zdev->ops->rsa_modexpo(zdev, mex);
+ module_put(zdev->ap_dev->drv->driver.owner);
+		} else
+ rc = -EAGAIN;
+ spin_lock_bh(&zcrypt_device_lock);
+ zdev->request_count--;
+ __zcrypt_increase_preference(zdev);
+ put_device(&zdev->ap_dev->device);
+ zcrypt_device_put(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ return rc;
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+ return -ENODEV;
+}
+
+static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
+{
+ struct zcrypt_device *zdev;
+ unsigned long long z1, z2, z3;
+ int rc, copied;
+
+ if (crt->outputdatalength < crt->inputdatalength ||
+ (crt->inputdatalength & 1))
+ return -EINVAL;
+ /**
+ * As long as outputdatalength is big enough, we can set the
+ * outputdatalength equal to the inputdatalength, since that is the
+ * number of bytes we will copy in any case
+ */
+ crt->outputdatalength = crt->inputdatalength;
+
+ copied = 0;
+ restart:
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ if (!zdev->online ||
+ !zdev->ops->rsa_modexpo_crt ||
+ zdev->min_mod_size > crt->inputdatalength ||
+ zdev->max_mod_size < crt->inputdatalength)
+ continue;
+ if (zdev->short_crt && crt->inputdatalength > 240) {
+ /**
+ * Check inputdata for leading zeros for cards
+ * that can't handle np_prime, bp_key, or
+ * u_mult_inv > 128 bytes.
+ */
+ if (copied == 0) {
+ int len;
+ spin_unlock_bh(&zcrypt_device_lock);
+ /* len is max 256 / 2 - 120 = 8 */
+ len = crt->inputdatalength / 2 - 120;
+ z1 = z2 = z3 = 0;
+ if (copy_from_user(&z1, crt->np_prime, len) ||
+ copy_from_user(&z2, crt->bp_key, len) ||
+ copy_from_user(&z3, crt->u_mult_inv, len))
+ return -EFAULT;
+ copied = 1;
+ /**
+ * We have to restart device lookup -
+ * the device list may have changed by now.
+ */
+ goto restart;
+ }
+ if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
+ /* The device can't handle this request. */
+ continue;
+ }
+ zcrypt_device_get(zdev);
+ get_device(&zdev->ap_dev->device);
+ zdev->request_count++;
+ __zcrypt_decrease_preference(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+ rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
+ module_put(zdev->ap_dev->drv->driver.owner);
+		} else
+ rc = -EAGAIN;
+ spin_lock_bh(&zcrypt_device_lock);
+ zdev->request_count--;
+ __zcrypt_increase_preference(zdev);
+ put_device(&zdev->ap_dev->device);
+ zcrypt_device_put(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ return rc;
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+ return -ENODEV;
+}
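/*
 * Worked numbers for the short_crt guard above (illustrative, assuming
 * the usual ica CRT layout where np_prime, bp_key and u_mult_inv are
 * each inputdatalength/2 + 8 bytes long): with inputdatalength = 256
 * each field is 136 bytes, 8 more than a short-CRT card's 128-byte
 * limit, and len = 256/2 - 120 = 8. The request can still be routed to
 * such a card only if those leading 8 bytes of all three fields are
 * zero padding.
 */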
+
+static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+{
+ struct zcrypt_device *zdev;
+ int rc;
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ if (!zdev->online || !zdev->ops->send_cprb ||
+ (xcRB->user_defined != AUTOSELECT &&
+ AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
+ )
+ continue;
+ zcrypt_device_get(zdev);
+ get_device(&zdev->ap_dev->device);
+ zdev->request_count++;
+ __zcrypt_decrease_preference(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
+ rc = zdev->ops->send_cprb(zdev, xcRB);
+ module_put(zdev->ap_dev->drv->driver.owner);
+		} else
+ rc = -EAGAIN;
+ spin_lock_bh(&zcrypt_device_lock);
+ zdev->request_count--;
+ __zcrypt_increase_preference(zdev);
+ put_device(&zdev->ap_dev->device);
+ zcrypt_device_put(zdev);
+ spin_unlock_bh(&zcrypt_device_lock);
+ return rc;
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+ return -ENODEV;
+}
+
+static void zcrypt_status_mask(char status[AP_DEVICES])
+{
+ struct zcrypt_device *zdev;
+
+ memset(status, 0, sizeof(char) * AP_DEVICES);
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list)
+ status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
+ zdev->online ? zdev->user_space_type : 0x0d;
+ spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
+{
+ struct zcrypt_device *zdev;
+
+ memset(qdepth, 0, sizeof(char) * AP_DEVICES);
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ spin_lock(&zdev->ap_dev->lock);
+ qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
+ zdev->ap_dev->pendingq_count +
+ zdev->ap_dev->requestq_count;
+ spin_unlock(&zdev->ap_dev->lock);
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
+{
+ struct zcrypt_device *zdev;
+
+ memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ spin_lock(&zdev->ap_dev->lock);
+ reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
+ zdev->ap_dev->total_request_count;
+ spin_unlock(&zdev->ap_dev->lock);
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static int zcrypt_pendingq_count(void)
+{
+ struct zcrypt_device *zdev;
+ int pendingq_count = 0;
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ spin_lock(&zdev->ap_dev->lock);
+ pendingq_count += zdev->ap_dev->pendingq_count;
+ spin_unlock(&zdev->ap_dev->lock);
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+ return pendingq_count;
+}
+
+static int zcrypt_requestq_count(void)
+{
+ struct zcrypt_device *zdev;
+ int requestq_count = 0;
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list) {
+ spin_lock(&zdev->ap_dev->lock);
+ requestq_count += zdev->ap_dev->requestq_count;
+ spin_unlock(&zdev->ap_dev->lock);
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+ return requestq_count;
+}
+
+static int zcrypt_count_type(int type)
+{
+ struct zcrypt_device *zdev;
+ int device_count = 0;
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list)
+ if (zdev->user_space_type == type)
+ device_count++;
+ spin_unlock_bh(&zcrypt_device_lock);
+ return device_count;
+}
+
+/**
+ * Old, deprecated combi status call.
+ */
+static long zcrypt_ica_status(struct file *filp, unsigned long arg)
+{
+ struct ica_z90_status *pstat;
+ int ret;
+
+ pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
+ if (!pstat)
+ return -ENOMEM;
+ pstat->totalcount = zcrypt_device_count;
+ pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
+ pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
+ pstat->requestqWaitCount = zcrypt_requestq_count();
+ pstat->pendingqWaitCount = zcrypt_pendingq_count();
+ pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
+ pstat->cryptoDomain = ap_domain_index;
+ zcrypt_status_mask(pstat->status);
+ zcrypt_qdepth_mask(pstat->qdepth);
+ ret = 0;
+ if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
+ ret = -EFAULT;
+ kfree(pstat);
+ return ret;
+}
+
+static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+
+ switch (cmd) {
+ case ICARSAMODEXPO: {
+ struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+ struct ica_rsa_modexpo mex;
+ if (copy_from_user(&mex, umex, sizeof(mex)))
+ return -EFAULT;
+ do {
+ rc = zcrypt_rsa_modexpo(&mex);
+ } while (rc == -EAGAIN);
+ if (rc)
+ return rc;
+ return put_user(mex.outputdatalength, &umex->outputdatalength);
+ }
+ case ICARSACRT: {
+ struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+ struct ica_rsa_modexpo_crt crt;
+ if (copy_from_user(&crt, ucrt, sizeof(crt)))
+ return -EFAULT;
+ do {
+ rc = zcrypt_rsa_crt(&crt);
+ } while (rc == -EAGAIN);
+ if (rc)
+ return rc;
+ return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+ }
+ case ZSECSENDCPRB: {
+ struct ica_xcRB __user *uxcRB = (void __user *) arg;
+ struct ica_xcRB xcRB;
+ if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
+ return -EFAULT;
+ do {
+ rc = zcrypt_send_cprb(&xcRB);
+ } while (rc == -EAGAIN);
+ if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
+ return -EFAULT;
+ return rc;
+ }
+ case Z90STAT_STATUS_MASK: {
+ char status[AP_DEVICES];
+ zcrypt_status_mask(status);
+ if (copy_to_user((char __user *) arg, status,
+ sizeof(char) * AP_DEVICES))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_QDEPTH_MASK: {
+ char qdepth[AP_DEVICES];
+ zcrypt_qdepth_mask(qdepth);
+ if (copy_to_user((char __user *) arg, qdepth,
+ sizeof(char) * AP_DEVICES))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_PERDEV_REQCNT: {
+ int reqcnt[AP_DEVICES];
+ zcrypt_perdev_reqcnt(reqcnt);
+ if (copy_to_user((int __user *) arg, reqcnt,
+ sizeof(int) * AP_DEVICES))
+ return -EFAULT;
+ return 0;
+ }
+ case Z90STAT_REQUESTQ_COUNT:
+ return put_user(zcrypt_requestq_count(), (int __user *) arg);
+ case Z90STAT_PENDINGQ_COUNT:
+ return put_user(zcrypt_pendingq_count(), (int __user *) arg);
+ case Z90STAT_TOTALOPEN_COUNT:
+ return put_user(atomic_read(&zcrypt_open_count),
+ (int __user *) arg);
+ case Z90STAT_DOMAIN_INDEX:
+ return put_user(ap_domain_index, (int __user *) arg);
+ /**
+	 * Deprecated ioctls. Don't add another device count ioctl;
+	 * user space can count the devices itself from the output
+	 * of the Z90STAT_STATUS_MASK ioctl.
+ */
+ case ICAZ90STATUS:
+ return zcrypt_ica_status(filp, arg);
+ case Z90STAT_TOTALCOUNT:
+ return put_user(zcrypt_device_count, (int __user *) arg);
+ case Z90STAT_PCICACOUNT:
+ return put_user(zcrypt_count_type(ZCRYPT_PCICA),
+ (int __user *) arg);
+ case Z90STAT_PCICCCOUNT:
+ return put_user(zcrypt_count_type(ZCRYPT_PCICC),
+ (int __user *) arg);
+ case Z90STAT_PCIXCCMCL2COUNT:
+ return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
+ (int __user *) arg);
+ case Z90STAT_PCIXCCMCL3COUNT:
+ return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
+ (int __user *) arg);
+ case Z90STAT_PCIXCCCOUNT:
+ return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
+ zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
+ (int __user *) arg);
+ case Z90STAT_CEX2CCOUNT:
+ return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
+ (int __user *) arg);
+ case Z90STAT_CEX2ACOUNT:
+ return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
+ (int __user *) arg);
+ default:
+ /* unknown ioctl number */
+ return -ENOIOCTLCMD;
+ }
+}
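For orientation, here is a minimal userspace sketch of the modexpo ioctl
handled above; it is illustrative only and not part of the patch. It assumes
the conventional /dev/z90crypt node for the misc device registered further
down and the ICARSAMODEXPO definitions from <asm/zcrypt.h>; the key buffers
are placeholders for real right-justified, big-endian key material.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/zcrypt.h>

	int main(void)
	{
		char mod[128], exp[128], in[128], out[128];
		struct ica_rsa_modexpo mex;
		int fd, rc;

		/* Fill mod, exp and in with real key material before use. */
		memset(&mex, 0, sizeof(mex));
		mex.inputdata = in;
		mex.inputdatalength = sizeof(in);
		mex.outputdata = out;
		mex.outputdatalength = sizeof(out);
		mex.b_key = exp;
		mex.n_modulus = mod;

		fd = open("/dev/z90crypt", O_RDWR);
		if (fd < 0)
			return 1;
		rc = ioctl(fd, ICARSAMODEXPO, &mex);	/* -EAGAIN is retried inside the driver */
		if (rc < 0)
			perror("ICARSAMODEXPO");
		close(fd);
		return rc ? 1 : 0;
	}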
+
+#ifdef CONFIG_COMPAT
+/**
+ * ioctl32 conversion routines
+ */
+struct compat_ica_rsa_modexpo {
+ compat_uptr_t inputdata;
+ unsigned int inputdatalength;
+ compat_uptr_t outputdata;
+ unsigned int outputdatalength;
+ compat_uptr_t b_key;
+ compat_uptr_t n_modulus;
+};
+
+static long trans_modexpo32(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
+ struct compat_ica_rsa_modexpo mex32;
+ struct ica_rsa_modexpo mex64;
+ long rc;
+
+ if (copy_from_user(&mex32, umex32, sizeof(mex32)))
+ return -EFAULT;
+ mex64.inputdata = compat_ptr(mex32.inputdata);
+ mex64.inputdatalength = mex32.inputdatalength;
+ mex64.outputdata = compat_ptr(mex32.outputdata);
+ mex64.outputdatalength = mex32.outputdatalength;
+ mex64.b_key = compat_ptr(mex32.b_key);
+ mex64.n_modulus = compat_ptr(mex32.n_modulus);
+ do {
+ rc = zcrypt_rsa_modexpo(&mex64);
+ } while (rc == -EAGAIN);
+ if (!rc)
+ rc = put_user(mex64.outputdatalength,
+ &umex32->outputdatalength);
+ return rc;
+}
+
+struct compat_ica_rsa_modexpo_crt {
+ compat_uptr_t inputdata;
+ unsigned int inputdatalength;
+ compat_uptr_t outputdata;
+ unsigned int outputdatalength;
+ compat_uptr_t bp_key;
+ compat_uptr_t bq_key;
+ compat_uptr_t np_prime;
+ compat_uptr_t nq_prime;
+ compat_uptr_t u_mult_inv;
+};
+
+static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
+ struct compat_ica_rsa_modexpo_crt crt32;
+ struct ica_rsa_modexpo_crt crt64;
+ long rc;
+
+ if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
+ return -EFAULT;
+ crt64.inputdata = compat_ptr(crt32.inputdata);
+ crt64.inputdatalength = crt32.inputdatalength;
+	crt64.outputdata = compat_ptr(crt32.outputdata);
+ crt64.outputdatalength = crt32.outputdatalength;
+ crt64.bp_key = compat_ptr(crt32.bp_key);
+ crt64.bq_key = compat_ptr(crt32.bq_key);
+ crt64.np_prime = compat_ptr(crt32.np_prime);
+ crt64.nq_prime = compat_ptr(crt32.nq_prime);
+ crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
+ do {
+ rc = zcrypt_rsa_crt(&crt64);
+ } while (rc == -EAGAIN);
+ if (!rc)
+ rc = put_user(crt64.outputdatalength,
+ &ucrt32->outputdatalength);
+ return rc;
+}
+
+struct compat_ica_xcRB {
+ unsigned short agent_ID;
+ unsigned int user_defined;
+ unsigned short request_ID;
+ unsigned int request_control_blk_length;
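+	/* Each compat_uptr_t below sits in a 16-byte slot so the layout
+	 * mirrors the 64-bit struct ica_xcRB. */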
+ unsigned char padding1[16 - sizeof (compat_uptr_t)];
+ compat_uptr_t request_control_blk_addr;
+ unsigned int request_data_length;
+ char padding2[16 - sizeof (compat_uptr_t)];
+ compat_uptr_t request_data_address;
+ unsigned int reply_control_blk_length;
+ char padding3[16 - sizeof (compat_uptr_t)];
+ compat_uptr_t reply_control_blk_addr;
+ unsigned int reply_data_length;
+ char padding4[16 - sizeof (compat_uptr_t)];
+ compat_uptr_t reply_data_addr;
+ unsigned short priority_window;
+ unsigned int status;
+} __attribute__((packed));
+
+static long trans_xcRB32(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
+ struct compat_ica_xcRB xcRB32;
+ struct ica_xcRB xcRB64;
+ long rc;
+
+ if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
+ return -EFAULT;
+ xcRB64.agent_ID = xcRB32.agent_ID;
+ xcRB64.user_defined = xcRB32.user_defined;
+ xcRB64.request_ID = xcRB32.request_ID;
+ xcRB64.request_control_blk_length =
+ xcRB32.request_control_blk_length;
+ xcRB64.request_control_blk_addr =
+ compat_ptr(xcRB32.request_control_blk_addr);
+ xcRB64.request_data_length =
+ xcRB32.request_data_length;
+ xcRB64.request_data_address =
+ compat_ptr(xcRB32.request_data_address);
+ xcRB64.reply_control_blk_length =
+ xcRB32.reply_control_blk_length;
+ xcRB64.reply_control_blk_addr =
+ compat_ptr(xcRB32.reply_control_blk_addr);
+ xcRB64.reply_data_length = xcRB32.reply_data_length;
+ xcRB64.reply_data_addr =
+ compat_ptr(xcRB32.reply_data_addr);
+ xcRB64.priority_window = xcRB32.priority_window;
+ xcRB64.status = xcRB32.status;
+ do {
+ rc = zcrypt_send_cprb(&xcRB64);
+ } while (rc == -EAGAIN);
+ xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
+ xcRB32.reply_data_length = xcRB64.reply_data_length;
+ xcRB32.status = xcRB64.status;
+ if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
+ return -EFAULT;
+ return rc;
+}
+
+long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ if (cmd == ICARSAMODEXPO)
+ return trans_modexpo32(filp, cmd, arg);
+ if (cmd == ICARSACRT)
+ return trans_modexpo_crt32(filp, cmd, arg);
+ if (cmd == ZSECSENDCPRB)
+ return trans_xcRB32(filp, cmd, arg);
+ return zcrypt_unlocked_ioctl(filp, cmd, arg);
+}
+#endif
+
+/**
+ * Misc device file operations.
+ */
+static struct file_operations zcrypt_fops = {
+ .owner = THIS_MODULE,
+ .read = zcrypt_read,
+ .write = zcrypt_write,
+ .unlocked_ioctl = zcrypt_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = zcrypt_compat_ioctl,
+#endif
+ .open = zcrypt_open,
+ .release = zcrypt_release
+};
+
+/**
+ * Misc device.
+ */
+static struct miscdevice zcrypt_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "z90crypt",
+ .fops = &zcrypt_fops,
+};
+
+/**
+ * Deprecated /proc entry support.
+ */
+static struct proc_dir_entry *zcrypt_entry;
+
+static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
+ unsigned int len)
+{
+ int hl, i;
+
+ hl = 0;
+ for (i = 0; i < len; i++)
+ hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
+ hl += sprintf(outaddr+hl, " ");
+ return hl;
+}
+
+static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
+ unsigned int len)
+{
+ int hl, inl, c, cx;
+
+ hl = sprintf(outaddr, " ");
+ inl = 0;
+ for (c = 0; c < (len / 16); c++) {
+ hl += sprintcl(outaddr+hl, addr+inl, 16);
+ inl += 16;
+ }
+ cx = len%16;
+ if (cx) {
+ hl += sprintcl(outaddr+hl, addr+inl, cx);
+ inl += cx;
+ }
+ hl += sprintf(outaddr+hl, "\n");
+ return hl;
+}
+
+static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
+ unsigned char *addr, unsigned int len)
+{
+ int hl, inl, r, rx;
+
+ hl = sprintf(outaddr, "\n%s\n", title);
+ inl = 0;
+ for (r = 0; r < (len / 64); r++) {
+ hl += sprintrw(outaddr+hl, addr+inl, 64);
+ inl += 64;
+ }
+ rx = len % 64;
+ if (rx) {
+ hl += sprintrw(outaddr+hl, addr+inl, rx);
+ inl += rx;
+ }
+ hl += sprintf(outaddr+hl, "\n");
+ return hl;
+}
+
+static inline int sprinthx4(unsigned char *title, unsigned char *outaddr,
+ unsigned int *array, unsigned int len)
+{
+ int hl, r;
+
+ hl = sprintf(outaddr, "\n%s\n", title);
+ for (r = 0; r < len; r++) {
+ if ((r % 8) == 0)
+ hl += sprintf(outaddr+hl, " ");
+ hl += sprintf(outaddr+hl, "%08X ", array[r]);
+ if ((r % 8) == 7)
+ hl += sprintf(outaddr+hl, "\n");
+ }
+ hl += sprintf(outaddr+hl, "\n");
+ return hl;
+}
+
+static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ unsigned char *workarea;
+ int len;
+
+ len = 0;
+
+ /* resp_buff is a page. Use the right half for a work area */
+ workarea = resp_buff + 2000;
+ len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
+ ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
+ len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
+ ap_domain_index);
+ len += sprintf(resp_buff + len, "Total device count: %d\n",
+ zcrypt_device_count);
+ len += sprintf(resp_buff + len, "PCICA count: %d\n",
+ zcrypt_count_type(ZCRYPT_PCICA));
+ len += sprintf(resp_buff + len, "PCICC count: %d\n",
+ zcrypt_count_type(ZCRYPT_PCICC));
+ len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
+ zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
+ len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
+ zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
+ len += sprintf(resp_buff + len, "CEX2C count: %d\n",
+ zcrypt_count_type(ZCRYPT_CEX2C));
+ len += sprintf(resp_buff + len, "CEX2A count: %d\n",
+ zcrypt_count_type(ZCRYPT_CEX2A));
+ len += sprintf(resp_buff + len, "requestq count: %d\n",
+ zcrypt_requestq_count());
+ len += sprintf(resp_buff + len, "pendingq count: %d\n",
+ zcrypt_pendingq_count());
+ len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
+ atomic_read(&zcrypt_open_count));
+ zcrypt_status_mask(workarea);
+ len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
+ "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
+ resp_buff+len, workarea, AP_DEVICES);
+ zcrypt_qdepth_mask(workarea);
+ len += sprinthx("Waiting work element counts",
+ resp_buff+len, workarea, AP_DEVICES);
+ zcrypt_perdev_reqcnt((unsigned int *) workarea);
+ len += sprinthx4("Per-device successfully completed request counts",
+		       resp_buff+len, (unsigned int *) workarea, AP_DEVICES);
+ *eof = 1;
+ memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
+ return len;
+}
+
+static void zcrypt_disable_card(int index)
+{
+ struct zcrypt_device *zdev;
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list)
+ if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
+ zdev->online = 0;
+ ap_flush_queue(zdev->ap_dev);
+ break;
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static void zcrypt_enable_card(int index)
+{
+ struct zcrypt_device *zdev;
+
+ spin_lock_bh(&zcrypt_device_lock);
+ list_for_each_entry(zdev, &zcrypt_device_list, list)
+ if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
+ zdev->online = 1;
+ break;
+ }
+ spin_unlock_bh(&zcrypt_device_lock);
+}
+
+static int zcrypt_status_write(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ unsigned char *lbuf, *ptr;
+ unsigned long local_count;
+ int j;
+
+	if (count == 0)
+ return 0;
+
+#define LBUFSIZE 1200UL
+ lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
+ if (!lbuf) {
+ PRINTK("kmalloc failed!\n");
+ return 0;
+ }
+
+ local_count = min(LBUFSIZE - 1, count);
+ if (copy_from_user(lbuf, buffer, local_count) != 0) {
+ kfree(lbuf);
+ return -EFAULT;
+ }
+ lbuf[local_count] = '\0';
+
+ ptr = strstr(lbuf, "Online devices");
+ if (!ptr) {
+ PRINTK("Unable to parse data (missing \"Online devices\")\n");
+ goto out;
+ }
+ ptr = strstr(ptr, "\n");
+ if (!ptr) {
+ PRINTK("Unable to parse data (missing newline "
+ "after \"Online devices\")\n");
+ goto out;
+ }
+ ptr++;
+
+ if (strstr(ptr, "Waiting work element counts") == NULL) {
+ PRINTK("Unable to parse data (missing "
+ "\"Waiting work element counts\")\n");
+ goto out;
+ }
+
+ for (j = 0; j < 64 && *ptr; ptr++) {
+ /**
+ * '0' for no device, '1' for PCICA, '2' for PCICC,
+ * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
+		 * '5' for CEX2C and '6' for CEX2A
+ */
+ if (*ptr >= '0' && *ptr <= '6')
+ j++;
+ else if (*ptr == 'd' || *ptr == 'D')
+ zcrypt_disable_card(j++);
+ else if (*ptr == 'e' || *ptr == 'E')
+ zcrypt_enable_card(j++);
+ else if (*ptr != ' ' && *ptr != '\t')
+ break;
+ }
+out:
+ kfree(lbuf);
+ return count;
+}
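For orientation, a minimal userspace sketch of driving the proc parser
above; illustrative only, not part of the patch. The written text needs
only the "Online devices" header line, one character per card slot
('0'-'6' leaves the slot untouched, 'd' disables it, 'e' enables it) and
the "Waiting work element counts" marker. This example takes card 5
offline.

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char map[65];
		FILE *f;

		memset(map, '0', 64);	/* digits just advance the slot index */
		map[5] = 'd';		/* disable card 5 */
		map[64] = '\0';

		f = fopen("/proc/driver/z90crypt", "w");
		if (!f)
			return 1;
		fprintf(f, "Online devices\n%s\nWaiting work element counts\n",
			map);
		return fclose(f) ? 1 : 0;
	}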
+
+/**
+ * The module initialization code.
+ */
+int __init zcrypt_api_init(void)
+{
+ int rc;
+
+ /* Register the request sprayer. */
+ rc = misc_register(&zcrypt_misc_device);
+ if (rc < 0) {
+ PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
+ zcrypt_misc_device.minor, rc);
+ goto out;
+ }
+
+ /* Set up the proc file system */
+ zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
+ if (!zcrypt_entry) {
+ PRINTK("Couldn't create z90crypt proc entry\n");
+ rc = -ENOMEM;
+ goto out_misc;
+ }
+ zcrypt_entry->nlink = 1;
+ zcrypt_entry->data = NULL;
+ zcrypt_entry->read_proc = zcrypt_status_read;
+ zcrypt_entry->write_proc = zcrypt_status_write;
+
+ return 0;
+
+out_misc:
+ misc_deregister(&zcrypt_misc_device);
+out:
+ return rc;
+}
+
+/**
+ * The module termination code.
+ */
+void zcrypt_api_exit(void)
+{
+ remove_proc_entry("driver/z90crypt", NULL);
+ misc_deregister(&zcrypt_misc_device);
+}
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+module_init(zcrypt_api_init);
+module_exit(zcrypt_api_exit);
+#endif
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
new file mode 100644
index 00000000000..de4877ee618
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -0,0 +1,141 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_api.h
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_API_H_
+#define _ZCRYPT_API_H_
+
+/**
+ * Macro definitions
+ *
+ * PDEBUG prints debug messages in the form "zcrypt: function_name -> message"
+ *
+ * PRINTK is like PDEBUG, except that it is always enabled
+ * PRINTKN is like PRINTK, except that it does not include the function name
+ * PRINTKW is like PRINTK, except that it uses KERN_WARNING
+ * PRINTKC is like PRINTK, except that it uses KERN_CRIT
+ */
+#define DEV_NAME "zcrypt"
+
+#define PRINTK(fmt, args...) \
+ printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+#define PRINTKN(fmt, args...) \
+ printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
+#define PRINTKW(fmt, args...) \
+ printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+#define PRINTKC(fmt, args...) \
+ printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+
+#ifdef ZCRYPT_DEBUG
+#define PDEBUG(fmt, args...) \
+ printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
+#else
+#define PDEBUG(fmt, args...) do {} while (0)
+#endif
+
+#include "ap_bus.h"
+#include <asm/zcrypt.h>
+
+/* deprecated status calls */
+#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
+#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
+
+/**
+ * This structure is deprecated and the corresponding ioctl() has been
+ * replaced with individual ioctl()s for each piece of data!
+ */
+struct ica_z90_status {
+ int totalcount;
+ int leedslitecount; // PCICA
+ int leeds2count; // PCICC
+	// no PCIXCCCount field: left out of the struct for backward compatibility
+ int requestqWaitCount;
+ int pendingqWaitCount;
+ int totalOpenCount;
+ int cryptoDomain;
+ // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
+ // 5=CEX2C
+ unsigned char status[64];
+ // qdepth: # work elements waiting for each device
+ unsigned char qdepth[64];
+};
+
+/**
+ * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
+ * PCIXCC_MCL3, CEX2C, or CEX2A
+ *
+ * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed
+ * Internal Code (LIC) (EC J12220 level 29).
+ * PCIXCC_MCL2 refers to any LIC before this level.
+ */
+#define ZCRYPT_PCICA 1
+#define ZCRYPT_PCICC 2
+#define ZCRYPT_PCIXCC_MCL2 3
+#define ZCRYPT_PCIXCC_MCL3 4
+#define ZCRYPT_CEX2C 5
+#define ZCRYPT_CEX2A 6
+
+struct zcrypt_device;
+
+struct zcrypt_ops {
+ long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
+ long (*rsa_modexpo_crt)(struct zcrypt_device *,
+ struct ica_rsa_modexpo_crt *);
+ long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
+};
+
+struct zcrypt_device {
+ struct list_head list; /* Device list. */
+ spinlock_t lock; /* Per device lock. */
+ struct kref refcount; /* device refcounting */
+ struct ap_device *ap_dev; /* The "real" ap device. */
+ struct zcrypt_ops *ops; /* Crypto operations. */
+ int online; /* User online/offline */
+
+ int user_space_type; /* User space device id. */
+ char *type_string; /* User space device name. */
+ int min_mod_size; /* Min number of bits. */
+ int max_mod_size; /* Max number of bits. */
+ int short_crt; /* Card has crt length restriction. */
+ int speed_rating; /* Speed of the crypto device. */
+
+ int request_count; /* # current requests. */
+
+ struct ap_message reply; /* Per-device reply structure. */
+};
+
+struct zcrypt_device *zcrypt_device_alloc(size_t);
+void zcrypt_device_free(struct zcrypt_device *);
+void zcrypt_device_get(struct zcrypt_device *);
+int zcrypt_device_put(struct zcrypt_device *);
+int zcrypt_device_register(struct zcrypt_device *);
+void zcrypt_device_unregister(struct zcrypt_device *);
+int zcrypt_api_init(void);
+void zcrypt_api_exit(void);
+
+#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
new file mode 100644
index 00000000000..8dbcf0eef3e
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -0,0 +1,350 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_cca_key.h
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_CCA_KEY_H_
+#define _ZCRYPT_CCA_KEY_H_
+
+struct T6_keyBlock_hdr {
+ unsigned short blen;
+ unsigned short ulen;
+ unsigned short flags;
+};
+
+/**
+ * mapping for the cca private ME key token.
+ * Three parts of interest here: the header, the private section and
+ * the public section.
+ *
+ * mapping for the cca key token header
+ */
+struct cca_token_hdr {
+ unsigned char token_identifier;
+ unsigned char version;
+ unsigned short token_length;
+ unsigned char reserved[4];
+} __attribute__((packed));
+
+#define CCA_TKN_HDR_ID_EXT 0x1E
+
+/**
+ * mapping for the cca private ME section
+ */
+struct cca_private_ext_ME_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char private_key_hash[20];
+ unsigned char reserved1[4];
+ unsigned char key_format;
+ unsigned char reserved2;
+ unsigned char key_name_hash[20];
+ unsigned char key_use_flags[4];
+ unsigned char reserved3[6];
+ unsigned char reserved4[24];
+ unsigned char confounder[24];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+} __attribute__((packed));
+
+#define CCA_PVT_USAGE_ALL 0x80
+
+/**
+ * mapping for the cca public section
+ * In a private key, the modulus doesn't appear in the public
+ * section. So, an arbitrary public exponent of 0x010001 is
+ * used, and the section length is always 0x0F.
+ */
+struct cca_public_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char reserved[2];
+ unsigned short exponent_len;
+ unsigned short modulus_bit_len;
+ unsigned short modulus_byte_len; /* In a private key, this is 0 */
+} __attribute__((packed));
+
+/**
+ * mapping for the cca private CRT key 'token'
+ * The first three parts (the only parts considered in this release)
+ * are: the header, the private section and the public section.
+ * The header and public section are the same as for the
+ * struct cca_private_ext_ME
+ *
+ * Following the structure are the quantities p, q, dp, dq, u, pad,
+ * and modulus, in that order, where pad_len rounds the sum
+ * (p_len + q_len + dp_len + dq_len + u_len) up to the next
+ * multiple of 8.
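+ * For example, for a 1040-bit modulus (130 bytes): p_len = dp_len =
+ * u_len = 73 and q_len = dq_len = 65, so the sum is 349, whose
+ * residue mod 8 is 5, giving pad_len = 3.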
+ */
+struct cca_pvt_ext_CRT_sec {
+ unsigned char section_identifier;
+ unsigned char version;
+ unsigned short section_length;
+ unsigned char private_key_hash[20];
+ unsigned char reserved1[4];
+ unsigned char key_format;
+ unsigned char reserved2;
+ unsigned char key_name_hash[20];
+ unsigned char key_use_flags[4];
+ unsigned short p_len;
+ unsigned short q_len;
+ unsigned short dp_len;
+ unsigned short dq_len;
+ unsigned short u_len;
+ unsigned short mod_len;
+ unsigned char reserved3[4];
+ unsigned short pad_len;
+ unsigned char reserved4[52];
+ unsigned char confounder[8];
+} __attribute__((packed));
+
+#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
+#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
+
+/**
+ * Set up private key fields of a type6 MEX message.
+ * Note that all numerics in the key token are big-endian,
+ * while the key block header entries are little-endian (big-endian
+ * when the big_endian flag is set).
+ *
+ * @mex: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or -EFAULT
+ */
+static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
+ void *p, int big_endian)
+{
+ static struct cca_token_hdr static_pvt_me_hdr = {
+ .token_identifier = 0x1E,
+ .token_length = 0x0183,
+ };
+ static struct cca_private_ext_ME_sec static_pvt_me_sec = {
+ .section_identifier = 0x02,
+ .section_length = 0x016C,
+ .key_use_flags = {0x80,0x00,0x00,0x00},
+ };
+ static struct cca_public_sec static_pub_me_sec = {
+ .section_identifier = 0x04,
+ .section_length = 0x000F,
+ .exponent_len = 0x0003,
+ };
+ static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
+ struct {
+ struct T6_keyBlock_hdr t6_hdr;
+ struct cca_token_hdr pvtMeHdr;
+ struct cca_private_ext_ME_sec pvtMeSec;
+ struct cca_public_sec pubMeSec;
+ char exponent[3];
+ } __attribute__((packed)) *key = p;
+ unsigned char *temp;
+
+ memset(key, 0, sizeof(*key));
+
+ if (big_endian) {
+ key->t6_hdr.blen = cpu_to_be16(0x189);
+ key->t6_hdr.ulen = cpu_to_be16(0x189 - 2);
+ } else {
+ key->t6_hdr.blen = cpu_to_le16(0x189);
+ key->t6_hdr.ulen = cpu_to_le16(0x189 - 2);
+ }
+ key->pvtMeHdr = static_pvt_me_hdr;
+ key->pvtMeSec = static_pvt_me_sec;
+ key->pubMeSec = static_pub_me_sec;
+ /**
+ * In a private key, the modulus doesn't appear in the public
+ * section. So, an arbitrary public exponent of 0x010001 will be
+ * used.
+ */
+ memcpy(key->exponent, pk_exponent, 3);
+
+ /* key parameter block */
+ temp = key->pvtMeSec.exponent +
+ sizeof(key->pvtMeSec.exponent) - mex->inputdatalength;
+ if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+ return -EFAULT;
+
+ /* modulus */
+ temp = key->pvtMeSec.modulus +
+ sizeof(key->pvtMeSec.modulus) - mex->inputdatalength;
+ if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+ return -EFAULT;
+ key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength;
+ return sizeof(*key);
+}
+
+/**
+ * Set up public key fields of a type6 MEX message. Leading
+ * zeroes are stripped from the b_key.
+ * Note that all numerics in the key token are big-endian,
+ * while the key block header entries are little-endian (big-endian
+ * when the big_endian flag is set).
+ *
+ * @mex: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area, -EFAULT or -EINVAL
+ */
+static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
+ void *p, int big_endian)
+{
+ static struct cca_token_hdr static_pub_hdr = {
+ .token_identifier = 0x1E,
+ };
+ static struct cca_public_sec static_pub_sec = {
+ .section_identifier = 0x04,
+ };
+ struct {
+ struct T6_keyBlock_hdr t6_hdr;
+ struct cca_token_hdr pubHdr;
+ struct cca_public_sec pubSec;
+ char exponent[0];
+ } __attribute__((packed)) *key = p;
+ unsigned char *temp;
+ int i;
+
+ memset(key, 0, sizeof(*key));
+
+ key->pubHdr = static_pub_hdr;
+ key->pubSec = static_pub_sec;
+
+ /* key parameter block */
+ temp = key->exponent;
+ if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+ return -EFAULT;
+ /* Strip leading zeroes from b_key. */
+ for (i = 0; i < mex->inputdatalength; i++)
+ if (temp[i])
+ break;
+ if (i >= mex->inputdatalength)
+ return -EINVAL;
+ memmove(temp, temp + i, mex->inputdatalength - i);
+ temp += mex->inputdatalength - i;
+ /* modulus */
+ if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+ return -EFAULT;
+
+ key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
+ key->pubSec.modulus_byte_len = mex->inputdatalength;
+ key->pubSec.exponent_len = mex->inputdatalength - i;
+ key->pubSec.section_length = sizeof(key->pubSec) +
+ 2*mex->inputdatalength - i;
+ key->pubHdr.token_length =
+ key->pubSec.section_length + sizeof(key->pubHdr);
+ if (big_endian) {
+ key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4);
+ key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6);
+ } else {
+ key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4);
+ key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6);
+ }
+ return sizeof(*key) + 2*mex->inputdatalength - i;
+}
+
+/**
+ * Set up private key fields of a type6 CRT message.
+ * Note that all numerics in the key token are big-endian,
+ * while the key block header entries are little-endian (big-endian
+ * when the big_endian flag is set).
+ *
+ * @crt: pointer to user input data
+ * @p: pointer to memory area for the key
+ *
+ * Returns the size of the key area or -EFAULT
+ */
+static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
+ void *p, int big_endian)
+{
+ static struct cca_public_sec static_cca_pub_sec = {
+ .section_identifier = 4,
+ .section_length = 0x000f,
+ .exponent_len = 0x0003,
+ };
+ static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
+ struct {
+ struct T6_keyBlock_hdr t6_hdr;
+ struct cca_token_hdr token;
+ struct cca_pvt_ext_CRT_sec pvt;
+ char key_parts[0];
+ } __attribute__((packed)) *key = p;
+ struct cca_public_sec *pub;
+ int short_len, long_len, pad_len, key_len, size;
+
+ memset(key, 0, sizeof(*key));
+
+ short_len = crt->inputdatalength / 2;
+ long_len = short_len + 8;
+ pad_len = -(3*long_len + 2*short_len) & 7;
+ key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
+ size = sizeof(*key) + key_len + sizeof(*pub) + 3;
+
+ /* parameter block.key block */
+ if (big_endian) {
+ key->t6_hdr.blen = cpu_to_be16(size);
+ key->t6_hdr.ulen = cpu_to_be16(size - 2);
+ } else {
+ key->t6_hdr.blen = cpu_to_le16(size);
+ key->t6_hdr.ulen = cpu_to_le16(size - 2);
+ }
+
+ /* key token header */
+ key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
+ key->token.token_length = size - 6;
+
+ /* private section */
+ key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
+ key->pvt.section_length = sizeof(key->pvt) + key_len;
+ key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
+ key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL;
+ key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len;
+ key->pvt.q_len = key->pvt.dq_len = short_len;
+ key->pvt.mod_len = crt->inputdatalength;
+ key->pvt.pad_len = pad_len;
+
+ /* key parts */
+ if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
+ copy_from_user(key->key_parts + long_len,
+ crt->nq_prime, short_len) ||
+ copy_from_user(key->key_parts + long_len + short_len,
+ crt->bp_key, long_len) ||
+ copy_from_user(key->key_parts + 2*long_len + short_len,
+ crt->bq_key, short_len) ||
+ copy_from_user(key->key_parts + 2*long_len + 2*short_len,
+ crt->u_mult_inv, long_len))
+ return -EFAULT;
+ memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
+ 0xff, crt->inputdatalength);
+ pub = (struct cca_public_sec *)(key->key_parts + key_len);
+ *pub = static_cca_pub_sec;
+ pub->modulus_bit_len = 8 * crt->inputdatalength;
+ /**
+ * In a private key, the modulus doesn't appear in the public
+ * section. So, an arbitrary public exponent of 0x010001 will be
+ * used.
+ */
+ memcpy((char *) (pub + 1), pk_exponent, 3);
+ return size;
+}
+
+#endif /* _ZCRYPT_CCA_KEY_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
new file mode 100644
index 00000000000..a62b00083d0
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -0,0 +1,435 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_cex2a.c
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_cex2a.h"
+
+#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
+#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
+
+#define CEX2A_SPEED_RATING 970
+
+#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
+#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
+
+#define CEX2A_CLEANUP_TIME (15*HZ)
+
+static struct ap_device_id zcrypt_cex2a_ids[] = {
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
+ { /* end of list */ },
+};
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
+ "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+#endif
+
+static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
+static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
+static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
+ struct ap_message *);
+
+static struct ap_driver zcrypt_cex2a_driver = {
+ .probe = zcrypt_cex2a_probe,
+ .remove = zcrypt_cex2a_remove,
+ .receive = zcrypt_cex2a_receive,
+ .ids = zcrypt_cex2a_ids,
+};
+
+/**
+ * Convert an ICAMEX message to a type50 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: crypto request message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ unsigned char *mod, *exp, *inp;
+ int mod_len;
+
+ mod_len = mex->inputdatalength;
+
+ if (mod_len <= 128) {
+ struct type50_meb1_msg *meb1 = ap_msg->message;
+ memset(meb1, 0, sizeof(*meb1));
+ ap_msg->length = sizeof(*meb1);
+ meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb1->header.msg_len = sizeof(*meb1);
+ meb1->keyblock_type = TYPE50_MEB1_FMT;
+ mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+ exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+ inp = meb1->message + sizeof(meb1->message) - mod_len;
+ } else {
+ struct type50_meb2_msg *meb2 = ap_msg->message;
+ memset(meb2, 0, sizeof(*meb2));
+ ap_msg->length = sizeof(*meb2);
+ meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+ meb2->header.msg_len = sizeof(*meb2);
+ meb2->keyblock_type = TYPE50_MEB2_FMT;
+ mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+ exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+ inp = meb2->message + sizeof(meb2->message) - mod_len;
+ }
+
+ if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+ copy_from_user(exp, mex->b_key, mod_len) ||
+ copy_from_user(inp, mex->inputdata, mod_len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type50 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: crypto request message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ int mod_len, short_len, long_len, long_offset;
+ unsigned char *p, *q, *dp, *dq, *u, *inp;
+
+ mod_len = crt->inputdatalength;
+ short_len = mod_len / 2;
+ long_len = mod_len / 2 + 8;
+
+ /*
+ * CEX2A cannot handle p, dp, or U > 128 bytes.
+ * If we have one of these, we need to do extra checking.
+ */
+ if (long_len > 128) {
+ /*
+ * zcrypt_rsa_crt already checked for the leading
+		 * zeroes of np_prime, bp_key and u_mult_inv.
+ */
+ long_offset = long_len - 128;
+ long_len = 128;
+ } else
+ long_offset = 0;
+
+ /*
+ * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
+ * the larger message structure.
+ */
+ if (long_len <= 64) {
+ struct type50_crb1_msg *crb1 = ap_msg->message;
+ memset(crb1, 0, sizeof(*crb1));
+ ap_msg->length = sizeof(*crb1);
+ crb1->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb1->header.msg_len = sizeof(*crb1);
+ crb1->keyblock_type = TYPE50_CRB1_FMT;
+ p = crb1->p + sizeof(crb1->p) - long_len;
+ q = crb1->q + sizeof(crb1->q) - short_len;
+ dp = crb1->dp + sizeof(crb1->dp) - long_len;
+ dq = crb1->dq + sizeof(crb1->dq) - short_len;
+ u = crb1->u + sizeof(crb1->u) - long_len;
+ inp = crb1->message + sizeof(crb1->message) - mod_len;
+ } else {
+ struct type50_crb2_msg *crb2 = ap_msg->message;
+ memset(crb2, 0, sizeof(*crb2));
+ ap_msg->length = sizeof(*crb2);
+ crb2->header.msg_type_code = TYPE50_TYPE_CODE;
+ crb2->header.msg_len = sizeof(*crb2);
+ crb2->keyblock_type = TYPE50_CRB2_FMT;
+ p = crb2->p + sizeof(crb2->p) - long_len;
+ q = crb2->q + sizeof(crb2->q) - short_len;
+ dp = crb2->dp + sizeof(crb2->dp) - long_len;
+ dq = crb2->dq + sizeof(crb2->dq) - short_len;
+ u = crb2->u + sizeof(crb2->u) - long_len;
+ inp = crb2->message + sizeof(crb2->message) - mod_len;
+ }
+
+ if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
+ copy_from_user(q, crt->nq_prime, short_len) ||
+ copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
+ copy_from_user(dq, crt->bq_key, short_len) ||
+ copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
+ copy_from_user(inp, crt->inputdata, mod_len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * Copy results from a type 80 reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @data: pointer to user output data
+ * @length: size of user output data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int convert_type80(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type80_hdr *t80h = reply->message;
+ unsigned char *data;
+
+ if (t80h->len < sizeof(*t80h) + outputdatalength) {
+		/* The result is shorter than requested; the CEX2A card is misbehaving. */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+ BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+ data = reply->message + t80h->len - outputdatalength;
+ if (copy_to_user(outputdata, data, outputdatalength))
+ return -EFAULT;
+ return 0;
+}
+
+static int convert_response(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *) reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE80_RSP_CODE:
+ return convert_type80(zdev, reply,
+ outputdata, outputdatalength);
+ default: /* Unknown response type, this should NEVER EVER happen */
+ PRINTK("Unrecognized Message Header: %08x%08x\n",
+ *(unsigned int *) reply->message,
+ *(unsigned int *) (reply->message+4));
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct type80_hdr *t80h = reply->message;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply))
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ else if (t80h->type == TYPE80_RSP_CODE) {
+ length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
+ memcpy(msg->message, reply->message, length);
+ } else
+ memcpy(msg->message, reply->message, sizeof error_reply);
+ complete((struct completion *) msg->private);
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * CEX2A device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo *mex)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
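+	/* psmid: pid in the upper 32 bits, a global sequence number in
+	 * the lower 32 bits, so the reply can be matched to the request. */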
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &work, CEX2A_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response(zdev, &ap_msg, mex->outputdata,
+ mex->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ kfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the CEX2A
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * CEX2A device to the request distributor
+ * @crt: pointer to the modexpoc_crt request buffer
+ */
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &work, CEX2A_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response(zdev, &ap_msg, crt->outputdata,
+ crt->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ kfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The crypto operations for a CEX2A card.
+ */
+static struct zcrypt_ops zcrypt_cex2a_ops = {
+ .rsa_modexpo = zcrypt_cex2a_modexpo,
+ .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+};
+
+/**
+ * Probe function for CEX2A cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev;
+ int rc;
+
+ zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->ap_dev = ap_dev;
+ zdev->ops = &zcrypt_cex2a_ops;
+ zdev->online = 1;
+ zdev->user_space_type = ZCRYPT_CEX2A;
+ zdev->type_string = "CEX2A";
+ zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+ zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+ zdev->short_crt = 1;
+ zdev->speed_rating = CEX2A_SPEED_RATING;
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
+ if (rc)
+ goto out_free;
+ return 0;
+
+out_free:
+ ap_dev->private = NULL;
+ zcrypt_device_free(zdev);
+ return rc;
+}
+
+/**
+ * This is called to remove the extended CEX2A driver information
+ * if an AP device is removed.
+ */
+static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev = ap_dev->private;
+
+ zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_cex2a_init(void)
+{
+ return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
+}
+
+void __exit zcrypt_cex2a_exit(void)
+{
+ ap_driver_unregister(&zcrypt_cex2a_driver);
+}
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+module_init(zcrypt_cex2a_init);
+module_exit(zcrypt_cex2a_exit);
+#endif
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 00000000000..8f69d1dacab
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,126 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_cex2a.h
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_CEX2A_H_
+#define _ZCRYPT_CEX2A_H_
+
+/**
+ * The type 50 message family is associated with a CEX2A card.
+ *
+ * The four members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
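+ *
+ * For example, a 192-byte modulus in a type50_meb2_msg occupies
+ * the last 192 bytes of its 256-byte modulus field; the leading
+ * 64 bytes stay zero.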
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type50_hdr {
+ unsigned char reserved1;
+ unsigned char msg_type_code; /* 0x50 */
+ unsigned short msg_len;
+ unsigned char reserved2;
+ unsigned char ignored;
+ unsigned short reserved3;
+} __attribute__((packed));
+
+#define TYPE50_TYPE_CODE 0x50
+
+#define TYPE50_MEB1_FMT 0x0001
+#define TYPE50_MEB2_FMT 0x0002
+#define TYPE50_CRB1_FMT 0x0011
+#define TYPE50_CRB2_FMT 0x0012
+
+/* Mod-Exp, with a small modulus */
+struct type50_meb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0001 */
+ unsigned char reserved[6];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+ unsigned char message[128];
+} __attribute__((packed));
+
+/* Mod-Exp, with a large modulus */
+struct type50_meb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0002 */
+ unsigned char reserved[6];
+ unsigned char exponent[256];
+ unsigned char modulus[256];
+ unsigned char message[256];
+} __attribute__((packed));
+
+/* CRT, with a small modulus */
+struct type50_crb1_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0011 */
+ unsigned char reserved[6];
+ unsigned char p[64];
+ unsigned char q[64];
+ unsigned char dp[64];
+ unsigned char dq[64];
+ unsigned char u[64];
+ unsigned char message[128];
+} __attribute__((packed));
+
+/* CRT, with a large modulus */
+struct type50_crb2_msg {
+ struct type50_hdr header;
+ unsigned short keyblock_type; /* 0x0012 */
+ unsigned char reserved[6];
+ unsigned char p[128];
+ unsigned char q[128];
+ unsigned char dp[128];
+ unsigned char dq[128];
+ unsigned char u[128];
+ unsigned char message[256];
+} __attribute__((packed));
+
+/**
+ * The type 80 response family is associated with a CEX2A card.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+#define TYPE80_RSP_CODE 0x80
+
+struct type80_hdr {
+ unsigned char reserved1;
+ unsigned char type; /* 0x80 */
+ unsigned short len;
+ unsigned char code; /* 0x00 */
+ unsigned char reserved2[3];
+ unsigned char reserved3[8];
+} __attribute__((packed));
+
+int zcrypt_cex2a_init(void);
+void zcrypt_cex2a_exit(void);
+
+#endif /* _ZCRYPT_CEX2A_H_ */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
new file mode 100644
index 00000000000..2cb616ba8be
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -0,0 +1,133 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_error.h
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_ERROR_H_
+#define _ZCRYPT_ERROR_H_
+
+#include "zcrypt_api.h"
+
+/**
+ * Reply Messages
+ *
+ * Error reply messages are of two types:
+ * 82: Error (see below)
+ * 88: Error (see below)
+ * Both type 82 and type 88 have the same structure in the header.
+ *
+ * Request reply messages are of three known types:
+ * 80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
+ * 84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
+ * 86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
+ *
+ */
+struct error_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x82 or 0x88 */
+ unsigned char reserved2[2]; /* 0x0000 */
+ unsigned char reply_code; /* reply code */
+ unsigned char reserved3[3]; /* 0x000000 */
+};
+
+#define TYPE82_RSP_CODE 0x82
+#define TYPE88_RSP_CODE 0x88
+
+#define REP82_ERROR_MACHINE_FAILURE 0x10
+#define REP82_ERROR_PREEMPT_FAILURE 0x12
+#define REP82_ERROR_CHECKPT_FAILURE 0x14
+#define REP82_ERROR_MESSAGE_TYPE 0x20
+#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
+#define REP82_ERROR_INVALID_MSG_LEN 0x23
+#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
+#define REP82_ERROR_FORMAT_FIELD 0x29
+#define REP82_ERROR_INVALID_COMMAND 0x30
+#define REP82_ERROR_MALFORMED_MSG 0x40
+#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
+#define REP82_ERROR_WORD_ALIGNMENT 0x60
+#define REP82_ERROR_MESSAGE_LENGTH 0x80
+#define REP82_ERROR_OPERAND_INVALID 0x82
+#define REP82_ERROR_OPERAND_SIZE 0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
+#define REP82_ERROR_RESERVED_FIELD 0x88
+#define REP82_ERROR_TRANSPORT_FAIL 0x90
+#define REP82_ERROR_PACKET_TRUNCATED 0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
+
+#define REP88_ERROR_MODULE_FAILURE 0x10
+
+#define REP88_ERROR_MESSAGE_TYPE 0x20
+#define REP88_ERROR_MESSAGE_MALFORMD 0x22
+#define REP88_ERROR_MESSAGE_LENGTH 0x23
+#define REP88_ERROR_RESERVED_FIELD 0x24
+#define REP88_ERROR_KEY_TYPE 0x34
+#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
+#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
+#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
+
+static inline int convert_error(struct zcrypt_device *zdev,
+ struct ap_message *reply)
+{
+ struct error_hdr *ehdr = reply->message;
+
+ PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
+ ehdr->type, *(unsigned int *) reply->message,
+ *(unsigned int *) (reply->message + 4));
+
+ switch (ehdr->reply_code) {
+ case REP82_ERROR_OPERAND_INVALID:
+ case REP82_ERROR_OPERAND_SIZE:
+ case REP82_ERROR_EVEN_MOD_IN_OPND:
+ case REP88_ERROR_MESSAGE_MALFORMD:
+ // REP88_ERROR_INVALID_KEY // '82' CEX2A
+ // REP88_ERROR_OPERAND // '84' CEX2A
+ // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
+ /* Invalid input data. */
+ return -EINVAL;
+ case REP82_ERROR_MESSAGE_TYPE:
+ // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
+ /**
+		 * Sending a message of the wrong type is a bug in the
+ * device driver. Warn about it, disable the device
+ * and then repeat the request.
+ */
+ WARN_ON(1);
+ zdev->online = 0;
+ return -EAGAIN;
+ case REP82_ERROR_TRANSPORT_FAIL:
+ case REP82_ERROR_MACHINE_FAILURE:
+ // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
+ /* If a card fails disable it and repeat the request. */
+ zdev->online = 0;
+ return -EAGAIN;
+ default:
+ PRINTKW("unknown type %02x reply code = %d\n",
+ ehdr->type, ehdr->reply_code);
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+#endif /* _ZCRYPT_ERROR_H_ */
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
new file mode 100644
index 00000000000..2a9349ad68b
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -0,0 +1,100 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_mono.c
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/compat.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_pcica.h"
+#include "zcrypt_pcicc.h"
+#include "zcrypt_pcixcc.h"
+#include "zcrypt_cex2a.h"
+
+/**
+ * The module initialization code.
+ */
+int __init zcrypt_init(void)
+{
+ int rc;
+
+ rc = ap_module_init();
+ if (rc)
+ goto out;
+ rc = zcrypt_api_init();
+ if (rc)
+ goto out_ap;
+ rc = zcrypt_pcica_init();
+ if (rc)
+ goto out_api;
+ rc = zcrypt_pcicc_init();
+ if (rc)
+ goto out_pcica;
+ rc = zcrypt_pcixcc_init();
+ if (rc)
+ goto out_pcicc;
+ rc = zcrypt_cex2a_init();
+ if (rc)
+ goto out_pcixcc;
+ return 0;
+
+out_pcixcc:
+ zcrypt_pcixcc_exit();
+out_pcicc:
+ zcrypt_pcicc_exit();
+out_pcica:
+ zcrypt_pcica_exit();
+out_api:
+ zcrypt_api_exit();
+out_ap:
+ ap_module_exit();
+out:
+ return rc;
+}
+
+/**
+ * The module termination code.
+ */
+void __exit zcrypt_exit(void)
+{
+ zcrypt_cex2a_exit();
+ zcrypt_pcixcc_exit();
+ zcrypt_pcicc_exit();
+ zcrypt_pcica_exit();
+ zcrypt_api_exit();
+ ap_module_exit();
+}
+
+module_init(zcrypt_init);
+module_exit(zcrypt_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
new file mode 100644
index 00000000000..b6a4ecdc802
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -0,0 +1,418 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_pcica.c
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcica.h"
+
+#define PCICA_MIN_MOD_SIZE 1 /* 8 bits */
+#define PCICA_MAX_MOD_SIZE 256 /* 2048 bits */
+
+#define PCICA_SPEED_RATING 2800
+
+#define PCICA_MAX_MESSAGE_SIZE 0x3a0 /* sizeof(struct type4_lcr) */
+#define PCICA_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
+
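+/* How long to wait for a PCICA reply before the request is cancelled. */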
+#define PCICA_CLEANUP_TIME (15*HZ)
+
+static struct ap_device_id zcrypt_pcica_ids[] = {
+ { AP_DEVICE(AP_DEVICE_TYPE_PCICA) },
+ { /* end of list */ },
+};
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
+ "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+#endif
+
+static int zcrypt_pcica_probe(struct ap_device *ap_dev);
+static void zcrypt_pcica_remove(struct ap_device *ap_dev);
+static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
+ struct ap_message *);
+
+static struct ap_driver zcrypt_pcica_driver = {
+ .probe = zcrypt_pcica_probe,
+ .remove = zcrypt_pcica_remove,
+ .receive = zcrypt_pcica_receive,
+ .ids = zcrypt_pcica_ids,
+};
+
+/**
+ * Convert an ICAMEX message to a type4 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to fill in
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ unsigned char *modulus, *exponent, *message;
+ int mod_len;
+
+ mod_len = mex->inputdatalength;
+
+ if (mod_len <= 128) {
+ struct type4_sme *sme = ap_msg->message;
+ memset(sme, 0, sizeof(*sme));
+ ap_msg->length = sizeof(*sme);
+ sme->header.msg_fmt = TYPE4_SME_FMT;
+ sme->header.msg_len = sizeof(*sme);
+ sme->header.msg_type_code = TYPE4_TYPE_CODE;
+ sme->header.request_code = TYPE4_REQU_CODE;
+ modulus = sme->modulus + sizeof(sme->modulus) - mod_len;
+ exponent = sme->exponent + sizeof(sme->exponent) - mod_len;
+ message = sme->message + sizeof(sme->message) - mod_len;
+ } else {
+ struct type4_lme *lme = ap_msg->message;
+ memset(lme, 0, sizeof(*lme));
+ ap_msg->length = sizeof(*lme);
+ lme->header.msg_fmt = TYPE4_LME_FMT;
+ lme->header.msg_len = sizeof(*lme);
+ lme->header.msg_type_code = TYPE4_TYPE_CODE;
+ lme->header.request_code = TYPE4_REQU_CODE;
+ modulus = lme->modulus + sizeof(lme->modulus) - mod_len;
+ exponent = lme->exponent + sizeof(lme->exponent) - mod_len;
+ message = lme->message + sizeof(lme->message) - mod_len;
+ }
+
+ if (copy_from_user(modulus, mex->n_modulus, mod_len) ||
+ copy_from_user(exponent, mex->b_key, mod_len) ||
+ copy_from_user(message, mex->inputdata, mod_len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type4 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to fill in
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ unsigned char *p, *q, *dp, *dq, *u, *inp;
+ int mod_len, short_len, long_len;
+
+ mod_len = crt->inputdatalength;
+ short_len = mod_len / 2;
+ long_len = mod_len / 2 + 8;
+
+ if (mod_len <= 128) {
+ struct type4_scr *scr = ap_msg->message;
+ memset(scr, 0, sizeof(*scr));
+ ap_msg->length = sizeof(*scr);
+ scr->header.msg_type_code = TYPE4_TYPE_CODE;
+ scr->header.request_code = TYPE4_REQU_CODE;
+ scr->header.msg_fmt = TYPE4_SCR_FMT;
+ scr->header.msg_len = sizeof(*scr);
+ p = scr->p + sizeof(scr->p) - long_len;
+ q = scr->q + sizeof(scr->q) - short_len;
+ dp = scr->dp + sizeof(scr->dp) - long_len;
+ dq = scr->dq + sizeof(scr->dq) - short_len;
+ u = scr->u + sizeof(scr->u) - long_len;
+ inp = scr->message + sizeof(scr->message) - mod_len;
+ } else {
+ struct type4_lcr *lcr = ap_msg->message;
+ memset(lcr, 0, sizeof(*lcr));
+ ap_msg->length = sizeof(*lcr);
+ lcr->header.msg_type_code = TYPE4_TYPE_CODE;
+ lcr->header.request_code = TYPE4_REQU_CODE;
+ lcr->header.msg_fmt = TYPE4_LCR_FMT;
+ lcr->header.msg_len = sizeof(*lcr);
+ p = lcr->p + sizeof(lcr->p) - long_len;
+ q = lcr->q + sizeof(lcr->q) - short_len;
+ dp = lcr->dp + sizeof(lcr->dp) - long_len;
+ dq = lcr->dq + sizeof(lcr->dq) - short_len;
+ u = lcr->u + sizeof(lcr->u) - long_len;
+ inp = lcr->message + sizeof(lcr->message) - mod_len;
+ }
+
+ if (copy_from_user(p, crt->np_prime, long_len) ||
+ copy_from_user(q, crt->nq_prime, short_len) ||
+ copy_from_user(dp, crt->bp_key, long_len) ||
+ copy_from_user(dq, crt->bq_key, short_len) ||
+ copy_from_user(u, crt->u_mult_inv, long_len) ||
+ copy_from_user(inp, crt->inputdata, mod_len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Copy results from a type 84 reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static inline int convert_type84(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type84_hdr *t84h = reply->message;
+ char *data;
+
+ if (t84h->len < sizeof(*t84h) + outputdatalength) {
+ /* The result is too short; the PCICA card should not do that. */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+ BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
+ data = reply->message + t84h->len - outputdatalength;
+ if (copy_to_user(outputdata, data, outputdatalength))
+ return -EFAULT;
+ return 0;
+}
+
+static int convert_response(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *) reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE84_RSP_CODE:
+ return convert_type84(zdev, reply,
+ outputdata, outputdatalength);
+ default: /* Unknown response type, this should NEVER EVER happen */
+ PRINTK("Unrecognized Message Header: %08x%08x\n",
+ *(unsigned int *) reply->message,
+ *(unsigned int *) (reply->message+4));
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_pcica_receive(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct type84_hdr *t84h = reply->message;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply))
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ else if (t84h->code == TYPE84_RSP_CODE) {
+ length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len);
+ memcpy(msg->message, reply->message, length);
+ } else
+ memcpy(msg->message, reply->message, sizeof error_reply);
+ complete((struct completion *) msg->private);
+}
+
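+/* Module-global sequence counter; it supplies the low 32 bits of each
+ * psmid so that concurrent requests get distinct message ids. */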
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the PCICA
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCICA device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo *mex)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+ ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
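+ /* psmid: caller's pid in the upper 32 bits, a global sequence
+ * number in the lower 32 bits, so replies can be matched to
+ * their requests. */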
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &work, PCICA_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response(zdev, &ap_msg, mex->outputdata,
+ mex->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ kfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCICA
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCICA device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+ ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &work, PCICA_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response(zdev, &ap_msg, crt->outputdata,
+ crt->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ kfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The crypto operations for a PCICA card.
+ */
+static struct zcrypt_ops zcrypt_pcica_ops = {
+ .rsa_modexpo = zcrypt_pcica_modexpo,
+ .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt,
+};
+
+/**
+ * Probe function for PCICA cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcica_probe(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev;
+ int rc;
+
+ zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->ap_dev = ap_dev;
+ zdev->ops = &zcrypt_pcica_ops;
+ zdev->online = 1;
+ zdev->user_space_type = ZCRYPT_PCICA;
+ zdev->type_string = "PCICA";
+ zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
+ zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
+ zdev->speed_rating = PCICA_SPEED_RATING;
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
+ if (rc)
+ goto out_free;
+ return 0;
+
+out_free:
+ ap_dev->private = NULL;
+ zcrypt_device_free(zdev);
+ return rc;
+}
+
+/**
+ * This is called to remove the extended PCICA driver information
+ * when an AP device is removed.
+ */
+static void zcrypt_pcica_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev = ap_dev->private;
+
+ zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_pcica_init(void)
+{
+ return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica");
+}
+
+void zcrypt_pcica_exit(void)
+{
+ ap_driver_unregister(&zcrypt_pcica_driver);
+}
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+module_init(zcrypt_pcica_init);
+module_exit(zcrypt_pcica_exit);
+#endif
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
new file mode 100644
index 00000000000..3be11187f6d
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.h
@@ -0,0 +1,117 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_pcica.h
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_PCICA_H_
+#define _ZCRYPT_PCICA_H_
+
+/**
+ * The type 4 message family is associated with a PCICA card.
+ *
+ * The four members of the family are described below.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type4_hdr {
+ unsigned char reserved1;
+ unsigned char msg_type_code; /* 0x04 */
+ unsigned short msg_len;
+ unsigned char request_code; /* 0x40 */
+ unsigned char msg_fmt;
+ unsigned short reserved2;
+} __attribute__((packed));
+
+#define TYPE4_TYPE_CODE 0x04
+#define TYPE4_REQU_CODE 0x40
+
+#define TYPE4_SME_FMT 0x00
+#define TYPE4_LME_FMT 0x10
+#define TYPE4_SCR_FMT 0x40
+#define TYPE4_LCR_FMT 0x50
+
+/* Mod-Exp, with a small modulus */
+struct type4_sme {
+ struct type4_hdr header;
+ unsigned char message[128];
+ unsigned char exponent[128];
+ unsigned char modulus[128];
+} __attribute__((packed));
+
+/* Mod-Exp, with a large modulus */
+struct type4_lme {
+ struct type4_hdr header;
+ unsigned char message[256];
+ unsigned char exponent[256];
+ unsigned char modulus[256];
+} __attribute__((packed));
+
+/* CRT, with a small modulus */
+struct type4_scr {
+ struct type4_hdr header;
+ unsigned char message[128];
+ unsigned char dp[72];
+ unsigned char dq[64];
+ unsigned char p[72];
+ unsigned char q[64];
+ unsigned char u[72];
+} __attribute__((packed));
+
+/* CRT, with a large modulus */
+struct type4_lcr {
+ struct type4_hdr header;
+ unsigned char message[256];
+ unsigned char dp[136];
+ unsigned char dq[128];
+ unsigned char p[136];
+ unsigned char q[128];
+ unsigned char u[136];
+} __attribute__((packed));
+
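+/*
+ * Illustrative sketch, not used by the driver: all operand fields above
+ * are right-justified, so an op_len byte operand starts at the end of
+ * its fixed-size field. The hypothetical helper below mirrors the
+ * pointer arithmetic in zcrypt_pcica.c (the enclosing struct is zeroed
+ * first, which provides the left zero-padding).
+ */
+static inline unsigned char *type4_right_justify(unsigned char *field,
+ int field_len, int op_len)
+{
+ return field + field_len - op_len;
+}
+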
+/**
+ * The type 84 response family is associated with a PCICA card.
+ *
+ * Note that all unsigned char arrays are right-justified and left-padded
+ * with zeroes.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+
+struct type84_hdr {
+ unsigned char reserved1;
+ unsigned char code;
+ unsigned short len;
+ unsigned char reserved2[4];
+} __attribute__((packed));
+
+#define TYPE84_RSP_CODE 0x84
+
+int zcrypt_pcica_init(void);
+void zcrypt_pcica_exit(void);
+
+#endif /* _ZCRYPT_PCICA_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
new file mode 100644
index 00000000000..f295a403b29
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -0,0 +1,630 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_pcicc.c
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcicc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCICC_MIN_MOD_SIZE 64 /* 512 bits */
+#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
+#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
+
+/**
+ * PCICC cards need a speed rating of 0. This keeps them at the end of
+ * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
+ * used if no other cards are present because they are slow and can only
+ * cope with PKCS12 padded requests. The logic is convoluted: PKCS11 padded
+ * requests are rejected. The modexpo function encrypts PKCS12 padded data
+ * and decrypts any non-PKCS12 padded data (except PKCS11) on the assumption
+ * that it is encrypted PKCS12 data. The modexpo_crt function always decrypts
+ * the data on the assumption that it is PKCS12 encrypted data.
+ */
+#define PCICC_SPEED_RATING 0
+
+#define PCICC_MAX_MESSAGE_SIZE 0x710 /* max size type6 v1 crt message */
+#define PCICC_MAX_RESPONSE_SIZE 0x710 /* max size type86 v1 reply */
+
+#define PCICC_CLEANUP_TIME (15*HZ)
+
+static struct ap_device_id zcrypt_pcicc_ids[] = {
+ { AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
+ { /* end of list */ },
+};
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
+ "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+#endif
+
+static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
+static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
+static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
+ struct ap_message *);
+
+static struct ap_driver zcrypt_pcicc_driver = {
+ .probe = zcrypt_pcicc_probe,
+ .remove = zcrypt_pcicc_remove,
+ .receive = zcrypt_pcicc_receive,
+ .ids = zcrypt_pcicc_ids,
+};
+
+/**
+ * The following is used to initialize the CPRB passed to the PCICC card
+ * in a type6 message. The 3 fields that must be filled in at execution
+ * time are req_parml, rpl_parml and usage_domain. Note that all three
+ * fields are *little*-endian. Actually, everything about this interface
+ * is ascii/little-endian, since the device has 'Intel inside'.
+ *
+ * The CPRB is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
+ * - VUD block
+ */
+static struct CPRB static_cprb = {
+ .cprb_len = __constant_cpu_to_le16(0x0070),
+ .cprb_ver_id = 0x41,
+ .func_id = {0x54,0x32},
+ .checkpoint_flag= 0x01,
+ .svr_namel = __constant_cpu_to_le16(0x0008),
+ .svr_name = {'I','C','S','F',' ',' ',' ',' '}
+};
+
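+/*
+ * Illustrative sketch, not part of the driver: because the CPRB is
+ * little-endian and s390 is big-endian, the three run-time fields must
+ * be stored with cpu_to_le16(). Hypothetical values for a 0x200 byte
+ * parameter block in AP domain 6:
+ *
+ * struct CPRB cprb = static_cprb;
+ * cprb.usage_domain[0] = 6;
+ * cprb.req_parml = cpu_to_le16(0x200);
+ * cprb.rpl_parml = cpu_to_le16(0x200);
+ */
+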
+/**
+ * Check the message for PKCS11 padding.
+ */
+static inline int is_PKCS11_padded(unsigned char *buffer, int length)
+{
+ int i;
+ if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
+ return 0;
+ for (i = 2; i < length; i++)
+ if (buffer[i] != 0xFF)
+ break;
+ if (i < 10 || i == length)
+ return 0;
+ if (buffer[i] != 0x00)
+ return 0;
+ return 1;
+}
+
+/**
+ * Check the message for PKCS12 padding.
+ */
+static inline int is_PKCS12_padded(unsigned char *buffer, int length)
+{
+ int i;
+ if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
+ return 0;
+ for (i = 2; i < length; i++)
+ if (buffer[i] == 0x00)
+ break;
+ if ((i < 10) || (i == length))
+ return 0;
+ if (buffer[i] != 0x00)
+ return 0;
+ return 1;
+}
+
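+/*
+ * Illustrative sketch, not called by the driver: how the two checks
+ * above classify a minimal hypothetical buffer. A PKCS12 (block type
+ * 2) frame is 0x00 0x02, at least eight nonzero pad bytes, a 0x00
+ * separator, then the payload.
+ */
+static inline int example_padding_checks(void)
+{
+ static unsigned char buf[] = {
+ 0x00, 0x02, /* block type 2 */
+ 0x11, 0x22, 0x33, 0x44, /* eight nonzero pad bytes */
+ 0x55, 0x66, 0x77, 0x88,
+ 0x00, /* separator */
+ 0xDE, 0xAD /* payload */
+ };
+ /* returns 1: PKCS12 padded, not PKCS11 padded */
+ return is_PKCS12_padded(buf, sizeof(buf)) &&
+ !is_PKCS11_padded(buf, sizeof(buf));
+}
+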
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to fill in
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or a negative error code (-EFAULT, -EINVAL, -ENODEV).
+ */
+static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ static struct type6_hdr static_type6_hdr = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
+ 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
+ .function_code = {'P','K'},
+ };
+ static struct function_and_rules_block static_pke_function_and_rules ={
+ .function_code = {'P','K'},
+ .ulen = __constant_cpu_to_le16(10),
+ .only_rule = {'P','K','C','S','-','1','.','2'}
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRB cprb;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __attribute__((packed)) *msg = ap_msg->message;
+ int vud_len, pad_len, size;
+
+ /* VUD.ciphertext */
+ if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+ return -EFAULT;
+
+ if (is_PKCS11_padded(msg->text, mex->inputdatalength))
+ return -EINVAL;
+
+ /* static message header and f&r */
+ msg->hdr = static_type6_hdr;
+ msg->fr = static_pke_function_and_rules;
+
+ if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
+ /* strip the padding and adjust the data length */
+ pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
+ if (pad_len <= 9 || pad_len >= mex->inputdatalength)
+ return -ENODEV;
+ vud_len = mex->inputdatalength - pad_len;
+ memmove(msg->text, msg->text + pad_len, vud_len);
+ msg->length = cpu_to_le16(vud_len + 2);
+
+ /* Set up key after the variable length text. */
+ size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + vud_len; /* total size of msg */
+ } else {
+ vud_len = mex->inputdatalength;
+ msg->length = cpu_to_le16(2 + vud_len);
+
+ msg->hdr.function_code[1] = 'D';
+ msg->fr.function_code[1] = 'D';
+
+ /* Set up key after the variable length text. */
+ size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + vud_len; /* total size of msg */
+ }
+
+ /* message header, cprb and f&r */
+ msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
+ msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprb = static_cprb;
+ msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
+ sizeof(msg->cprb));
+ msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);
+
+ ap_msg->length = (size + 3) & -4;
+ return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to the AP message to fill in
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or a negative error code (-EFAULT, -EINVAL).
+ */
+static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ static struct type6_hdr static_type6_hdr = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
+ 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
+ .function_code = {'P','D'},
+ };
+ static struct function_and_rules_block static_pkd_function_and_rules ={
+ .function_code = {'P','D'},
+ .ulen = __constant_cpu_to_le16(10),
+ .only_rule = {'P','K','C','S','-','1','.','2'}
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRB cprb;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __attribute__((packed)) *msg = ap_msg->message;
+ int size;
+
+ /* VUD.ciphertext */
+ msg->length = cpu_to_le16(2 + crt->inputdatalength);
+ if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+ return -EFAULT;
+
+ if (is_PKCS11_padded(msg->text, crt->inputdatalength))
+ return -EINVAL;
+
+ /* Set up key after the variable length text. */
+ size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
+
+ /* message header, cprb and f&r */
+ msg->hdr = static_type6_hdr;
+ msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
+ msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprb = static_cprb;
+ msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprb.req_parml = msg->cprb.rpl_parml =
+ cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));
+
+ msg->fr = static_pkd_function_and_rules;
+
+ ap_msg->length = (size + 3) & -4;
+ return 0;
+}
+
+/**
+ * Copy results from a type 86 reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+struct type86_reply {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct CPRB cprb;
+ unsigned char pad[4]; /* 4-byte function code/rules block? */
+ unsigned short length;
+ char text[0];
+} __attribute__((packed));
+
+static int convert_type86(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ static unsigned char static_pad[] = {
+ 0x00,0x02,
+ 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
+ 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
+ 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
+ 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
+ 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
+ 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
+ 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
+ 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
+ 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
+ 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
+ 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
+ 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
+ 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
+ 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
+ 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
+ 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
+ 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
+ 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
+ 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
+ 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
+ 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
+ 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
+ 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
+ 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
+ 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
+ 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
+ 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
+ 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
+ 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
+ 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
+ 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
+ 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
+ };
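+ /*
+ * Note: the card strips the PKCS12 padding from deciphered data;
+ * the fixed bytes above stand in for the original random nonzero
+ * pad bytes when the plaintext is copied back to user space.
+ */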
+ struct type86_reply *msg = reply->message;
+ unsigned short service_rc, service_rs;
+ unsigned int reply_len, pad_len;
+ char *data;
+
+ service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
+ if (unlikely(service_rc != 0)) {
+ service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
+ if (service_rc == 8 && service_rs == 66) {
+ PDEBUG("Bad block format on PCICC\n");
+ return -EINVAL;
+ }
+ if (service_rc == 8 && service_rs == 65) {
+ PDEBUG("Probably an even modulus on PCICC\n");
+ return -EINVAL;
+ }
+ if (service_rc == 8 && service_rs == 770) {
+ PDEBUG("Invalid key length on PCICC\n");
+ zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
+ return -EAGAIN;
+ }
+ if (service_rc == 8 && service_rs == 783) {
+ PDEBUG("Extended bitlengths not enabled on PCICC\n");
+ zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
+ return -EAGAIN;
+ }
+ PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
+ service_rc, service_rs);
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+ data = msg->text;
+ reply_len = le16_to_cpu(msg->length) - 2;
+ if (reply_len > outputdatalength)
+ return -EINVAL;
+ /**
+ * For all encipher requests, the length of the ciphertext (reply_len)
+ * will always equal the modulus length. For MEX decipher requests
+ * the output needs to get padded. Minimum pad size is 10.
+ *
+ * Currently, the cases where padding will be added are:
+ * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+ * ZERO-PAD and CRT is only supported for PKD requests)
+ * - PCICC, always
+ */
+ pad_len = outputdatalength - reply_len;
+ if (pad_len > 0) {
+ if (pad_len < 10)
+ return -EINVAL;
+ /* 'restore' padding left in the PCICC/PCIXCC card. */
+ if (copy_to_user(outputdata, static_pad, pad_len - 1))
+ return -EFAULT;
+ if (put_user(0, outputdata + pad_len - 1))
+ return -EFAULT;
+ }
+ /* Copy the crypto response to user space. */
+ if (copy_to_user(outputdata + pad_len, data, reply_len))
+ return -EFAULT;
+ return 0;
+}
+
+static int convert_response(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type86_reply *msg = reply->message;
+
+ /* Response type byte is the second byte in the response. */
+ switch (msg->hdr.type) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code)
+ return convert_error(zdev, reply);
+ if (msg->cprb.cprb_ver_id == 0x01)
+ return convert_type86(zdev, reply,
+ outputdata, outputdatalength);
+ /* no break, incorrect cprb version is an unknown response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ PRINTK("Unrecognized Message Header: %08x%08x\n",
+ *(unsigned int *) reply->message,
+ *(unsigned int *) (reply->message+4));
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct type86_reply *t86r = reply->message;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply))
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ else if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ t86r->cprb.cprb_ver_id == 0x01) {
+ length = sizeof(struct type86_reply) + t86r->length - 2;
+ length = min(PCICC_MAX_RESPONSE_SIZE, length);
+ memcpy(msg->message, reply->message, length);
+ } else
+ memcpy(msg->message, reply->message, sizeof error_reply);
+ complete((struct completion *) msg->private);
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the PCICC
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCICC device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo *mex)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+ ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.length = PAGE_SIZE;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &work, PCICC_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response(zdev, &ap_msg, mex->outputdata,
+ mex->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ free_page((unsigned long) ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCICC
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCICC device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ struct ap_message ap_msg;
+ struct completion work;
+ int rc;
+
+ ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.length = PAGE_SIZE;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &work;
+ rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
+ if (rc)
+ goto out_free;
+ init_completion(&work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &work, PCICC_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response(zdev, &ap_msg, crt->outputdata,
+ crt->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ free_page((unsigned long) ap_msg.message);
+ return rc;
+}
+
+/**
+ * The crypto operations for a PCICC card.
+ */
+static struct zcrypt_ops zcrypt_pcicc_ops = {
+ .rsa_modexpo = zcrypt_pcicc_modexpo,
+ .rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
+};
+
+/**
+ * Probe function for PCICC cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev;
+ int rc;
+
+ zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->ap_dev = ap_dev;
+ zdev->ops = &zcrypt_pcicc_ops;
+ zdev->online = 1;
+ zdev->user_space_type = ZCRYPT_PCICC;
+ zdev->type_string = "PCICC";
+ zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
+ zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
+ zdev->speed_rating = PCICC_SPEED_RATING;
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
+ if (rc)
+ goto out_free;
+ return 0;
+
+ out_free:
+ ap_dev->private = NULL;
+ zcrypt_device_free(zdev);
+ return rc;
+}
+
+/**
+ * This is called to remove the extended PCICC driver information
+ * when an AP device is removed.
+ */
+static void zcrypt_pcicc_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev = ap_dev->private;
+
+ zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_pcicc_init(void)
+{
+ return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc");
+}
+
+void zcrypt_pcicc_exit(void)
+{
+ ap_driver_unregister(&zcrypt_pcicc_driver);
+}
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+module_init(zcrypt_pcicc_init);
+module_exit(zcrypt_pcicc_exit);
+#endif
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
new file mode 100644
index 00000000000..6d4454846c8
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.h
@@ -0,0 +1,176 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_pcicc.h
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_PCICC_H_
+#define _ZCRYPT_PCICC_H_
+
+/**
+ * The type 6 message family is associated with PCICC or PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB, both of which
+ * are described below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type6_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x06 */
+ unsigned char reserved2[2]; /* 0x0000 */
+ unsigned char right[4]; /* 0x00000000 */
+ unsigned char reserved3[2]; /* 0x0000 */
+ unsigned char reserved4[2]; /* 0x0000 */
+ unsigned char apfs[4]; /* 0x00000000 */
+ unsigned int offset1; /* 0x00000058 (offset to CPRB) */
+ unsigned int offset2; /* 0x00000000 */
+ unsigned int offset3; /* 0x00000000 */
+ unsigned int offset4; /* 0x00000000 */
+ unsigned char agent_id[16]; /* PCICC: */
+ /* 0x0100 */
+ /* 0x4343412d4150504c202020 */
+ /* 0x010101 */
+ /* PCIXCC: */
+ /* 0x4341000000000000 */
+ /* 0x0000000000000000 */
+ unsigned char rqid[2]; /* rqid. internal to 603 */
+ unsigned char reserved5[2]; /* 0x0000 */
+ unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
+ unsigned char reserved6[2]; /* 0x0000 */
+ unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
+ unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
+ unsigned int ToCardLen3; /* 0x00000000 */
+ unsigned int ToCardLen4; /* 0x00000000 */
+ unsigned int FromCardLen1; /* response buffer length */
+ unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
+ unsigned int FromCardLen3; /* 0x00000000 */
+ unsigned int FromCardLen4; /* 0x00000000 */
+} __attribute__((packed));
+
+/**
+ * CPRB
+ * Note that all shorts, ints and longs are little-endian.
+ * All pointer fields are 32 bits wide and mean nothing (the data they
+ * would point to follows the CPRB directly).
+ *
+ * A request CPRB is followed by a request_parameter_block.
+ *
+ * The request (or reply) parameter block is organized thus:
+ * function code
+ * VUD block
+ * key block
+ */
+struct CPRB {
+ unsigned short cprb_len; /* CPRB length */
+ unsigned char cprb_ver_id; /* CPRB version id. */
+ unsigned char pad_000; /* Alignment pad byte. */
+ unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
+ unsigned char srpi_verb; /* SRPI verb type */
+ unsigned char flags; /* flags */
+ unsigned char func_id[2]; /* function id */
+ unsigned char checkpoint_flag; /* */
+ unsigned char resv2; /* reserved */
+ unsigned short req_parml; /* request parameter buffer */
+ /* length 16-bit little endian */
+ unsigned char req_parmp[4]; /* request parameter buffer *
+ * pointer (means nothing: the *
+ * parameter buffer follows *
+ * the CPRB). */
+ unsigned char req_datal[4]; /* request data buffer */
+ /* length ULELONG */
+ unsigned char req_datap[4]; /* request data buffer */
+ /* pointer */
+ unsigned short rpl_parml; /* reply parameter buffer */
+ /* length 16-bit little endian */
+ unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
+ unsigned char rpl_parmp[4]; /* reply parameter buffer *
+ * pointer (means nothing: the *
+ * parameter buffer follows *
+ * the CPRB). */
+ unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
+ unsigned char rpl_datap[4]; /* reply data buffer */
+ /* pointer */
+ unsigned short ccp_rscode; /* server reason code ULESHORT */
+ unsigned short ccp_rtcode; /* server return code ULESHORT */
+ unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
+ unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
+ unsigned char repd_datal[4]; /* replied data length ULELONG */
+ unsigned char req_pc[2]; /* PC identifier */
+ unsigned char res_origin[8]; /* resource origin */
+ unsigned char mac_value[8]; /* Mac Value */
+ unsigned char logon_id[8]; /* Logon Identifier */
+ unsigned char usage_domain[2]; /* cdx */
+ unsigned char resv3[18]; /* reserved for requestor */
+ unsigned short svr_namel; /* server name length ULESHORT */
+ unsigned char svr_name[8]; /* server name */
+} __attribute__((packed));
+
+/**
+ * The type 86 message family is associated with PCICC and PCIXCC cards.
+ *
+ * It contains a message header followed by a CPRB. The CPRB is
+ * the same as the request CPRB, which is described above.
+ *
+ * If format is 1, an error condition exists and no data beyond
+ * the 8-byte message header is of interest.
+ *
+ * The non-error message is shown below.
+ *
+ * Note that all reserved fields must be zeroes.
+ */
+struct type86_hdr {
+ unsigned char reserved1; /* 0x00 */
+ unsigned char type; /* 0x86 */
+ unsigned char format; /* 0x01 (error) or 0x02 (ok) */
+ unsigned char reserved2; /* 0x00 */
+ unsigned char reply_code; /* reply code (see above) */
+ unsigned char reserved3[3]; /* 0x000000 */
+} __attribute__((packed));
+
+#define TYPE86_RSP_CODE 0x86
+#define TYPE86_FMT2 0x02
+
+struct type86_fmt2_ext {
+ unsigned char reserved[4]; /* 0x00000000 */
+ unsigned char apfs[4]; /* final status */
+ unsigned int count1; /* length of CPRB + parameters */
+ unsigned int offset1; /* offset to CPRB */
+ unsigned int count2; /* 0x00000000 */
+ unsigned int offset2; /* db offset 0x00000000 for PKD */
+ unsigned int count3; /* 0x00000000 */
+ unsigned int offset3; /* 0x00000000 */
+ unsigned int count4; /* 0x00000000 */
+ unsigned int offset4; /* 0x00000000 */
+} __attribute__((packed));
+
+struct function_and_rules_block {
+ unsigned char function_code[2];
+ unsigned short ulen;
+ unsigned char only_rule[8];
+} __attribute__((packed));
+
+int zcrypt_pcicc_init(void);
+void zcrypt_pcicc_exit(void);
+
+#endif /* _ZCRYPT_PCICC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
new file mode 100644
index 00000000000..2da8b938140
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -0,0 +1,951 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_pcixcc.c
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcicc.h"
+#include "zcrypt_pcixcc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
+#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
+#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
+
+#define PCIXCC_MCL2_SPEED_RATING 7870 /* FIXME: needs finetuning */
+#define PCIXCC_MCL3_SPEED_RATING 7870
+#define CEX2C_SPEED_RATING 8540
+
+#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
+#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
+
+#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
+#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
+#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
+#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
+
+#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
+
+#define PCIXCC_CLEANUP_TIME (15*HZ)
+
+#define CEIL4(x) ((((x)+3)/4)*4)
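+/* CEIL4 rounds up to a fullword multiple: CEIL4(1) == 4, CEIL4(8) == 8. */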
+
+struct response_type {
+ struct completion work;
+ int type;
+};
+#define PCIXCC_RESPONSE_TYPE_ICA 0
+#define PCIXCC_RESPONSE_TYPE_XCRB 1
+
+static struct ap_device_id zcrypt_pcixcc_ids[] = {
+ { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
+ { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
+ { /* end of list */ },
+};
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
+ "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+#endif
+
+static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
+static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
+static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
+ struct ap_message *);
+
+static struct ap_driver zcrypt_pcixcc_driver = {
+ .probe = zcrypt_pcixcc_probe,
+ .remove = zcrypt_pcixcc_remove,
+ .receive = zcrypt_pcixcc_receive,
+ .ids = zcrypt_pcixcc_ids,
+};
+
+/**
+ * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
+ * card in a type6 message. The 3 fields that must be filled in at execution
+ * time are req_parml, rpl_parml and usage_domain.
+ * Everything about this interface is ascii/big-endian, since the
+ * device does *not* have 'Intel inside'.
+ *
+ * The CPRBX is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (one of:)
+ * + 0x000A 'PKCS-1.2' (MCL2 'PD')
+ * + 0x000A 'ZERO-PAD' (MCL2 'PK')
+ * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
+ * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
+ * - VUD block
+ */
+static struct CPRBX static_cprbx = {
+ .cprb_len = 0x00DC,
+ .cprb_ver_id = 0x02,
+ .func_id = {0x54,0x32},
+};
+
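+/*
+ * Unlike the v1 CPRB used for PCICC cards, the CPRBX run-time fields
+ * are stored in native big-endian byte order, so no cpu_to_le16()
+ * conversion is needed when they are filled in below.
+ */
+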
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo *mex)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C','A',},
+ .function_code = {'P','K'},
+ };
+ static struct function_and_rules_block static_pke_fnr = {
+ .function_code = {'P','K'},
+ .ulen = 10,
+ .only_rule = {'M','R','P',' ',' ',' ',' ',' '}
+ };
+ static struct function_and_rules_block static_pke_fnr_MCL2 = {
+ .function_code = {'P','K'},
+ .ulen = 10,
+ .only_rule = {'Z','E','R','O','-','P','A','D'}
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __attribute__((packed)) *msg = ap_msg->message;
+ int size;
+
+ /* VUD.ciphertext */
+ msg->length = mex->inputdatalength + 2;
+ if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+ return -EFAULT;
+
+ /* Set up key which is located after the variable length text. */
+ size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + mex->inputdatalength;
+
+ /* message header, cprbx and f&r */
+ msg->hdr = static_type6_hdrX;
+ msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprbx = static_cprbx;
+ msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+
+ msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
+ static_pke_fnr_MCL2 : static_pke_fnr;
+
+ msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+ ap_msg->length = size;
+ return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ .agent_id = {'C','A',},
+ .function_code = {'P','D'},
+ };
+ static struct function_and_rules_block static_pkd_fnr = {
+ .function_code = {'P','D'},
+ .ulen = 10,
+ .only_rule = {'Z','E','R','O','-','P','A','D'}
+ };
+
+ static struct function_and_rules_block static_pkd_fnr_MCL2 = {
+ .function_code = {'P','D'},
+ .ulen = 10,
+ .only_rule = {'P','K','C','S','-','1','.','2'}
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ struct function_and_rules_block fr;
+ unsigned short length;
+ char text[0];
+ } __attribute__((packed)) *msg = ap_msg->message;
+ int size;
+
+ /* VUD.ciphertext */
+ msg->length = crt->inputdatalength + 2;
+ if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+ return -EFAULT;
+
+ /* Set up key which is located after the variable length text. */
+ size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
+ if (size < 0)
+ return size;
+ size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
+
+ /* message header, cprbx and f&r */
+ msg->hdr = static_type6_hdrX;
+ msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+ msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+ msg->cprbx = static_cprbx;
+ msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+ msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
+ size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+ msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
+ static_pkd_fnr_MCL2 : static_pkd_fnr;
+
+ ap_msg->length = size;
+ return 0;
+}
+
+/**
+ * Convert an XCRB message to a type6 CPRB message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @xcRB: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+struct type86_fmt2_msg {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+} __attribute__((packed));
+
+static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
+ struct ap_message *ap_msg,
+ struct ica_xcRB *xcRB)
+{
+ static struct type6_hdr static_type6_hdrX = {
+ .type = 0x06,
+ .offset1 = 0x00000058,
+ };
+ struct {
+ struct type6_hdr hdr;
+ struct ica_CPRBX cprbx;
+ } __attribute__((packed)) *msg = ap_msg->message;
+
+ int rcblen = CEIL4(xcRB->request_control_blk_length);
+ int replylen;
+ char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+ char *function_code;
+
+ /* length checks */
+ ap_msg->length = sizeof(struct type6_hdr) +
+ CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) {
+ PRINTK("Combined message is too large (%ld/%d/%d).\n",
+ sizeof(struct type6_hdr),
+ xcRB->request_control_blk_length,
+ xcRB->request_data_length);
+ return -EFAULT;
+ }
+ if (CEIL4(xcRB->reply_control_blk_length) >
+ PCIXCC_MAX_XCRB_REPLY_SIZE) {
+ PDEBUG("Reply CPRB length is too large (%d).\n",
+ xcRB->reply_control_blk_length);
+ return -EFAULT;
+ }
+ if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
+ PDEBUG("Reply data block length is too large (%d).\n",
+ xcRB->reply_data_length);
+ return -EFAULT;
+ }
+ replylen = CEIL4(xcRB->reply_control_blk_length) +
+ CEIL4(xcRB->reply_data_length) +
+ sizeof(struct type86_fmt2_msg);
+ if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
+ PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
+ " (%d/%d/%d).\n",
+ sizeof(struct type86_fmt2_msg),
+ xcRB->reply_control_blk_length,
+ xcRB->reply_data_length);
+ xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
+ (sizeof(struct type86_fmt2_msg) +
+ CEIL4(xcRB->reply_data_length));
+ PDEBUG("Capping Reply CPRB length at %d\n",
+ xcRB->reply_control_blk_length);
+ }
+
+ /* prepare type6 header */
+ msg->hdr = static_type6_hdrX;
+ memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
+ msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
+ if (xcRB->request_data_length) {
+ msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
+ msg->hdr.ToCardLen2 = xcRB->request_data_length;
+ }
+ msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
+ msg->hdr.FromCardLen2 = xcRB->reply_data_length;
+
+ /* prepare CPRB */
+ if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
+ xcRB->request_control_blk_length))
+ return -EFAULT;
+ if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
+ xcRB->request_control_blk_length) {
+ PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
+ xcRB->request_control_blk_length);
+ return -EFAULT;
+ }
+ function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
+ memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
+
+ /* copy data block */
+ if (xcRB->request_data_length &&
+ copy_from_user(req_data, xcRB->request_data_address,
+ xcRB->request_data_length))
+ return -EFAULT;
+ return 0;
+}
+
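+/*
+ * Layout of the message assembled above (offsets relative to the start
+ * of the AP message; sizeof(struct type6_hdr) == 0x58):
+ *
+ * 0x00 struct type6_hdr
+ * 0x58 request CPRB, CEIL4-padded to rcblen bytes
+ * 0x58 + rcblen request data (request_data_length bytes)
+ */
+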
+/**
+ * Copy results from a type 86 ICA reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @outputdata: pointer to user output data
+ * @outputdatalength: size of user output data
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+struct type86x_reply {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ struct CPRBX cprbx;
+ unsigned char pad[4]; /* 4-byte function code/rules block? */
+ unsigned short length;
+ char text[0];
+} __attribute__((packed));
+
+static int convert_type86_ica(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ static unsigned char static_pad[] = {
+ 0x00,0x02,
+ 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
+ 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
+ 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
+ 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
+ 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
+ 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
+ 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
+ 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
+ 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
+ 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
+ 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
+ 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
+ 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
+ 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
+ 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
+ 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
+ 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
+ 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
+ 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
+ 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
+ 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
+ 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
+ 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
+ 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
+ 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
+ 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
+ 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
+ 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
+ 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
+ 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
+ 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
+ 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
+ };
+ struct type86x_reply *msg = reply->message;
+ unsigned short service_rc, service_rs;
+ unsigned int reply_len, pad_len;
+ char *data;
+
+ service_rc = msg->cprbx.ccp_rtcode;
+ if (unlikely(service_rc != 0)) {
+ service_rs = msg->cprbx.ccp_rscode;
+ if (service_rc == 8 && service_rs == 66) {
+ PDEBUG("Bad block format on PCIXCC/CEX2C\n");
+ return -EINVAL;
+ }
+ if (service_rc == 8 && service_rs == 65) {
+ PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
+ return -EINVAL;
+ }
+ if (service_rc == 8 && service_rs == 770) {
+ PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
+ zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+ return -EAGAIN;
+ }
+ if (service_rc == 8 && service_rs == 783) {
+ PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
+ zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+ return -EAGAIN;
+ }
+ PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
+ service_rc, service_rs);
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+ data = msg->text;
+ reply_len = msg->length - 2;
+ if (reply_len > outputdatalength)
+ return -EINVAL;
+ /**
+ * For all encipher requests, the length of the ciphertext (reply_len)
+ * will always equal the modulus length. For MEX decipher requests
+ * the output needs to get padded. Minimum pad size is 10.
+ *
+ * Currently, the cases where padding will be added are:
+ * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+ * ZERO-PAD and CRT is only supported for PKD requests)
+ * - PCICC, always
+ */
+ pad_len = outputdatalength - reply_len;
+ if (pad_len > 0) {
+ if (pad_len < 10)
+ return -EINVAL;
+ /* 'restore' padding left in the PCICC/PCIXCC card. */
+ if (copy_to_user(outputdata, static_pad, pad_len - 1))
+ return -EFAULT;
+ if (put_user(0, outputdata + pad_len - 1))
+ return -EFAULT;
+ }
+ /* Copy the crypto response to user space. */
+ if (copy_to_user(outputdata + pad_len, data, reply_len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Copy results from a type 86 XCRB reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to XCRB
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ struct ica_xcRB *xcRB)
+{
+ struct type86_fmt2_msg *msg = reply->message;
+ char *data = reply->message;
+
+ /* Copy CPRB to user */
+ if (copy_to_user(xcRB->reply_control_blk_addr,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
+ return -EFAULT;
+ xcRB->reply_control_blk_length = msg->fmt2.count1;
+
+ /* Copy data buffer to user */
+ if (msg->fmt2.count2)
+ if (copy_to_user(xcRB->reply_data_addr,
+ data + msg->fmt2.offset2, msg->fmt2.count2))
+ return -EFAULT;
+ xcRB->reply_data_length = msg->fmt2.count2;
+ return 0;
+}
+
+static int convert_response_ica(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
+{
+ struct type86x_reply *msg = reply->message;
+
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *) reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ return convert_error(zdev, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code)
+ return convert_error(zdev, reply);
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_ica(zdev, reply,
+ outputdata, outputdatalength);
+ /* no break, incorrect cprb version is an unknown response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ PRINTK("Unrecognized Message Header: %08x%08x\n",
+ *(unsigned int *) reply->message,
+ *(unsigned int *) (reply->message+4));
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+static int convert_response_xcrb(struct zcrypt_device *zdev,
+ struct ap_message *reply,
+ struct ica_xcRB *xcRB)
+{
+ struct type86x_reply *msg = reply->message;
+
+ /* Response type byte is the second byte in the response. */
+ switch (((unsigned char *) reply->message)[1]) {
+ case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
+ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ return convert_error(zdev, reply);
+ case TYPE86_RSP_CODE:
+ if (msg->hdr.reply_code) {
+ memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
+ return convert_error(zdev, reply);
+ }
+ if (msg->cprbx.cprb_ver_id == 0x02)
+ return convert_type86_xcrb(zdev, reply, xcRB);
+ /* no break, incorrect cprb version is an unknown response */
+ default: /* Unknown response type, this should NEVER EVER happen */
+ PRINTK("Unrecognized Message Header: %08x%08x\n",
+ *(unsigned int *) reply->message,
+ *(unsigned int *) (reply->message+4));
+ xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ zdev->online = 0;
+ return -EAGAIN; /* repeat the request on a different device. */
+ }
+}
+
+/**
+ * This function is called from the AP bus code after a crypto request
+ * "msg" has finished with the reply message "reply".
+ * It is called from tasklet context.
+ * @ap_dev: pointer to the AP device
+ * @msg: pointer to the AP message
+ * @reply: pointer to the AP reply message
+ */
+static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
+ struct ap_message *msg,
+ struct ap_message *reply)
+{
+ static struct error_hdr error_reply = {
+ .type = TYPE82_RSP_CODE,
+ .reply_code = REP82_ERROR_MACHINE_FAILURE,
+ };
+ struct response_type *resp_type =
+ (struct response_type *) msg->private;
+ struct type86x_reply *t86r = reply->message;
+ int length;
+
+ /* Copy the reply message to the request message buffer. */
+ if (IS_ERR(reply))
+ memcpy(msg->message, &error_reply, sizeof(error_reply));
+ else if (t86r->hdr.type == TYPE86_RSP_CODE &&
+ t86r->cprbx.cprb_ver_id == 0x02) {
+ switch (resp_type->type) {
+ case PCIXCC_RESPONSE_TYPE_ICA:
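+ /* t86r->length includes its own two length bytes; clamp to the sanity limit. */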
+ length = sizeof(struct type86x_reply)
+ + t86r->length - 2;
+ length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
+ memcpy(msg->message, reply->message, length);
+ break;
+ case PCIXCC_RESPONSE_TYPE_XCRB:
+ length = t86r->fmt2.offset2 + t86r->fmt2.count2;
+ length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
+ memcpy(msg->message, reply->message, length);
+ break;
+ default:
+ PRINTK("Invalid internal response type: %i\n",
+ resp_type->type);
+ memcpy(msg->message, &error_reply,
+ sizeof error_reply);
+ }
+ } else
+ memcpy(msg->message, reply->message, sizeof error_reply);
+ complete(&(resp_type->work));
+}
+
+static atomic_t zcrypt_step = ATOMIC_INIT(0);
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCIXCC/CEX2C device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo *mex)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_ICA,
+ };
+ int rc;
+
+ ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
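+ /* psmid: caller's pid in the high word plus a global sequence count, used to match the reply to this request. */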
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &resp_type.work, PCIXCC_CLEANUP_TIME);
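+ /* rc > 0: reply arrived; rc == 0: timed out; rc < 0: interrupted by a signal. */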
+ if (rc > 0)
+ rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
+ mex->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ free_page((unsigned long) ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCIXCC/CEX2C device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
+ struct ica_rsa_modexpo_crt *crt)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_ICA,
+ };
+ int rc;
+
+ ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &resp_type.work, PCIXCC_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
+ crt->outputdatalength);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ free_page((unsigned long) ap_msg.message);
+ return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a send_cprb request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ * PCIXCC/CEX2C device to the request distributor
+ * @xcRB: pointer to the send_cprb request buffer
+ */
+long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB)
+{
+ struct ap_message ap_msg;
+ struct response_type resp_type = {
+ .type = PCIXCC_RESPONSE_TYPE_XCRB,
+ };
+ int rc;
+
+ ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
+ if (!ap_msg.message)
+ return -ENOMEM;
+ ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg.private = &resp_type;
+ rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
+ if (rc)
+ goto out_free;
+ init_completion(&resp_type.work);
+ ap_queue_message(zdev->ap_dev, &ap_msg);
+ rc = wait_for_completion_interruptible_timeout(
+ &resp_type.work, PCIXCC_CLEANUP_TIME);
+ if (rc > 0)
+ rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
+ else {
+ /* Signal pending or message timed out. */
+ ap_cancel_message(zdev->ap_dev, &ap_msg);
+ if (rc == 0)
+ /* Message timed out. */
+ rc = -ETIME;
+ }
+out_free:
+ memset(ap_msg.message, 0x0, ap_msg.length);
+ kfree(ap_msg.message);
+ return rc;
+}
+
+/**
+ * The crypto operations for a PCIXCC/CEX2C card.
+ */
+static struct zcrypt_ops zcrypt_pcixcc_ops = {
+ .rsa_modexpo = zcrypt_pcixcc_modexpo,
+ .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
+ .send_cprb = zcrypt_pcixcc_send_cprb,
+};
+
+/**
+ * Micro-code detection function. It sends a message to a PCIXCC card
+ * to find out the microcode level.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
+{
+ static unsigned char msg[] = {
+ 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
+ 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
+ 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
+ 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
+ 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
+ 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
+ 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
+ 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
+ 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
+ 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
+ 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
+ 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
+ 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
+ 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
+ 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
+ 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
+ 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
+ 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
+ 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
+ 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
+ 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
+ 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
+ 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
+ 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
+ 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
+ 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
+ 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
+ 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
+ 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
+ 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
+ 0xF1,0x3D,0x93,0x53
+ };
+ unsigned long long psmid;
+ struct CPRBX *cprbx;
+ char *reply;
+ int rc, i;
+
+ reply = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!reply)
+ return -ENOMEM;
+
+ rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
+ if (rc)
+ goto out_free;
+
+ /* Wait for the test message to complete. */
+ for (i = 0; i < 6; i++) {
+ mdelay(300);
+ rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
+ if (rc == 0 && psmid == 0x0102030405060708ULL)
+ break;
+ }
+
+ if (i >= 6) {
+ /* Got no answer. */
+ rc = -ENODEV;
+ goto out_free;
+ }
+
+ cprbx = (struct CPRBX *) (reply + 48);
+ if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
+ rc = ZCRYPT_PCIXCC_MCL2;
+ else
+ rc = ZCRYPT_PCIXCC_MCL3;
+out_free:
+ free_page((unsigned long) reply);
+ return rc;
+}
+
+/**
+ * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type. The PCIXCC
+ * cards come in two flavours: micro code level 2 and micro code level 3.
+ * This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev;
+ int rc;
+
+ zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
+ if (!zdev)
+ return -ENOMEM;
+ zdev->ap_dev = ap_dev;
+ zdev->ops = &zcrypt_pcixcc_ops;
+ zdev->online = 1;
+ if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
+ rc = zcrypt_pcixcc_mcl(ap_dev);
+ if (rc < 0) {
+ zcrypt_device_free(zdev);
+ return rc;
+ }
+ zdev->user_space_type = rc;
+ if (rc == ZCRYPT_PCIXCC_MCL2) {
+ zdev->type_string = "PCIXCC_MCL2";
+ zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
+ zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+ zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+ } else {
+ zdev->type_string = "PCIXCC_MCL3";
+ zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
+ zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+ zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+ }
+ } else {
+ zdev->user_space_type = ZCRYPT_CEX2C;
+ zdev->type_string = "CEX2C";
+ zdev->speed_rating = CEX2C_SPEED_RATING;
+ zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+ zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+ }
+ ap_dev->reply = &zdev->reply;
+ ap_dev->private = zdev;
+ rc = zcrypt_device_register(zdev);
+ if (rc)
+ goto out_free;
+ return 0;
+
+ out_free:
+ ap_dev->private = NULL;
+ zcrypt_device_free(zdev);
+ return rc;
+}
+
+/**
+ * This is called to remove the extended PCIXCC/CEX2C driver information
+ * when an AP device is removed.
+ */
+static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
+{
+ struct zcrypt_device *zdev = ap_dev->private;
+
+ zcrypt_device_unregister(zdev);
+}
+
+int __init zcrypt_pcixcc_init(void)
+{
+ return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
+}
+
+void zcrypt_pcixcc_exit(void)
+{
+ ap_driver_unregister(&zcrypt_pcixcc_driver);
+}
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+module_init(zcrypt_pcixcc_init);
+module_exit(zcrypt_pcixcc_exit);
+#endif
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
new file mode 100644
index 00000000000..a78ff307fd1
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -0,0 +1,79 @@
+/*
+ * linux/drivers/s390/crypto/zcrypt_pcixcc.h
+ *
+ * zcrypt 2.1.0
+ *
+ * Copyright (C) 2001, 2006 IBM Corporation
+ * Author(s): Robert Burroughs
+ * Eric Rossman (edrossma@us.ibm.com)
+ *
+ * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _ZCRYPT_PCIXCC_H_
+#define _ZCRYPT_PCIXCC_H_
+
+/**
+ * CPRBX
+ * Note that all shorts and ints are big-endian.
+ * All pointer fields are 16 bytes long and carry no meaning.
+ *
+ * A request CPRB is followed by a request_parameter_block.
+ *
+ * The request (or reply) parameter block is organized thus:
+ * function code
+ * VUD block
+ * key block
+ */
+struct CPRBX {
+ unsigned short cprb_len; /* CPRB length 220 */
+ unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
+ unsigned char pad_000[3]; /* Alignment pad bytes */
+ unsigned char func_id[2]; /* function id 0x5432 */
+ unsigned char cprb_flags[4]; /* Flags */
+ unsigned int req_parml; /* request parameter buffer len */
+ unsigned int req_datal; /* request data buffer */
+ unsigned int rpl_msgbl; /* reply message block length */
+ unsigned int rpld_parml; /* replied parameter block len */
+ unsigned int rpl_datal; /* reply data block len */
+ unsigned int rpld_datal; /* replied data block len */
+ unsigned int req_extbl; /* request extension block len */
+ unsigned char pad_001[4]; /* reserved */
+ unsigned int rpld_extbl; /* replied extension block len */
+ unsigned char req_parmb[16]; /* request parm block 'address' */
+ unsigned char req_datab[16]; /* request data block 'address' */
+ unsigned char rpl_parmb[16]; /* reply parm block 'address' */
+ unsigned char rpl_datab[16]; /* reply data block 'address' */
+ unsigned char req_extb[16]; /* request extension block 'addr'*/
+ unsigned char rpl_extb[16]; /* reply extension block 'address'*/
+ unsigned short ccp_rtcode; /* server return code */
+ unsigned short ccp_rscode; /* server reason code */
+ unsigned int mac_data_len; /* Mac Data Length */
+ unsigned char logon_id[8]; /* Logon Identifier */
+ unsigned char mac_value[8]; /* Mac Value */
+ unsigned char mac_content_flgs;/* Mac content flag byte */
+ unsigned char pad_002; /* Alignment */
+ unsigned short domain; /* Domain */
+ unsigned char pad_003[12]; /* Domain masks */
+ unsigned char pad_004[36]; /* reserved */
+} __attribute__((packed));
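+
+/*
+ * Illustrative sketch only (not part of this interface): a CPRBX is
+ * immediately followed by its parameter block, so the block can be
+ * located via cprb_len:
+ *
+ * static inline void *cprbx_parm_block(struct CPRBX *cprbx)
+ * {
+ * return (char *) cprbx + cprbx->cprb_len;
+ * }
+ */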
+
+int zcrypt_pcixcc_init(void);
+void zcrypt_pcixcc_exit(void);
+
+#endif /* _ZCRYPT_PCIXCC_H_ */
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 54885475492..1a93fa684e9 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -92,15 +92,6 @@ config QETH_VLAN
If CONFIG_QETH is switched on, this option will include IEEE
802.1q VLAN support in the qeth device driver.
-config QETH_PERF_STATS
- bool "Performance statistics in /proc"
- depends on QETH
- help
- When switched on, this option will add a file in the proc-fs
- (/proc/qeth_perf_stats) containing performance statistics. It
- may slightly impact performance, so this is only recommended for
- internal tuning of the device driver.
-
config CCWGROUP
tristate
default (LCS || CTC || QETH)
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 6775a837d64..4777e36a922 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
obj-$(CONFIG_LCS) += lcs.o cu3088.o
obj-$(CONFIG_CLAW) += claw.o cu3088.o
-obj-$(CONFIG_MPC) += ctcmpc.o fsm.o cu3088.o
qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
qeth-$(CONFIG_PROC_FS) += qeth_proc.o
obj-$(CONFIG_QETH) += qeth.o
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 8a4b5812014..3257c22dd79 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1714,6 +1714,9 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
kfree(ch);
return 0;
}
+
+ spin_lock_init(&ch->collect_lock);
+
fsm_settimer(ch->fsm, &ch->timer);
skb_queue_head_init(&ch->io_queue);
skb_queue_head_init(&ch->collect_queue);
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index 0e863df4027..821dde86e24 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -335,8 +335,8 @@ do { \
#else
-#define iucv_debug(lvl, fmt, args...)
-#define iucv_dumpit(title, buf, len)
+#define iucv_debug(lvl, fmt, args...) do { } while (0)
+#define iucv_dumpit(title, buf, len) do { } while (0)
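+/* The do { } while (0) form keeps the empty macros safe in unbraced if/else bodies. */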
#endif
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2eded55ae88..16ac68c27a2 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -670,9 +670,8 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, rc;
LCS_DBF_TEXT(5, trace, "rdybuff");
- if (buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED)
- BUG();
+ BUG_ON(buffer->state != BUF_STATE_LOCKED &&
+ buffer->state != BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = BUF_STATE_READY;
index = buffer - channel->iob;
@@ -696,8 +695,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, prev, next;
LCS_DBF_TEXT(5, trace, "prcsbuff");
- if (buffer->state != BUF_STATE_READY)
- BUG();
+ BUG_ON(buffer->state != BUF_STATE_READY);
buffer->state = BUF_STATE_PROCESSED;
index = buffer - channel->iob;
prev = (index - 1) & (LCS_NUM_BUFFS - 1);
@@ -729,9 +727,8 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
unsigned long flags;
LCS_DBF_TEXT(5, trace, "relbuff");
- if (buffer->state != BUF_STATE_LOCKED &&
- buffer->state != BUF_STATE_PROCESSED)
- BUG();
+ BUG_ON(buffer->state != BUF_STATE_LOCKED &&
+ buffer->state != BUF_STATE_PROCESSED);
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = BUF_STATE_EMPTY;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5d6e6cbfa36..d7d1cc0a5c8 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -112,7 +112,12 @@ struct iucv_connection {
/**
* Linked list of all connection structs.
*/
-static struct iucv_connection *iucv_connections;
+struct iucv_connection_struct {
+ struct iucv_connection *iucv_connections;
+ rwlock_t iucv_rwlock;
+};
+
+static struct iucv_connection_struct iucv_conns;
/**
* Representation of event-data for the
@@ -1368,8 +1373,10 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf,
struct net_device *ndev = priv->conn->netdev;
char *p;
char *tmp;
- char username[10];
+ char username[9];
int i;
+ struct iucv_connection **clist = &iucv_conns.iucv_connections;
+ unsigned long flags;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
@@ -1382,7 +1389,7 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf,
tmp = strsep((char **) &buf, "\n");
for (i=0, p=tmp; i<8 && *p; i++, p++) {
if (isalnum(*p) || (*p == '$'))
- username[i]= *p;
+ username[i]= toupper(*p);
else if (*p == '\n') {
/* trailing lf, grr */
break;
@@ -1395,11 +1402,11 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf,
return -EINVAL;
}
}
- while (i<9)
+ while (i<8)
username[i++] = ' ';
- username[9] = '\0';
+ username[8] = '\0';
- if (memcmp(username, priv->conn->userid, 8)) {
+ if (memcmp(username, priv->conn->userid, 9)) {
/* username changed */
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
PRINT_WARN(
@@ -1410,6 +1417,19 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf,
return -EBUSY;
}
}
+ read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
+ while (*clist) {
+ if (!strncmp(username, (*clist)->userid, 9) ||
+ ((*clist)->netdev != ndev))
+ break;
+ clist = &((*clist)->next);
+ }
+ read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
+ if (*clist) {
+ PRINT_WARN("netiucv: Connection to %s already exists\n",
+ username);
+ return -EEXIST;
+ }
memcpy(priv->conn->userid, username, 9);
return count;
@@ -1781,13 +1801,15 @@ netiucv_unregister_device(struct device *dev)
static struct iucv_connection *
netiucv_new_connection(struct net_device *dev, char *username)
{
- struct iucv_connection **clist = &iucv_connections;
+ unsigned long flags;
+ struct iucv_connection **clist = &iucv_conns.iucv_connections;
struct iucv_connection *conn =
kzalloc(sizeof(struct iucv_connection), GFP_KERNEL);
if (conn) {
skb_queue_head_init(&conn->collect_queue);
skb_queue_head_init(&conn->commit_queue);
+ spin_lock_init(&conn->collect_lock);
conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
conn->netdev = dev;
@@ -1822,8 +1844,10 @@ netiucv_new_connection(struct net_device *dev, char *username)
fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
}
+ write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
conn->next = *clist;
*clist = conn;
+ write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
}
return conn;
}
@@ -1835,14 +1859,17 @@ netiucv_new_connection(struct net_device *dev, char *username)
static void
netiucv_remove_connection(struct iucv_connection *conn)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &iucv_conns.iucv_connections;
+ unsigned long flags;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (conn == NULL)
return;
+ write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
while (*clist) {
if (*clist == conn) {
*clist = conn->next;
+ write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
if (conn->handle) {
iucv_unregister_program(conn->handle);
conn->handle = NULL;
@@ -1855,6 +1882,7 @@ netiucv_remove_connection(struct iucv_connection *conn)
}
clist = &((*clist)->next);
}
+ write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
}
/**
@@ -1947,9 +1975,11 @@ static ssize_t
conn_write(struct device_driver *drv, const char *buf, size_t count)
{
char *p;
- char username[10];
+ char username[9];
int i, ret;
struct net_device *dev;
+ struct iucv_connection **clist = &iucv_conns.iucv_connections;
+ unsigned long flags;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
@@ -1960,7 +1990,7 @@ conn_write(struct device_driver *drv, const char *buf, size_t count)
for (i=0, p=(char *)buf; i<8 && *p; i++, p++) {
if (isalnum(*p) || (*p == '$'))
- username[i]= *p;
+ username[i]= toupper(*p);
else if (*p == '\n') {
/* trailing lf, grr */
break;
@@ -1971,9 +2001,22 @@ conn_write(struct device_driver *drv, const char *buf, size_t count)
return -EINVAL;
}
}
- while (i<9)
+ while (i<8)
username[i++] = ' ';
- username[9] = '\0';
+ username[8] = '\0';
+
+ read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
+ while (*clist) {
+ if (!strncmp(username, (*clist)->userid, 9))
+ break;
+ clist = &((*clist)->next);
+ }
+ read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
+ if (*clist) {
+ PRINT_WARN("netiucv: Connection to %s already exists\n",
+ username);
+ return -EEXIST;
+ }
dev = netiucv_init_netdevice(username);
if (!dev) {
PRINT_WARN(
@@ -2015,7 +2058,8 @@ DRIVER_ATTR(connection, 0200, NULL, conn_write);
static ssize_t
remove_write (struct device_driver *drv, const char *buf, size_t count)
{
- struct iucv_connection **clist = &iucv_connections;
+ struct iucv_connection **clist = &iucv_conns.iucv_connections;
+ unsigned long flags;
struct net_device *ndev;
struct netiucv_priv *priv;
struct device *dev;
@@ -2026,7 +2070,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count >= IFNAMSIZ)
- count = IFNAMSIZ-1;
+ count = IFNAMSIZ - 1;
for (i=0, p=(char *)buf; i<count && *p; i++, p++) {
if ((*p == '\n') || (*p == ' ')) {
@@ -2038,6 +2082,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
}
name[i] = '\0';
+ read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
while (*clist) {
ndev = (*clist)->netdev;
priv = (struct netiucv_priv*)ndev->priv;
@@ -2047,6 +2092,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
clist = &((*clist)->next);
continue;
}
+ read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
PRINT_WARN(
"netiucv: net device %s active with peer %s\n",
@@ -2060,6 +2106,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
netiucv_unregister_device(dev);
return count;
}
+ read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
PRINT_WARN("netiucv: net device %s unknown\n", name);
IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
return -EINVAL;
@@ -2077,8 +2124,8 @@ static void __exit
netiucv_exit(void)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- while (iucv_connections) {
- struct net_device *ndev = iucv_connections->netdev;
+ while (iucv_conns.iucv_connections) {
+ struct net_device *ndev = iucv_conns.iucv_connections->netdev;
struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
struct device *dev = priv->dev;
@@ -2120,6 +2167,7 @@ netiucv_init(void)
if (!ret) {
ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
netiucv_banner();
+ rwlock_init(&iucv_conns.iucv_rwlock);
} else {
PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 619f4a0c716..821383d8cbe 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -176,7 +176,6 @@ extern struct ccwgroup_driver qeth_ccwgroup_driver;
/**
* card stuff
*/
-#ifdef CONFIG_QETH_PERF_STATS
struct qeth_perf_stats {
unsigned int bufs_rec;
unsigned int bufs_sent;
@@ -211,8 +210,10 @@ struct qeth_perf_stats {
unsigned int large_send_cnt;
unsigned int sg_skbs_sent;
unsigned int sg_frags_sent;
+ /* initial values when measuring starts */
+ unsigned long initial_rx_packets;
+ unsigned long initial_tx_packets;
};
-#endif /* CONFIG_QETH_PERF_STATS */
/* Routing stuff */
struct qeth_routing_info {
@@ -462,6 +463,7 @@ enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED,
+ QETH_QDIO_CLEANING
};
struct qeth_buffer_pool_entry {
@@ -536,7 +538,7 @@ struct qeth_qdio_out_q {
} __attribute__ ((aligned(256)));
struct qeth_qdio_info {
- volatile enum qeth_qdio_info_states state;
+ atomic_t state;
/* input */
struct qeth_qdio_q *in_q;
struct qeth_qdio_buffer_pool in_buf_pool;
@@ -767,6 +769,7 @@ struct qeth_card_options {
int fake_ll;
int layer2;
enum qeth_large_send_types large_send;
+ int performance_stats;
};
/*
@@ -819,9 +822,7 @@ struct qeth_card {
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
-#ifdef CONFIG_QETH_PERF_STATS
struct qeth_perf_stats perf_stats;
-#endif /* CONFIG_QETH_PERF_STATS */
int use_hard_stop;
int (*orig_hard_header)(struct sk_buff *,struct net_device *,
unsigned short,void *,void *,unsigned);
@@ -859,23 +860,18 @@ qeth_get_ipa_adp_type(enum qeth_link_types link_type)
}
}
-static inline int
-qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
+static inline struct sk_buff *
+qeth_realloc_headroom(struct qeth_card *card, struct sk_buff *skb, int size)
{
- struct sk_buff *new_skb = NULL;
-
- if (skb_headroom(*skb) < size){
- new_skb = skb_realloc_headroom(*skb, size);
- if (!new_skb) {
- PRINT_ERR("qeth_prepare_skb: could "
- "not realloc headroom for qeth_hdr "
- "on interface %s", QETH_CARD_IFNAME(card));
- return -ENOMEM;
- }
- kfree_skb(*skb);
- *skb = new_skb;
- }
- return 0;
+ struct sk_buff *new_skb = skb;
+
+ if (skb_headroom(skb) >= size)
+ return skb;
+ new_skb = skb_realloc_headroom(skb, size);
+ if (!new_skb)
+ PRINT_ERR("Could not realloc headroom for qeth_hdr "
+ "on interface %s", QETH_CARD_IFNAME(card));
+ return new_skb;
}
static inline struct sk_buff *
@@ -885,16 +881,15 @@ qeth_pskb_unshare(struct sk_buff *skb, int pri)
if (!skb_cloned(skb))
return skb;
nskb = skb_copy(skb, pri);
- kfree_skb(skb); /* free our shared copy */
return nskb;
}
static inline void *
-qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
+qeth_push_skb(struct qeth_card *card, struct sk_buff *skb, int size)
{
void *hdr;
- hdr = (void *) skb_push(*skb, size);
+ hdr = (void *) skb_push(skb, size);
/*
* sanity check, the Linux memory allocation scheme should
* never present us cases like this one (the qdio header size plus
@@ -903,8 +898,7 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
(((unsigned long) hdr + size +
QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
- PRINT_ERR("qeth_prepare_skb: misaligned "
- "packet on interface %s. Discarded.",
+ PRINT_ERR("Misaligned packet on interface %s. Discarded.",
QETH_CARD_IFNAME(card));
return NULL;
}
@@ -1056,13 +1050,11 @@ qeth_get_arphdr_type(int cardtype, int linktype)
}
}
-#ifdef CONFIG_QETH_PERF_STATS
static inline int
qeth_get_micros(void)
{
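/* The s390 TOD clock ticks in 2^-12 microsecond units; >> 12 converts to microseconds. */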
return (int) (get_clock() >> 12);
}
-#endif
static inline int
qeth_get_qdio_q_format(struct qeth_card *card)
@@ -1096,10 +1088,11 @@ qeth_string_to_ipaddr4(const char *buf, __u8 *addr)
{
int count = 0, rc = 0;
int in[4];
+ char c;
- rc = sscanf(buf, "%d.%d.%d.%d%n",
- &in[0], &in[1], &in[2], &in[3], &count);
- if (rc != 4 || count<=0)
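+ /* Require exactly four octets, allowing only an optional trailing newline. */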
+ rc = sscanf(buf, "%u.%u.%u.%u%c",
+ &in[0], &in[1], &in[2], &in[3], &c);
+ if (rc != 4 && (rc != 5 || c != '\n'))
return -EINVAL;
for (count = 0; count < 4; count++) {
if (in[count] > 255)
@@ -1123,24 +1116,28 @@ qeth_ipaddr6_to_string(const __u8 *addr, char *buf)
static inline int
qeth_string_to_ipaddr6(const char *buf, __u8 *addr)
{
- char *end, *start;
+ const char *end, *end_tmp, *start;
__u16 *in;
char num[5];
int num2, cnt, out, found, save_cnt;
unsigned short in_tmp[8] = {0, };
cnt = out = found = save_cnt = num2 = 0;
- end = start = (char *) buf;
+ end = start = buf;
in = (__u16 *) addr;
memset(in, 0, 16);
- while (end) {
- end = strchr(end,':');
+ while (*end) {
+ end = strchr(start,':');
if (end == NULL) {
- end = (char *)buf + (strlen(buf));
- out = 1;
+ end = buf + strlen(buf);
+ if ((end_tmp = strchr(start, '\n')) != NULL)
+ end = end_tmp;
+ out = 1;
}
if ((end - start)) {
memset(num, 0, 5);
+ if ((end - start) > 4)
+ return -EINVAL;
memcpy(num, start, end - start);
if (!qeth_isxdigit(num))
return -EINVAL;
@@ -1158,6 +1155,8 @@ qeth_string_to_ipaddr6(const char *buf, __u8 *addr)
}
start = ++end;
}
+ if (cnt + save_cnt > 8)
+ return -EINVAL;
cnt = 7;
while (save_cnt)
in[cnt--] = in_tmp[--save_cnt];
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 8491598f914..a363721cf28 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -179,9 +179,8 @@ out_check:
flush_cnt++;
}
} else {
-#ifdef CONFIG_QETH_PERF_STATS
- queue->card->perf_stats.skbs_sent_pack++;
-#endif
+ if (queue->card->options.performance_stats)
+ queue->card->perf_stats.skbs_sent_pack++;
QETH_DBF_TEXT(trace, 6, "fillbfpa");
if (buf->next_element_to_fill >=
QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index e1327b8fce0..5613b4564fa 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1073,6 +1073,7 @@ qeth_set_intial_options(struct qeth_card *card)
card->options.layer2 = 1;
else
card->options.layer2 = 0;
+ card->options.performance_stats = 1;
}
/**
@@ -1708,6 +1709,7 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
"IP address reset.\n",
QETH_CARD_IFNAME(card),
card->info.chpid);
+ netif_carrier_on(card->dev);
qeth_schedule_recovery(card);
return NULL;
case IPA_CMD_MODCCID:
@@ -2464,24 +2466,6 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
}
-static inline void
-qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr *hdr)
-{
-#ifdef CONFIG_QETH_VLAN
- u16 *vlan_tag;
-
- if (hdr->hdr.l3.ext_flags &
- (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
- vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN);
- *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
- hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
- *(vlan_tag + 1) = skb->protocol;
- skb->protocol = __constant_htons(ETH_P_8021Q);
- }
-#endif /* CONFIG_QETH_VLAN */
-}
-
static inline __u16
qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
@@ -2510,15 +2494,16 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
return vlan_id;
}
-static inline void
+static inline __u16
qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr *hdr)
{
+ unsigned short vlan_id = 0;
#ifdef CONFIG_QETH_IPV6
if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
skb->pkt_type = PACKET_HOST;
skb->protocol = qeth_type_trans(skb, card->dev);
- return;
+ return 0;
}
#endif /* CONFIG_QETH_IPV6 */
skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
@@ -2540,7 +2525,13 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
default:
skb->pkt_type = PACKET_HOST;
}
- qeth_rebuild_skb_vlan(card, skb, hdr);
+
+ if (hdr->hdr.l3.ext_flags &
+ (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
+ vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)?
+ hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+ }
+
if (card->options.fake_ll)
qeth_rebuild_skb_fake_ll(card, skb, hdr);
else
@@ -2556,6 +2547,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
else
skb->ip_summed = SW_CHECKSUMMING;
}
+ return vlan_id;
}
static inline void
@@ -2568,20 +2560,20 @@ qeth_process_inbound_buffer(struct qeth_card *card,
int offset;
int rxrc;
__u16 vlan_tag = 0;
+ __u16 *vlan_addr;
/* get first element of current buffer */
element = (struct qdio_buffer_element *)&buf->buffer->element[0];
offset = 0;
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.bufs_rec++;
-#endif
+ if (card->options.performance_stats)
+ card->perf_stats.bufs_rec++;
while((skb = qeth_get_next_skb(card, buf->buffer, &element,
&offset, &hdr))) {
skb->dev = card->dev;
if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
- qeth_rebuild_skb(card, skb, hdr);
+ vlan_tag = qeth_rebuild_skb(card, skb, hdr);
else { /*in case of OSN*/
skb_push(skb, sizeof(struct qeth_hdr));
memcpy(skb->data, hdr, sizeof(struct qeth_hdr));
@@ -2591,14 +2583,19 @@ qeth_process_inbound_buffer(struct qeth_card *card,
dev_kfree_skb_any(skb);
continue;
}
+ if (card->info.type == QETH_CARD_TYPE_OSN)
+ rxrc = card->osn_info.data_cb(skb);
+ else
#ifdef CONFIG_QETH_VLAN
if (vlan_tag)
- vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
+ if (card->vlangrp)
+ vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag);
+ else {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
else
#endif
- if (card->info.type == QETH_CARD_TYPE_OSN)
- rxrc = card->osn_info.data_cb(skb);
- else
rxrc = netif_rx(skb);
card->dev->last_rx = jiffies;
card->stats.rx_packets++;
@@ -2626,7 +2623,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
{
struct qeth_buffer_pool_entry *pool_entry;
int i;
-
+
pool_entry = qeth_get_buffer_pool_entry(card);
/*
* since the buffer is accessed only from the input_tasklet
@@ -2700,17 +2697,18 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
* 'index') un-requeued -> this buffer is the first buffer that
* will be requeued the next time
*/
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.inbound_do_qdio_cnt++;
- card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
-#endif
+ if (card->options.performance_stats) {
+ card->perf_stats.inbound_do_qdio_cnt++;
+ card->perf_stats.inbound_do_qdio_start_time =
+ qeth_get_micros();
+ }
rc = do_QDIO(CARD_DDEV(card),
QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
0, queue->next_buf_to_init, count, NULL);
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
- card->perf_stats.inbound_do_qdio_start_time;
-#endif
+ if (card->options.performance_stats)
+ card->perf_stats.inbound_do_qdio_time +=
+ qeth_get_micros() -
+ card->perf_stats.inbound_do_qdio_start_time;
if (rc){
PRINT_WARN("qeth_queue_input_buffer's do_QDIO "
"return %i (device %s).\n",
@@ -2746,10 +2744,10 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
QETH_DBF_TEXT(trace, 6, "qdinput");
card = (struct qeth_card *) card_ptr;
net_dev = card->dev;
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.inbound_cnt++;
- card->perf_stats.inbound_start_time = qeth_get_micros();
-#endif
+ if (card->options.performance_stats) {
+ card->perf_stats.inbound_cnt++;
+ card->perf_stats.inbound_start_time = qeth_get_micros();
+ }
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
QETH_DBF_TEXT(trace, 1,"qdinchk");
@@ -2771,10 +2769,9 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
qeth_queue_input_buffer(card, index);
}
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.inbound_time += qeth_get_micros() -
- card->perf_stats.inbound_start_time;
-#endif
+ if (card->options.performance_stats)
+ card->perf_stats.inbound_time += qeth_get_micros() -
+ card->perf_stats.inbound_start_time;
}
static inline int
@@ -2864,10 +2861,11 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
}
queue->card->dev->trans_start = jiffies;
-#ifdef CONFIG_QETH_PERF_STATS
- queue->card->perf_stats.outbound_do_qdio_cnt++;
- queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros();
-#endif
+ if (queue->card->options.performance_stats) {
+ queue->card->perf_stats.outbound_do_qdio_cnt++;
+ queue->card->perf_stats.outbound_do_qdio_start_time =
+ qeth_get_micros();
+ }
if (under_int)
rc = do_QDIO(CARD_DDEV(queue->card),
QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT,
@@ -2875,10 +2873,10 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
else
rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT,
queue->queue_no, index, count, NULL);
-#ifdef CONFIG_QETH_PERF_STATS
- queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() -
- queue->card->perf_stats.outbound_do_qdio_start_time;
-#endif
+ if (queue->card->options.performance_stats)
+ queue->card->perf_stats.outbound_do_qdio_time +=
+ qeth_get_micros() -
+ queue->card->perf_stats.outbound_do_qdio_start_time;
if (rc){
QETH_DBF_TEXT(trace, 2, "flushbuf");
QETH_DBF_TEXT_(trace, 2, " err%d", rc);
@@ -2890,9 +2888,8 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
return;
}
atomic_add(count, &queue->used_buffers);
-#ifdef CONFIG_QETH_PERF_STATS
- queue->card->perf_stats.bufs_sent += count;
-#endif
+ if (queue->card->options.performance_stats)
+ queue->card->perf_stats.bufs_sent += count;
}
/*
@@ -2907,9 +2904,8 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
QETH_DBF_TEXT(trace, 6, "np->pack");
-#ifdef CONFIG_QETH_PERF_STATS
- queue->card->perf_stats.sc_dp_p++;
-#endif
+ if (queue->card->options.performance_stats)
+ queue->card->perf_stats.sc_dp_p++;
queue->do_pack = 1;
}
}
@@ -2932,9 +2928,8 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
QETH_DBF_TEXT(trace, 6, "pack->np");
-#ifdef CONFIG_QETH_PERF_STATS
- queue->card->perf_stats.sc_p_dp++;
-#endif
+ if (queue->card->options.performance_stats)
+ queue->card->perf_stats.sc_p_dp++;
queue->do_pack = 0;
/* flush packing buffers */
buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -2946,7 +2941,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
queue->next_buf_to_fill =
(queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
- }
+ }
}
}
return flush_count;
@@ -3002,11 +2997,10 @@ qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
!atomic_read(&queue->set_pci_flags_count))
flush_cnt +=
qeth_flush_buffers_on_no_pci(queue);
-#ifdef CONFIG_QETH_PERF_STATS
- if (q_was_packing)
+ if (queue->card->options.performance_stats &&
+ q_was_packing)
queue->card->perf_stats.bufs_sent_pack +=
flush_cnt;
-#endif
if (flush_cnt)
qeth_flush_buffers(queue, 1, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3036,10 +3030,11 @@ qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
return;
}
}
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.outbound_handler_cnt++;
- card->perf_stats.outbound_handler_start_time = qeth_get_micros();
-#endif
+ if (card->options.performance_stats) {
+ card->perf_stats.outbound_handler_cnt++;
+ card->perf_stats.outbound_handler_start_time =
+ qeth_get_micros();
+ }
for(i = first_element; i < (first_element + count); ++i){
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/*we only handle the KICK_IT error by doing a recovery */
@@ -3058,10 +3053,9 @@ qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
qeth_check_outbound_queue(queue);
netif_wake_queue(queue->card->dev);
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.outbound_handler_time += qeth_get_micros() -
- card->perf_stats.outbound_handler_start_time;
-#endif
+ if (card->options.performance_stats)
+ card->perf_stats.outbound_handler_time += qeth_get_micros() -
+ card->perf_stats.outbound_handler_start_time;
}
static void
@@ -3185,13 +3179,14 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
QETH_DBF_TEXT(setup, 2, "allcqdbf");
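+ /* Atomically claim the UNINITIALIZED -> ALLOCATED transition; a racing or repeated caller sees the state already claimed and returns. */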
- if (card->qdio.state == QETH_QDIO_ALLOCATED)
+ if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
+ QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
GFP_KERNEL|GFP_DMA);
if (!card->qdio.in_q)
- return - ENOMEM;
+ goto out_nomem;
QETH_DBF_TEXT(setup, 2, "inq");
QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
@@ -3200,27 +3195,19 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
card->qdio.in_q->bufs[i].buffer =
&card->qdio.in_q->qdio_bufs[i];
/* inbound buffer pool */
- if (qeth_alloc_buffer_pool(card)){
- kfree(card->qdio.in_q);
- return -ENOMEM;
- }
+ if (qeth_alloc_buffer_pool(card))
+ goto out_freeinq;
/* outbound */
card->qdio.out_qs =
kmalloc(card->qdio.no_out_queues *
sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
- if (!card->qdio.out_qs){
- qeth_free_buffer_pool(card);
- return -ENOMEM;
- }
- for (i = 0; i < card->qdio.no_out_queues; ++i){
+ if (!card->qdio.out_qs)
+ goto out_freepool;
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
GFP_KERNEL|GFP_DMA);
- if (!card->qdio.out_qs[i]){
- while (i > 0)
- kfree(card->qdio.out_qs[--i]);
- kfree(card->qdio.out_qs);
- return -ENOMEM;
- }
+ if (!card->qdio.out_qs[i])
+ goto out_freeoutq;
QETH_DBF_TEXT_(setup, 2, "outq %i", i);
QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
@@ -3237,8 +3224,19 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
}
}
- card->qdio.state = QETH_QDIO_ALLOCATED;
return 0;
+
+out_freeoutq:
+ while (i > 0)
+ kfree(card->qdio.out_qs[--i]);
+ kfree(card->qdio.out_qs);
+out_freepool:
+ qeth_free_buffer_pool(card);
+out_freeinq:
+ kfree(card->qdio.in_q);
+out_nomem:
+ atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ return -ENOMEM;
}
static void
@@ -3247,7 +3245,8 @@ qeth_free_qdio_buffers(struct qeth_card *card)
int i, j;
QETH_DBF_TEXT(trace, 2, "freeqdbf");
- if (card->qdio.state == QETH_QDIO_UNINITIALIZED)
+ if (atomic_swap(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+ QETH_QDIO_UNINITIALIZED)
return;
kfree(card->qdio.in_q);
/* inbound buffer pool */
@@ -3260,7 +3259,6 @@ qeth_free_qdio_buffers(struct qeth_card *card)
kfree(card->qdio.out_qs[i]);
}
kfree(card->qdio.out_qs);
- card->qdio.state = QETH_QDIO_UNINITIALIZED;
}
static void
@@ -3282,7 +3280,7 @@ static void
qeth_init_qdio_info(struct qeth_card *card)
{
QETH_DBF_TEXT(setup, 4, "intqdinf");
- card->qdio.state = QETH_QDIO_UNINITIALIZED;
+ atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
/* inbound */
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
@@ -3345,7 +3343,7 @@ qeth_qdio_establish(struct qeth_card *card)
struct qdio_buffer **in_sbal_ptrs;
struct qdio_buffer **out_sbal_ptrs;
int i, j, k;
- int rc;
+ int rc = 0;
QETH_DBF_TEXT(setup, 2, "qdioest");
@@ -3404,8 +3402,10 @@ qeth_qdio_establish(struct qeth_card *card)
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
- if (!(rc = qdio_initialize(&init_data)))
- card->qdio.state = QETH_QDIO_ESTABLISHED;
+ if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
+ QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED)
+ if ((rc = qdio_initialize(&init_data)))
+ atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
kfree(out_sbal_ptrs);
kfree(in_sbal_ptrs);
@@ -3521,13 +3521,20 @@ qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
int rc = 0;
QETH_DBF_TEXT(trace,3,"qdioclr");
- if (card->qdio.state == QETH_QDIO_ESTABLISHED){
+ switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
+ QETH_QDIO_CLEANING)) {
+ case QETH_QDIO_ESTABLISHED:
if ((rc = qdio_cleanup(CARD_DDEV(card),
- (card->info.type == QETH_CARD_TYPE_IQD) ?
- QDIO_FLAG_CLEANUP_USING_HALT :
- QDIO_FLAG_CLEANUP_USING_CLEAR)))
+ (card->info.type == QETH_CARD_TYPE_IQD) ?
+ QDIO_FLAG_CLEANUP_USING_HALT :
+ QDIO_FLAG_CLEANUP_USING_CLEAR)))
QETH_DBF_TEXT_(trace, 3, "1err%d", rc);
- card->qdio.state = QETH_QDIO_ALLOCATED;
+ atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
+ break;
+ case QETH_QDIO_CLEANING:
+ return rc;
+ default:
+ break;
}
if ((rc = qeth_clear_halt_card(card, use_halt)))
QETH_DBF_TEXT_(trace, 3, "2err%d", rc);
@@ -3687,10 +3694,10 @@ qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* return OK; otherwise ksoftirqd goes to 100% */
return NETDEV_TX_OK;
}
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.outbound_cnt++;
- card->perf_stats.outbound_start_time = qeth_get_micros();
-#endif
+ if (card->options.performance_stats) {
+ card->perf_stats.outbound_cnt++;
+ card->perf_stats.outbound_start_time = qeth_get_micros();
+ }
netif_stop_queue(dev);
if ((rc = qeth_send_packet(card, skb))) {
if (rc == -EBUSY) {
@@ -3704,10 +3711,9 @@ qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
netif_wake_queue(dev);
-#ifdef CONFIG_QETH_PERF_STATS
- card->perf_stats.outbound_time += qeth_get_micros() -
- card->perf_stats.outbound_start_time;
-#endif
+ if (card->options.performance_stats)
+ card->perf_stats.outbound_time += qeth_get_micros() -
+ card->perf_stats.outbound_start_time;
return rc;
}
@@ -3922,49 +3928,59 @@ qeth_get_ip_version(struct sk_buff *skb)
}
}
-static inline int
-qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
- struct qeth_hdr **hdr, int ipv)
+static inline struct qeth_hdr *
+__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
{
- int rc = 0;
#ifdef CONFIG_QETH_VLAN
u16 *tag;
-#endif
-
- QETH_DBF_TEXT(trace, 6, "prepskb");
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- *hdr = (struct qeth_hdr *)(*skb)->data;
- return rc;
- }
- rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
- if (rc)
- return rc;
-#ifdef CONFIG_QETH_VLAN
- if (card->vlangrp && vlan_tx_tag_present(*skb) &&
+ if (card->vlangrp && vlan_tx_tag_present(skb) &&
((ipv == 6) || card->options.layer2) ) {
/*
* Move the mac addresses (6 bytes src, 6 bytes dest)
* to the beginning of the new header. We are using three
* memcpys instead of one memmove to save cycles.
*/
- skb_push(*skb, VLAN_HLEN);
- memcpy((*skb)->data, (*skb)->data + 4, 4);
- memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
- memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
- tag = (u16 *)((*skb)->data + 12);
+ skb_push(skb, VLAN_HLEN);
+ memcpy(skb->data, skb->data + 4, 4);
+ memcpy(skb->data + 4, skb->data + 8, 4);
+ memcpy(skb->data + 8, skb->data + 12, 4);
+ tag = (u16 *)(skb->data + 12);
/*
* first two bytes = ETH_P_8021Q (0x8100)
* second two bytes = VLANID
*/
*tag = __constant_htons(ETH_P_8021Q);
- *(tag + 1) = htons(vlan_tx_tag_get(*skb));
+ *(tag + 1) = htons(vlan_tx_tag_get(skb));
}
#endif
- *hdr = (struct qeth_hdr *)
- qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
- if (*hdr == NULL)
- return -EINVAL;
- return 0;
+ return ((struct qeth_hdr *)
+ qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
+}
+
+static inline void
+__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
+{
+ if (orig_skb != new_skb)
+ dev_kfree_skb_any(new_skb);
+}
+
+static inline struct sk_buff *
+qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr **hdr, int ipv)
+{
+ struct sk_buff *new_skb;
+
+ QETH_DBF_TEXT(trace, 6, "prepskb");
+
+ new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
+ if (new_skb == NULL)
+ return NULL;
+ *hdr = __qeth_prepare_skb(card, new_skb, ipv);
+ if (*hdr == NULL) {
+ __qeth_free_new_skb(skb, new_skb);
+ return NULL;
+ }
+ return new_skb;
}
static inline u8
@@ -4206,9 +4222,8 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
flush_cnt = 1;
} else {
QETH_DBF_TEXT(trace, 6, "fillbfpa");
-#ifdef CONFIG_QETH_PERF_STATS
- queue->card->perf_stats.skbs_sent_pack++;
-#endif
+ if (queue->card->options.performance_stats)
+ queue->card->perf_stats.skbs_sent_pack++;
if (buf->next_element_to_fill >=
QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
/*
@@ -4245,21 +4260,15 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* check if buffer is empty to make sure that we do not 'overtake'
* ourselves and try to fill a buffer that is already primed
*/
- if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
- card->stats.tx_dropped++;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
- }
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+ goto out;
if (ctx == NULL)
queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
else {
buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
- if (buffers_needed < 0) {
- card->stats.tx_dropped++;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
- }
+ if (buffers_needed < 0)
+ goto out;
queue->next_buf_to_fill =
(queue->next_buf_to_fill + buffers_needed) %
QDIO_MAX_BUFFERS_PER_Q;
@@ -4274,6 +4283,9 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
qeth_flush_buffers(queue, 0, index, flush_cnt);
}
return 0;
+out:
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
}
static inline int
@@ -4299,8 +4311,7 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* check if buffer is empty to make sure that we do not 'overtake'
* ourselves and try to fill a buffer that is already primed
*/
- if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
- card->stats.tx_dropped++;
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
@@ -4323,7 +4334,6 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* again */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY){
- card->stats.tx_dropped++;
qeth_flush_buffers(queue, 0, start_index, flush_count);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
@@ -4334,7 +4344,6 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* free buffers) to handle eddp context */
if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){
printk("eddp tx_dropped 1\n");
- card->stats.tx_dropped++;
rc = -EBUSY;
goto out;
}
@@ -4346,7 +4355,6 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill);
if (tmp < 0) {
printk("eddp tx_dropped 2\n");
- card->stats.tx_dropped++;
rc = - EBUSY;
goto out;
}
@@ -4380,10 +4388,8 @@ out:
qeth_flush_buffers(queue, 0, start_index, flush_count);
}
/* at this point the queue is UNLOCKED again */
-#ifdef CONFIG_QETH_PERF_STATS
- if (do_pack)
+ if (queue->card->options.performance_stats && do_pack)
queue->card->perf_stats.bufs_sent_pack += flush_count;
-#endif /* CONFIG_QETH_PERF_STATS */
return rc;
}
@@ -4394,21 +4400,21 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
{
int elements_needed = 0;
- if (skb_shinfo(skb)->nr_frags > 0) {
+ if (skb_shinfo(skb)->nr_frags > 0)
elements_needed = (skb_shinfo(skb)->nr_frags + 1);
- }
- if (elements_needed == 0 )
+ if (elements_needed == 0)
elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+ skb->len) >> PAGE_SHIFT);
if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
- PRINT_ERR("qeth_do_send_packet: invalid size of "
- "IP packet (Number=%d / Length=%d). Discarded.\n",
+ PRINT_ERR("Invalid size of IP packet "
+ "(Number=%d / Length=%d). Discarded.\n",
(elements_needed+elems), skb->len);
return 0;
}
return elements_needed;
}
+
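
As a worked example of the count above: with 4 KB pages, a header that begins 4000 bytes into its page followed by a 2000-byte linear skb needs 1 + ((4000 + 2000) >> 12) = 2 buffer elements, since the data crosses one page boundary; a fragmented skb instead reserves nr_frags + 1 elements up front.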
static inline int
qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
{
@@ -4420,112 +4426,112 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
struct qeth_eddp_context *ctx = NULL;
int tx_bytes = skb->len;
-#ifdef CONFIG_QETH_PERF_STATS
unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
unsigned short tso_size = skb_shinfo(skb)->gso_size;
-#endif
+ struct sk_buff *new_skb, *new_skb2;
int rc;
QETH_DBF_TEXT(trace, 6, "sendpkt");
+ new_skb = skb;
+ if ((card->info.type == QETH_CARD_TYPE_OSN) &&
+ (skb->protocol == htons(ETH_P_IPV6)))
+ return -EPERM;
+ cast_type = qeth_get_cast_type(card, skb);
+ if ((cast_type == RTN_BROADCAST) &&
+ (card->info.broadcast_capable == 0))
+ return -EPERM;
+ queue = card->qdio.out_qs
+ [qeth_get_priority_queue(card, skb, ipv, cast_type)];
if (!card->options.layer2) {
ipv = qeth_get_ip_version(skb);
if ((card->dev->hard_header == qeth_fake_header) && ipv) {
- if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) {
- card->stats.tx_dropped++;
- dev_kfree_skb_irq(skb);
- return 0;
- }
+ new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
if(card->dev->type == ARPHRD_IEEE802_TR){
- skb_pull(skb, QETH_FAKE_LL_LEN_TR);
+ skb_pull(new_skb, QETH_FAKE_LL_LEN_TR);
} else {
- skb_pull(skb, QETH_FAKE_LL_LEN_ETH);
+ skb_pull(new_skb, QETH_FAKE_LL_LEN_ETH);
}
}
}
- if ((card->info.type == QETH_CARD_TYPE_OSN) &&
- (skb->protocol == htons(ETH_P_IPV6))) {
- dev_kfree_skb_any(skb);
- return 0;
- }
- cast_type = qeth_get_cast_type(card, skb);
- if ((cast_type == RTN_BROADCAST) &&
- (card->info.broadcast_capable == 0)){
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- queue = card->qdio.out_qs
- [qeth_get_priority_queue(card, skb, ipv, cast_type)];
-
if (skb_is_gso(skb))
large_send = card->options.large_send;
-
- /*are we able to do TSO ? If so ,prepare and send it from here */
+	/* check on OSN device */
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		hdr = (struct qeth_hdr *)new_skb->data;
+	/* are we able to do TSO? */
if ((large_send == QETH_LARGE_SEND_TSO) &&
(cast_type == RTN_UNSPEC)) {
- rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type);
+ rc = qeth_tso_prepare_packet(card, new_skb, ipv, cast_type);
if (rc) {
- card->stats.tx_dropped++;
- card->stats.tx_errors++;
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ __qeth_free_new_skb(skb, new_skb);
+ return rc;
}
elements_needed++;
- } else {
- if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) {
- QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
- return rc;
+ } else if (card->info.type != QETH_CARD_TYPE_OSN) {
+ new_skb2 = qeth_prepare_skb(card, new_skb, &hdr, ipv);
+ if (!new_skb2) {
+ __qeth_free_new_skb(skb, new_skb);
+ return -EINVAL;
}
- if (card->info.type != QETH_CARD_TYPE_OSN)
- qeth_fill_header(card, hdr, skb, ipv, cast_type);
+ if (new_skb != skb)
+ __qeth_free_new_skb(new_skb2, new_skb);
+ new_skb = new_skb2;
+ qeth_fill_header(card, hdr, new_skb, ipv, cast_type);
}
-
if (large_send == QETH_LARGE_SEND_EDDP) {
- ctx = qeth_eddp_create_context(card, skb, hdr);
+ ctx = qeth_eddp_create_context(card, new_skb, hdr);
if (ctx == NULL) {
+ __qeth_free_new_skb(skb, new_skb);
PRINT_WARN("could not create eddp context\n");
return -EINVAL;
}
} else {
- int elems = qeth_get_elements_no(card,(void*) hdr, skb,
+ int elems = qeth_get_elements_no(card,(void*) hdr, new_skb,
elements_needed);
- if (!elems)
+ if (!elems) {
+ __qeth_free_new_skb(skb, new_skb);
return -EINVAL;
+ }
elements_needed += elems;
}
if (card->info.type != QETH_CARD_TYPE_IQD)
- rc = qeth_do_send_packet(card, queue, skb, hdr,
+ rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements_needed, ctx);
else
- rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
+ rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
elements_needed, ctx);
- if (!rc){
+ if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
-#ifdef CONFIG_QETH_PERF_STATS
- if (tso_size &&
- !(large_send == QETH_LARGE_SEND_NO)) {
- card->perf_stats.large_send_bytes += tx_bytes;
- card->perf_stats.large_send_cnt++;
- }
- if (nr_frags > 0){
- card->perf_stats.sg_skbs_sent++;
- /* nr_frags + skb->data */
- card->perf_stats.sg_frags_sent +=
- nr_frags + 1;
+ if (new_skb != skb)
+ dev_kfree_skb_any(skb);
+ if (card->options.performance_stats) {
+ if (tso_size &&
+ !(large_send == QETH_LARGE_SEND_NO)) {
+ card->perf_stats.large_send_bytes += tx_bytes;
+ card->perf_stats.large_send_cnt++;
+ }
+ if (nr_frags > 0) {
+ card->perf_stats.sg_skbs_sent++;
+ /* nr_frags + skb->data */
+ card->perf_stats.sg_frags_sent +=
+ nr_frags + 1;
+ }
}
-#endif /* CONFIG_QETH_PERF_STATS */
+ } else {
+ card->stats.tx_dropped++;
+ __qeth_free_new_skb(skb, new_skb);
}
if (ctx != NULL) {
/* drop creator's reference */
qeth_eddp_put_context(ctx);
/* free skb; it's not referenced by a buffer */
- if (rc == 0)
- dev_kfree_skb_any(skb);
-
+ if (!rc)
+ dev_kfree_skb_any(new_skb);
}
return rc;
}
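
__qeth_free_new_skb() is not shown in this hunk; judging from the call sites above it presumably frees the unshared copy only when one was actually created, along these lines (a sketch, not the patch's literal helper):

static inline void
__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
{
	/* drop the clone; the caller still owns orig_skb */
	if (new_skb != orig_skb)
		dev_kfree_skb_any(new_skb);
}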
@@ -7338,6 +7344,8 @@ qeth_setrouting_v6(struct qeth_card *card)
QETH_DBF_TEXT(trace,3,"setrtg6");
#ifdef CONFIG_QETH_IPV6
+ if (!qeth_is_supported(card, IPA_IPV6))
+ return 0;
qeth_correct_routing_type(card, &card->options.route6.type,
QETH_PROT_IPV6);
@@ -7876,12 +7884,12 @@ __qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
goto out_remove;
}
- card->state = CARD_STATE_SOFTSETUP;
if ((rc = qeth_init_qdio_queues(card))){
QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
goto out_remove;
}
+ card->state = CARD_STATE_SOFTSETUP;
netif_carrier_on(card->dev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
@@ -8538,34 +8546,44 @@ qeth_ipv6_uninit(void)
static void
qeth_sysfs_unregister(void)
{
+ s390_root_dev_unregister(qeth_root_dev);
qeth_remove_driver_attributes();
ccw_driver_unregister(&qeth_ccw_driver);
ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
- s390_root_dev_unregister(qeth_root_dev);
}
+
/**
* register qeth at sysfs
*/
static int
qeth_sysfs_register(void)
{
- int rc=0;
+ int rc;
rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
if (rc)
- return rc;
+ goto out;
+
rc = ccw_driver_register(&qeth_ccw_driver);
if (rc)
- return rc;
+ goto out_ccw_driver;
+
rc = qeth_create_driver_attributes();
if (rc)
- return rc;
+ goto out_qeth_attr;
+
qeth_root_dev = s390_root_dev_register("qeth");
- if (IS_ERR(qeth_root_dev)) {
- rc = PTR_ERR(qeth_root_dev);
- return rc;
- }
- return 0;
+ rc = IS_ERR(qeth_root_dev) ? PTR_ERR(qeth_root_dev) : 0;
+ if (!rc)
+ goto out;
+
+ qeth_remove_driver_attributes();
+out_qeth_attr:
+ ccw_driver_unregister(&qeth_ccw_driver);
+out_ccw_driver:
+ ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
+out:
+ return rc;
}
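
The rewritten registration follows the standard kernel unwind idiom: each step that succeeds gains a label that undoes it, and a failure jumps to the label of the last completed step. A minimal standalone sketch of the shape, with hypothetical step names:

static int step_a(void);	/* hypothetical resources */
static int step_b(void);
static void undo_a(void);

static int register_everything(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;
	rc = step_b();
	if (rc)
		goto out_a;	/* undo only what already succeeded */
	return 0;

out_a:
	undo_a();
out:
	return rc;
}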
/***
@@ -8574,7 +8592,7 @@ qeth_sysfs_register(void)
static int __init
qeth_init(void)
{
- int rc=0;
+ int rc;
PRINT_INFO("loading %s\n", version);
@@ -8583,20 +8601,26 @@ qeth_init(void)
spin_lock_init(&qeth_notify_lock);
rwlock_init(&qeth_card_list.rwlock);
- if (qeth_register_dbf_views())
+ rc = qeth_register_dbf_views();
+ if (rc)
goto out_err;
- if (qeth_sysfs_register())
- goto out_sysfs;
+
+ rc = qeth_sysfs_register();
+ if (rc)
+ goto out_dbf;
#ifdef CONFIG_QETH_IPV6
- if (qeth_ipv6_init()) {
- PRINT_ERR("Out of memory during ipv6 init.\n");
+ rc = qeth_ipv6_init();
+ if (rc) {
+		PRINT_ERR("Out of memory during ipv6 init, code = %d\n", rc);
goto out_sysfs;
}
#endif /* QETH_IPV6 */
- if (qeth_register_notifiers())
+ rc = qeth_register_notifiers();
+ if (rc)
goto out_ipv6;
- if (qeth_create_procfs_entries())
+ rc = qeth_create_procfs_entries();
+ if (rc)
goto out_notifiers;
return rc;
@@ -8606,12 +8630,13 @@ out_notifiers:
out_ipv6:
#ifdef CONFIG_QETH_IPV6
qeth_ipv6_uninit();
-#endif /* QETH_IPV6 */
out_sysfs:
+#endif /* QETH_IPV6 */
qeth_sysfs_unregister();
+out_dbf:
qeth_unregister_dbf_views();
out_err:
- PRINT_ERR("Initialization failed");
+ PRINT_ERR("Initialization failed with code %d\n", rc);
return rc;
}
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
index 66f2da14e6e..faa768e5925 100644
--- a/drivers/s390/net/qeth_proc.c
+++ b/drivers/s390/net/qeth_proc.c
@@ -173,7 +173,6 @@ static struct file_operations qeth_procfile_fops = {
#define QETH_PERF_PROCFILE_NAME "qeth_perf"
static struct proc_dir_entry *qeth_perf_procfile;
-#ifdef CONFIG_QETH_PERF_STATS
static int
qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
{
@@ -192,14 +191,21 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
CARD_DDEV_ID(card),
QETH_CARD_IFNAME(card)
);
+ if (!card->options.performance_stats)
+ seq_printf(s, "Performance statistics are deactivated.\n");
seq_printf(s, " Skb's/buffers received : %lu/%u\n"
" Skb's/buffers sent : %lu/%u\n\n",
- card->stats.rx_packets, card->perf_stats.bufs_rec,
- card->stats.tx_packets, card->perf_stats.bufs_sent
+ card->stats.rx_packets -
+ card->perf_stats.initial_rx_packets,
+ card->perf_stats.bufs_rec,
+ card->stats.tx_packets -
+ card->perf_stats.initial_tx_packets,
+ card->perf_stats.bufs_sent
);
seq_printf(s, " Skb's/buffers sent without packing : %lu/%u\n"
" Skb's/buffers sent with packing : %u/%u\n\n",
- card->stats.tx_packets - card->perf_stats.skbs_sent_pack,
+ card->stats.tx_packets - card->perf_stats.initial_tx_packets
+ - card->perf_stats.skbs_sent_pack,
card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack,
card->perf_stats.skbs_sent_pack,
card->perf_stats.bufs_sent_pack
@@ -275,11 +281,6 @@ static struct file_operations qeth_perf_procfile_fops = {
.release = seq_release,
};
-#define qeth_perf_procfile_created qeth_perf_procfile
-#else
-#define qeth_perf_procfile_created 1
-#endif /* CONFIG_QETH_PERF_STATS */
-
int __init
qeth_create_procfs_entries(void)
{
@@ -288,15 +289,13 @@ qeth_create_procfs_entries(void)
if (qeth_procfile)
qeth_procfile->proc_fops = &qeth_procfile_fops;
-#ifdef CONFIG_QETH_PERF_STATS
qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME,
S_IFREG | 0444, NULL);
if (qeth_perf_procfile)
qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops;
-#endif /* CONFIG_QETH_PERF_STATS */
if (qeth_procfile &&
- qeth_perf_procfile_created)
+ qeth_perf_procfile)
return 0;
else
return -ENOMEM;
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 001497bbea1..5836737ac58 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -743,6 +743,47 @@ static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
qeth_dev_layer2_store);
static ssize_t
+qeth_dev_performance_stats_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev->driver_data;
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
+}
+
+static ssize_t
+qeth_dev_performance_stats_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev->driver_data;
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ i = simple_strtoul(buf, &tmp, 16);
+ if ((i == 0) || (i == 1)) {
+ if (i == card->options.performance_stats)
+ return count;
+ card->options.performance_stats = i;
+ if (i == 0)
+ memset(&card->perf_stats, 0,
+ sizeof(struct qeth_perf_stats));
+ card->perf_stats.initial_rx_packets = card->stats.rx_packets;
+ card->perf_stats.initial_tx_packets = card->stats.tx_packets;
+ } else {
+ PRINT_WARN("performance_stats: write 0 or 1 to this file!\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
+ qeth_dev_performance_stats_store);
+
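+
Once this attribute is listed in qeth_device_attrs (see below), the counters can be toggled at runtime from user space by writing 0 or 1 to the performance_stats file in the device's sysfs directory; whenever the setting changes, the store routine records the current rx/tx packet totals as the initial_* baselines that the /proc/qeth_perf output subtracts.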
+static ssize_t
qeth_dev_large_send_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev->driver_data;
@@ -928,6 +969,7 @@ static struct device_attribute * qeth_device_attrs[] = {
&dev_attr_canonical_macaddr,
&dev_attr_layer2,
&dev_attr_large_send,
+ &dev_attr_performance_stats,
NULL,
};
@@ -1110,12 +1152,12 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
{
const char *start, *end;
char *tmp;
- char buffer[49] = {0, };
+ char buffer[40] = {0, };
start = buf;
/* get address string */
end = strchr(start, '/');
- if (!end || (end-start >= 49)){
+	if (!end || (end - start >= 40)) {
PRINT_WARN("Invalid format for ipato_addx/delx. "
"Use <ip addr>/<mask bits>\n");
return -EINVAL;
@@ -1127,7 +1169,12 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
}
start = end + 1;
*mask_bits = simple_strtoul(start, &tmp, 10);
-
+ if (!strlen(start) ||
+ (tmp == start) ||
+ (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
+		PRINT_WARN("Invalid mask bits for ipato_addx/delx!\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -1698,11 +1745,16 @@ qeth_create_device_attributes(struct device *dev)
sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+ return ret;
}
- if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group)))
+ if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group))){
+ sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+ sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
+ sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
+ sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
return ret;
-
- return ret;
+ }
+ return 0;
}
void
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
index 593f298142c..14504afb044 100644
--- a/drivers/s390/net/qeth_tso.h
+++ b/drivers/s390/net/qeth_tso.h
@@ -24,7 +24,7 @@ static inline struct qeth_hdr_tso *
qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
{
QETH_DBF_TEXT(trace, 5, "tsoprsk");
- return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_tso));
+ return qeth_push_skb(card, *skb, sizeof(struct qeth_hdr_tso));
}
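
This presumably matches a qeth_push_skb() signature that takes the sk_buff directly: the wrapper receives a struct sk_buff ** and previously forwarded the double pointer unchanged, hence the added dereference.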
/**
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5399c5d99b8..a914129a4da 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -19,9 +19,6 @@
#include "s390mach.h"
-#define DBG printk
-// #define DBG(args,...) do {} while (0);
-
static struct semaphore m_sem;
extern int css_process_crw(int, int);
@@ -83,11 +80,11 @@ repeat:
ccode = stcrw(&crw[chain]);
if (ccode != 0)
break;
- DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
- "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
- crw[chain].slct, crw[chain].oflw, crw[chain].chn,
- crw[chain].rsc, crw[chain].anc, crw[chain].erc,
- crw[chain].rsid);
+ printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ crw[chain].slct, crw[chain].oflw, crw[chain].chn,
+ crw[chain].rsc, crw[chain].anc, crw[chain].erc,
+ crw[chain].rsid);
/* Check for overflows. */
if (crw[chain].oflw) {
pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
@@ -117,8 +114,8 @@ repeat:
* reported to the common I/O layer.
*/
if (crw[chain].slct) {
- DBG(KERN_INFO"solicited machine check for "
- "channel path %02X\n", crw[0].rsid);
+ pr_debug("solicited machine check for "
+ "channel path %02X\n", crw[0].rsid);
break;
}
switch (crw[0].erc) {
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 94d1b74db35..7c84b3d4bd9 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -543,7 +543,7 @@ do { \
} while (0)
#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
-# define ZFCP_LOG_NORMAL(fmt, args...)
+# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0)
#else
# define ZFCP_LOG_NORMAL(fmt, args...) \
do { \
@@ -553,7 +553,7 @@ do { \
#endif
#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
-# define ZFCP_LOG_INFO(fmt, args...)
+# define ZFCP_LOG_INFO(fmt, args...) do { } while (0)
#else
# define ZFCP_LOG_INFO(fmt, args...) \
do { \
@@ -563,14 +563,14 @@ do { \
#endif
#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
-# define ZFCP_LOG_DEBUG(fmt, args...)
+# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0)
#else
# define ZFCP_LOG_DEBUG(fmt, args...) \
ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
#endif
#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
-# define ZFCP_LOG_TRACE(fmt, args...)
+# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0)
#else
# define ZFCP_LOG_TRACE(fmt, args...) \
ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
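
Giving the compiled-out log macros a do { } while (0) body keeps each one a single complete statement that still demands its trailing semicolon, so call sites parse identically in every log-level configuration and empty-statement warnings are avoided. A small illustration with a hypothetical macro:

#define LOG_OFF(fmt, args...)	do { } while (0)	/* logging compiled out */

static void report(int err)
{
	if (err)
		LOG_OFF("error %d\n", err);	/* still exactly one statement */
	else
		err = 0;			/* the else binds as expected */
}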
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index d1c1e75bfd6..1e788e815ce 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -11,19 +11,18 @@
#include <linux/init.h>
#include <asm/ebcdic.h>
-struct sysinfo_1_1_1
-{
+struct sysinfo_1_1_1 {
char reserved_0[32];
char manufacturer[16];
char type[4];
char reserved_1[12];
- char model[16];
+ char model_capacity[16];
char sequence[16];
char plant[4];
+ char model[16];
};
-struct sysinfo_1_2_1
-{
+struct sysinfo_1_2_1 {
char reserved_0[80];
char sequence[16];
char plant[4];
@@ -31,9 +30,12 @@ struct sysinfo_1_2_1
unsigned short cpu_address;
};
-struct sysinfo_1_2_2
-{
- char reserved_0[32];
+struct sysinfo_1_2_2 {
+ char format;
+ char reserved_0[1];
+ unsigned short acc_offset;
+ char reserved_1[24];
+ unsigned int secondary_capability;
unsigned int capability;
unsigned short cpus_total;
unsigned short cpus_configured;
@@ -42,8 +44,12 @@ struct sysinfo_1_2_2
unsigned short adjustment[0];
};
-struct sysinfo_2_2_1
-{
+struct sysinfo_1_2_2_extension {
+ unsigned int alt_capability;
+ unsigned short alt_adjustment[0];
+};
+
+struct sysinfo_2_2_1 {
char reserved_0[80];
char sequence[16];
char plant[4];
@@ -51,15 +57,11 @@ struct sysinfo_2_2_1
unsigned short cpu_address;
};
-struct sysinfo_2_2_2
-{
+struct sysinfo_2_2_2 {
char reserved_0[32];
unsigned short lpar_number;
char reserved_1;
unsigned char characteristics;
- #define LPAR_CHAR_DEDICATED (1 << 7)
- #define LPAR_CHAR_SHARED (1 << 6)
- #define LPAR_CHAR_LIMITED (1 << 5)
unsigned short cpus_total;
unsigned short cpus_configured;
unsigned short cpus_standby;
@@ -71,12 +73,14 @@ struct sysinfo_2_2_2
unsigned short cpus_shared;
};
-struct sysinfo_3_2_2
-{
+#define LPAR_CHAR_DEDICATED (1 << 7)
+#define LPAR_CHAR_SHARED (1 << 6)
+#define LPAR_CHAR_LIMITED (1 << 5)
+
+struct sysinfo_3_2_2 {
char reserved_0[31];
unsigned char count;
- struct
- {
+ struct {
char reserved_0[4];
unsigned short cpus_total;
unsigned short cpus_configured;
@@ -90,136 +94,223 @@ struct sysinfo_3_2_2
} vm[8];
};
-union s390_sysinfo
+static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
- struct sysinfo_1_1_1 sysinfo_1_1_1;
- struct sysinfo_1_2_1 sysinfo_1_2_1;
- struct sysinfo_1_2_2 sysinfo_1_2_2;
- struct sysinfo_2_2_1 sysinfo_2_2_1;
- struct sysinfo_2_2_2 sysinfo_2_2_2;
- struct sysinfo_3_2_2 sysinfo_3_2_2;
-};
-
-static inline int stsi (void *sysinfo,
- int fc, int sel1, int sel2)
-{
- int cc, retv;
-
-#ifndef CONFIG_64BIT
- __asm__ __volatile__ ( "lr\t0,%2\n"
- "\tlr\t1,%3\n"
- "\tstsi\t0(%4)\n"
- "0:\tipm\t%0\n"
- "\tsrl\t%0,28\n"
- "1:lr\t%1,0\n"
- ".section .fixup,\"ax\"\n"
- "2:\tlhi\t%0,3\n"
- "\tbras\t1,3f\n"
- "\t.long 1b\n"
- "3:\tl\t1,0(1)\n"
- "\tbr\t1\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- "\t.align 4\n"
- "\t.long 0b,2b\n"
- ".previous\n"
- : "=d" (cc), "=d" (retv)
- : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
- : "cc", "memory", "0", "1" );
-#else
- __asm__ __volatile__ ( "lr\t0,%2\n"
- "lr\t1,%3\n"
- "\tstsi\t0(%4)\n"
- "0:\tipm\t%0\n"
- "\tsrl\t%0,28\n"
- "1:lr\t%1,0\n"
- ".section .fixup,\"ax\"\n"
- "2:\tlhi\t%0,3\n"
- "\tjg\t1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- "\t.align 8\n"
- "\t.quad 0b,2b\n"
- ".previous\n"
- : "=d" (cc), "=d" (retv)
- : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
- : "cc", "memory", "0", "1" );
-#endif
-
- return cc? -1 : retv;
+ register int r0 asm("0") = (fc << 28) | sel1;
+ register int r1 asm("1") = sel2;
+
+ asm volatile(
+ " stsi 0(%2)\n"
+ "0: jz 2f\n"
+ "1: lhi %0,%3\n"
+ "2:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
+ : "cc", "memory" );
+ return r0;
}
-static inline int stsi_0 (void)
+static inline int stsi_0(void)
{
int rc = stsi (NULL, 0, 0, 0);
- return rc == -1 ? rc : (((unsigned int)rc) >> 28);
+ return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
}
-static inline int stsi_1_1_1 (struct sysinfo_1_1_1 *info)
+static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
{
- int rc = stsi (info, 1, 1, 1);
- if (rc != -1)
- {
- EBCASC (info->manufacturer, sizeof(info->manufacturer));
- EBCASC (info->type, sizeof(info->type));
- EBCASC (info->model, sizeof(info->model));
- EBCASC (info->sequence, sizeof(info->sequence));
- EBCASC (info->plant, sizeof(info->plant));
- }
- return rc == -1 ? rc : 0;
+ if (stsi(info, 1, 1, 1) == -ENOSYS)
+ return len;
+
+ EBCASC(info->manufacturer, sizeof(info->manufacturer));
+ EBCASC(info->type, sizeof(info->type));
+ EBCASC(info->model, sizeof(info->model));
+ EBCASC(info->sequence, sizeof(info->sequence));
+ EBCASC(info->plant, sizeof(info->plant));
+ EBCASC(info->model_capacity, sizeof(info->model_capacity));
+ len += sprintf(page + len, "Manufacturer: %-16.16s\n",
+ info->manufacturer);
+ len += sprintf(page + len, "Type: %-4.4s\n",
+ info->type);
+ if (info->model[0] != '\0')
+ /*
+ * Sigh: the model field has been renamed with System z9
+ * to model_capacity and a new model field has been added
+ * after the plant field. To avoid confusing older programs
+	 * the "Model:" line prints "model_capacity model" or just
+	 * "model_capacity" if the model string is empty.
+ */
+ len += sprintf(page + len,
+ "Model: %-16.16s %-16.16s\n",
+ info->model_capacity, info->model);
+ else
+ len += sprintf(page + len, "Model: %-16.16s\n",
+ info->model_capacity);
+ len += sprintf(page + len, "Sequence Code: %-16.16s\n",
+ info->sequence);
+ len += sprintf(page + len, "Plant: %-4.4s\n",
+ info->plant);
+ len += sprintf(page + len, "Model Capacity: %-16.16s\n",
+ info->model_capacity);
+ return len;
}
-static inline int stsi_1_2_1 (struct sysinfo_1_2_1 *info)
+#if 0 /* Currently unused */
+static int stsi_1_2_1(struct sysinfo_1_2_1 *info, char *page, int len)
{
- int rc = stsi (info, 1, 2, 1);
- if (rc != -1)
- {
- EBCASC (info->sequence, sizeof(info->sequence));
- EBCASC (info->plant, sizeof(info->plant));
- }
- return rc == -1 ? rc : 0;
+ if (stsi(info, 1, 2, 1) == -ENOSYS)
+ return len;
+
+ len += sprintf(page + len, "\n");
+ EBCASC(info->sequence, sizeof(info->sequence));
+ EBCASC(info->plant, sizeof(info->plant));
+ len += sprintf(page + len, "Sequence Code of CPU: %-16.16s\n",
+ info->sequence);
+ len += sprintf(page + len, "Plant of CPU: %-16.16s\n",
+ info->plant);
+ return len;
}
+#endif
-static inline int stsi_1_2_2 (struct sysinfo_1_2_2 *info)
+static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
{
- int rc = stsi (info, 1, 2, 2);
- return rc == -1 ? rc : 0;
+ struct sysinfo_1_2_2_extension *ext;
+ int i;
+
+ if (stsi(info, 1, 2, 2) == -ENOSYS)
+ return len;
+ ext = (struct sysinfo_1_2_2_extension *)
+ ((unsigned long) info + info->acc_offset);
+
+ len += sprintf(page + len, "\n");
+ len += sprintf(page + len, "CPUs Total: %d\n",
+ info->cpus_total);
+ len += sprintf(page + len, "CPUs Configured: %d\n",
+ info->cpus_configured);
+ len += sprintf(page + len, "CPUs Standby: %d\n",
+ info->cpus_standby);
+ len += sprintf(page + len, "CPUs Reserved: %d\n",
+ info->cpus_reserved);
+
+ if (info->format == 1) {
+ /*
+ * Sigh 2. According to the specification the alternate
+ * capability field is a 32 bit floating point number
+ * if the higher order 8 bits are not zero. Printing
+ * a floating point number in the kernel is a no-no,
+	 * a floating point number in the kernel is a no-no, so
+	 * the number is always printed as a 32 bit unsigned integer.
+	 * User-space needs to know about the strange
+	 * encoding of the alternate cpu capability.
+ len += sprintf(page + len, "Capability: %u %u\n",
+ info->capability, ext->alt_capability);
+ for (i = 2; i <= info->cpus_total; i++)
+ len += sprintf(page + len,
+ "Adjustment %02d-way: %u %u\n",
+ i, info->adjustment[i-2],
+ ext->alt_adjustment[i-2]);
+
+ } else {
+ len += sprintf(page + len, "Capability: %u\n",
+ info->capability);
+ for (i = 2; i <= info->cpus_total; i++)
+ len += sprintf(page + len,
+ "Adjustment %02d-way: %u\n",
+ i, info->adjustment[i-2]);
+ }
+
+ if (info->secondary_capability != 0)
+ len += sprintf(page + len, "Secondary Capability: %d\n",
+ info->secondary_capability);
+
+ return len;
}
-static inline int stsi_2_2_1 (struct sysinfo_2_2_1 *info)
+#if 0 /* Currently unused */
+static int stsi_2_2_1(struct sysinfo_2_2_1 *info, char *page, int len)
{
- int rc = stsi (info, 2, 2, 1);
- if (rc != -1)
- {
- EBCASC (info->sequence, sizeof(info->sequence));
- EBCASC (info->plant, sizeof(info->plant));
- }
- return rc == -1 ? rc : 0;
+ if (stsi(info, 2, 2, 1) == -ENOSYS)
+ return len;
+
+ len += sprintf(page + len, "\n");
+ EBCASC (info->sequence, sizeof(info->sequence));
+ EBCASC (info->plant, sizeof(info->plant));
+ len += sprintf(page + len, "Sequence Code of logical CPU: %-16.16s\n",
+ info->sequence);
+ len += sprintf(page + len, "Plant of logical CPU: %-16.16s\n",
+ info->plant);
+ return len;
}
+#endif
-static inline int stsi_2_2_2 (struct sysinfo_2_2_2 *info)
+static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
{
- int rc = stsi (info, 2, 2, 2);
- if (rc != -1)
- {
- EBCASC (info->name, sizeof(info->name));
- }
- return rc == -1 ? rc : 0;
+ if (stsi(info, 2, 2, 2) == -ENOSYS)
+ return len;
+
+ EBCASC (info->name, sizeof(info->name));
+
+ len += sprintf(page + len, "\n");
+ len += sprintf(page + len, "LPAR Number: %d\n",
+ info->lpar_number);
+
+ len += sprintf(page + len, "LPAR Characteristics: ");
+ if (info->characteristics & LPAR_CHAR_DEDICATED)
+ len += sprintf(page + len, "Dedicated ");
+ if (info->characteristics & LPAR_CHAR_SHARED)
+ len += sprintf(page + len, "Shared ");
+ if (info->characteristics & LPAR_CHAR_LIMITED)
+ len += sprintf(page + len, "Limited ");
+ len += sprintf(page + len, "\n");
+
+ len += sprintf(page + len, "LPAR Name: %-8.8s\n",
+ info->name);
+
+ len += sprintf(page + len, "LPAR Adjustment: %d\n",
+ info->caf);
+
+ len += sprintf(page + len, "LPAR CPUs Total: %d\n",
+ info->cpus_total);
+ len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
+ info->cpus_configured);
+ len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
+ info->cpus_standby);
+ len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
+ info->cpus_reserved);
+ len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
+ info->cpus_dedicated);
+ len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
+ info->cpus_shared);
+ return len;
}
-static inline int stsi_3_2_2 (struct sysinfo_3_2_2 *info)
+static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
{
- int rc = stsi (info, 3, 2, 2);
- if (rc != -1)
- {
- int i;
- for (i = 0; i < info->count; i++)
- {
- EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
- EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
- }
+ int i;
+
+ if (stsi(info, 3, 2, 2) == -ENOSYS)
+ return len;
+ for (i = 0; i < info->count; i++) {
+ EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
+ EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
+ len += sprintf(page + len, "\n");
+ len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
+ i, info->vm[i].name);
+ len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
+ i, info->vm[i].cpi);
+
+ len += sprintf(page + len, "VM%02d Adjustment: %d\n",
+ i, info->vm[i].caf);
+
+ len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
+ i, info->vm[i].cpus_total);
+ len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
+ i, info->vm[i].cpus_configured);
+ len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
+ i, info->vm[i].cpus_standby);
+ len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
+ i, info->vm[i].cpus_reserved);
}
- return rc == -1 ? rc : 0;
+ return len;
}
@@ -227,118 +318,34 @@ static int proc_read_sysinfo(char *page, char **start,
off_t off, int count,
int *eof, void *data)
{
- unsigned long info_page = get_zeroed_page (GFP_KERNEL);
- union s390_sysinfo *info = (union s390_sysinfo *) info_page;
- int len = 0;
- int level;
- int i;
+ unsigned long info = get_zeroed_page (GFP_KERNEL);
+ int level, len;
if (!info)
return 0;
- level = stsi_0 ();
-
- if (level >= 1 && stsi_1_1_1 (&info->sysinfo_1_1_1) == 0)
- {
- len += sprintf (page+len, "Manufacturer: %-16.16s\n",
- info->sysinfo_1_1_1.manufacturer);
- len += sprintf (page+len, "Type: %-4.4s\n",
- info->sysinfo_1_1_1.type);
- len += sprintf (page+len, "Model: %-16.16s\n",
- info->sysinfo_1_1_1.model);
- len += sprintf (page+len, "Sequence Code: %-16.16s\n",
- info->sysinfo_1_1_1.sequence);
- len += sprintf (page+len, "Plant: %-4.4s\n",
- info->sysinfo_1_1_1.plant);
- }
-
- if (level >= 1 && stsi_1_2_2 (&info->sysinfo_1_2_2) == 0)
- {
- len += sprintf (page+len, "\n");
- len += sprintf (page+len, "CPUs Total: %d\n",
- info->sysinfo_1_2_2.cpus_total);
- len += sprintf (page+len, "CPUs Configured: %d\n",
- info->sysinfo_1_2_2.cpus_configured);
- len += sprintf (page+len, "CPUs Standby: %d\n",
- info->sysinfo_1_2_2.cpus_standby);
- len += sprintf (page+len, "CPUs Reserved: %d\n",
- info->sysinfo_1_2_2.cpus_reserved);
-
- len += sprintf (page+len, "Capability: %d\n",
- info->sysinfo_1_2_2.capability);
+ len = 0;
+ level = stsi_0();
+ if (level >= 1)
+ len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
- for (i = 2; i <= info->sysinfo_1_2_2.cpus_total; i++)
- len += sprintf (page+len, "Adjustment %02d-way: %d\n",
- i, info->sysinfo_1_2_2.adjustment[i-2]);
- }
+ if (level >= 1)
+ len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
- if (level >= 2 && stsi_2_2_2 (&info->sysinfo_2_2_2) == 0)
- {
- len += sprintf (page+len, "\n");
- len += sprintf (page+len, "LPAR Number: %d\n",
- info->sysinfo_2_2_2.lpar_number);
-
- len += sprintf (page+len, "LPAR Characteristics: ");
- if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_DEDICATED)
- len += sprintf (page+len, "Dedicated ");
- if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_SHARED)
- len += sprintf (page+len, "Shared ");
- if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_LIMITED)
- len += sprintf (page+len, "Limited ");
- len += sprintf (page+len, "\n");
-
- len += sprintf (page+len, "LPAR Name: %-8.8s\n",
- info->sysinfo_2_2_2.name);
-
- len += sprintf (page+len, "LPAR Adjustment: %d\n",
- info->sysinfo_2_2_2.caf);
-
- len += sprintf (page+len, "LPAR CPUs Total: %d\n",
- info->sysinfo_2_2_2.cpus_total);
- len += sprintf (page+len, "LPAR CPUs Configured: %d\n",
- info->sysinfo_2_2_2.cpus_configured);
- len += sprintf (page+len, "LPAR CPUs Standby: %d\n",
- info->sysinfo_2_2_2.cpus_standby);
- len += sprintf (page+len, "LPAR CPUs Reserved: %d\n",
- info->sysinfo_2_2_2.cpus_reserved);
- len += sprintf (page+len, "LPAR CPUs Dedicated: %d\n",
- info->sysinfo_2_2_2.cpus_dedicated);
- len += sprintf (page+len, "LPAR CPUs Shared: %d\n",
- info->sysinfo_2_2_2.cpus_shared);
- }
+ if (level >= 2)
+ len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
- if (level >= 3 && stsi_3_2_2 (&info->sysinfo_3_2_2) == 0)
- {
- for (i = 0; i < info->sysinfo_3_2_2.count; i++)
- {
- len += sprintf (page+len, "\n");
- len += sprintf (page+len, "VM%02d Name: %-8.8s\n",
- i, info->sysinfo_3_2_2.vm[i].name);
- len += sprintf (page+len, "VM%02d Control Program: %-16.16s\n",
- i, info->sysinfo_3_2_2.vm[i].cpi);
-
- len += sprintf (page+len, "VM%02d Adjustment: %d\n",
- i, info->sysinfo_3_2_2.vm[i].caf);
-
- len += sprintf (page+len, "VM%02d CPUs Total: %d\n",
- i, info->sysinfo_3_2_2.vm[i].cpus_total);
- len += sprintf (page+len, "VM%02d CPUs Configured: %d\n",
- i, info->sysinfo_3_2_2.vm[i].cpus_configured);
- len += sprintf (page+len, "VM%02d CPUs Standby: %d\n",
- i, info->sysinfo_3_2_2.vm[i].cpus_standby);
- len += sprintf (page+len, "VM%02d CPUs Reserved: %d\n",
- i, info->sysinfo_3_2_2.vm[i].cpus_reserved);
- }
- }
+ if (level >= 3)
+ len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
- free_page (info_page);
+ free_page (info);
return len;
}
static __init int create_proc_sysinfo(void)
{
- create_proc_read_entry ("sysinfo", 0444, NULL,
- proc_read_sysinfo, NULL);
+ create_proc_read_entry("sysinfo", 0444, NULL,
+ proc_read_sysinfo, NULL);
return 0;
}
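
The refactor keeps the classic create_proc_read_entry() handler contract: the callback fills the supplied page buffer and returns the number of bytes written, with offset and *eof handling left to the procfs defaults just as before. Sketched minimally:

static int proc_read_example(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len = 0;

	len += sprintf(page + len, "Example: %d\n", 42);
	return len;	/* bytes placed into 'page' */
}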
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 293bb2fdb1d..2f698763ba5 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -145,8 +145,9 @@ static int opromgetprop(void __user *argp, struct device_node *dp, struct openpr
void *pval;
int len;
- pval = of_get_property(dp, op->oprom_array, &len);
- if (!pval || len <= 0 || len > bufsize)
+ if (!dp ||
+ !(pval = of_get_property(dp, op->oprom_array, &len)) ||
+ len <= 0 || len > bufsize)
return copyout(argp, op, sizeof(int));
memcpy(op->oprom_array, pval, len);
@@ -161,6 +162,8 @@ static int opromnxtprop(void __user *argp, struct device_node *dp, struct openpr
struct property *prop;
int len;
+ if (!dp)
+ return copyout(argp, op, sizeof(int));
if (op->oprom_array[0] == '\0') {
prop = dp->properties;
if (!prop)
@@ -266,9 +269,13 @@ static int oprompci2node(void __user *argp, struct device_node *dp, struct openp
static int oprompath2node(void __user *argp, struct device_node *dp, struct openpromio *op, int bufsize, DATA *data)
{
+ phandle ph = 0;
+
dp = of_find_node_by_path(op->oprom_array);
+ if (dp)
+ ph = dp->node;
data->current_node = dp;
- *((int *)op->oprom_array) = dp->node;
+ *((int *)op->oprom_array) = ph;
op->oprom_size = sizeof(int);
return copyout(argp, op, bufsize + sizeof(int));
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 2d20caf377f..a9bb3cb7e89 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -123,7 +123,8 @@ enum {
ich6_sata = 4,
ich6_sata_ahci = 5,
ich6m_sata_ahci = 6,
- ich8_sata_ahci = 7,
+ ich7m_sata_ahci = 7,
+ ich8_sata_ahci = 8,
/* constants for mapping table */
P0 = 0, /* port 0 */
@@ -188,7 +189,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* 82801GB/GR/GH (ICH7, identical to ICH6) */
{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
/* 2801GBM/GHM (ICH7M, identical to ICH6M) */
- { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+ { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich7m_sata_ahci },
/* Enterprise Southbridge 2 (where's the datasheet?) */
{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
/* SATA Controller 1 IDE (ICH8, no datasheet yet) */
@@ -336,6 +337,24 @@ static const struct piix_map_db ich6m_map_db = {
},
};
+static const struct piix_map_db ich7m_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x5,
+ .present_shift = 4,
+
+ /* Map 01b isn't specified in the doc but some notebooks use
+ * it anyway. ATM, the only case spotted carries subsystem ID
+ * 1025:0107. This is the only difference from ich6m.
+ */
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, P2, RV, RV }, /* 00b */
+ { IDE, IDE, P1, P3 }, /* 01b */
+ { P0, P2, IDE, IDE }, /* 10b */
+ { RV, RV, RV, RV },
+ },
+};
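
In the map rows above, the PM/PS/SM/SS columns stand for the primary/secondary master/slave slots, and each cell records what that slot carries for the given MAP register value: a SATA port (P0..P3), legacy IDE, or RV for a reserved combination.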
+
static const struct piix_map_db ich8_map_db = {
.mask = 0x3,
.port_enable = 0x3,
@@ -355,6 +374,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich6_sata] = &ich6_map_db,
[ich6_sata_ahci] = &ich6_map_db,
[ich6m_sata_ahci] = &ich6m_map_db,
+ [ich7m_sata_ahci] = &ich7m_map_db,
[ich8_sata_ahci] = &ich8_map_db,
};
@@ -444,6 +464,18 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
+ /* ich7m_sata_ahci */
+ {
+ .sht = &piix_sht,
+ .host_flags = ATA_FLAG_SATA |
+ PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+ PIIX_FLAG_AHCI,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x7f, /* udma0-6 */
+ .port_ops = &piix_sata_ops,
+ },
+
/* ich8_sata_ahci */
{
.sht = &piix_sht,
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index ed22b96580c..01b8ac641eb 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -156,8 +156,8 @@ static void gather_partition_info(void)
{
struct device_node *rootdn;
- char *ppartition_name;
- unsigned int *p_number_ptr;
+ const char *ppartition_name;
+ const unsigned int *p_number_ptr;
/* Retrieve information about this partition */
rootdn = find_path_device("/");
@@ -165,14 +165,11 @@ static void gather_partition_info(void)
return;
}
- ppartition_name =
- get_property(rootdn, "ibm,partition-name", NULL);
+ ppartition_name = get_property(rootdn, "ibm,partition-name", NULL);
if (ppartition_name)
strncpy(partition_name, ppartition_name,
sizeof(partition_name));
- p_number_ptr =
- (unsigned int *)get_property(rootdn, "ibm,partition-no",
- NULL);
+ p_number_ptr = get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
partition_number = *p_number_ptr;
}
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index d91e8949c71..0a9dbc59663 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -108,7 +108,7 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc);
+ crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
buf->sg.length = tcp_conn->hdr_size;
}
@@ -468,7 +468,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
sg_init_one(&sg, (u8 *)hdr,
sizeof(struct iscsi_hdr) + ahslen);
- crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
+ crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
+ (u8 *)&cdgst);
rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
ahslen);
if (cdgst != rdgst) {
@@ -675,7 +676,7 @@ iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
}
static inline void
-partial_sg_digest_update(struct crypto_tfm *tfm, struct scatterlist *sg,
+partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
int offset, int length)
{
struct scatterlist temp;
@@ -683,7 +684,7 @@ partial_sg_digest_update(struct crypto_tfm *tfm, struct scatterlist *sg,
memcpy(&temp, sg, sizeof(struct scatterlist));
temp.offset = offset;
temp.length = length;
- crypto_digest_update(tfm, &temp, 1);
+ crypto_hash_update(desc, &temp, length);
}
static void
@@ -692,7 +693,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
struct scatterlist tmp;
sg_init_one(&tmp, buf, len);
- crypto_digest_update(tcp_conn->rx_tfm, &tmp, 1);
+ crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
}
static int iscsi_scsi_data_in(struct iscsi_conn *conn)
@@ -746,12 +747,12 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
if (!rc) {
if (conn->datadgst_en) {
if (!offset)
-					crypto_digest_update(
-						tcp_conn->rx_tfm,
-						&sg[i], 1);
+					crypto_hash_update(
+						&tcp_conn->rx_hash,
+						&sg[i], sg[i].length);
else
partial_sg_digest_update(
- tcp_conn->rx_tfm,
+ &tcp_conn->rx_hash,
&sg[i],
sg[i].offset + offset,
sg[i].length - offset);
@@ -765,9 +766,10 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
/*
* data-in is complete, but buffer not...
*/
- partial_sg_digest_update(tcp_conn->rx_tfm,
- &sg[i],
- sg[i].offset, sg[i].length-rc);
+ partial_sg_digest_update(&tcp_conn->rx_hash,
+ &sg[i],
+ sg[i].offset,
+ sg[i].length-rc);
rc = 0;
break;
}
@@ -885,7 +887,7 @@ more:
rc = iscsi_tcp_hdr_recv(conn);
if (!rc && tcp_conn->in.datalen) {
if (conn->datadgst_en)
- crypto_digest_init(tcp_conn->rx_tfm);
+ crypto_hash_init(&tcp_conn->rx_hash);
tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
} else if (rc) {
iscsi_conn_failure(conn, rc);
@@ -942,11 +944,11 @@ more:
tcp_conn->in.padding);
memset(pad, 0, tcp_conn->in.padding);
sg_init_one(&sg, pad, tcp_conn->in.padding);
- crypto_digest_update(tcp_conn->rx_tfm,
- &sg, 1);
+ crypto_hash_update(&tcp_conn->rx_hash,
+ &sg, sg.length);
}
- crypto_digest_final(tcp_conn->rx_tfm,
- (u8 *) &tcp_conn->in.datadgst);
+ crypto_hash_final(&tcp_conn->rx_hash,
+ (u8 *) &tcp_conn->in.datadgst);
debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
tcp_conn->data_copied = 0;
@@ -1191,7 +1193,7 @@ static inline void
iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
struct iscsi_tcp_cmd_task *tcp_ctask)
{
- crypto_digest_init(tcp_conn->tx_tfm);
+ crypto_hash_init(&tcp_conn->tx_hash);
tcp_ctask->digest_count = 4;
}
@@ -1447,8 +1449,9 @@ iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
tcp_ctask->pad_count);
if (conn->datadgst_en)
- crypto_digest_update(tcp_conn->tx_tfm,
- &tcp_ctask->sendbuf.sg, 1);
+ crypto_hash_update(&tcp_conn->tx_hash,
+ &tcp_ctask->sendbuf.sg,
+ tcp_ctask->sendbuf.sg.length);
} else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
return 0;
@@ -1480,7 +1483,7 @@ iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
tcp_conn = conn->dd_data;
if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
- crypto_digest_final(tcp_conn->tx_tfm, (u8*)digest);
+ crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
iscsi_buf_init_iov(buf, (char*)digest, 4);
}
tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
@@ -1514,7 +1517,7 @@ iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
*sent = *sent + buf_sent;
if (buf_sent && conn->datadgst_en)
- partial_sg_digest_update(tcp_conn->tx_tfm,
+ partial_sg_digest_update(&tcp_conn->tx_hash,
&sendbuf->sg, sendbuf->sg.offset + offset,
buf_sent);
if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
@@ -1768,18 +1771,22 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
/* initial operational parameters */
tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
- tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c", 0);
- if (!tcp_conn->tx_tfm)
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+ if (!tcp_conn->tx_hash.tfm)
goto free_tcp_conn;
- tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c", 0);
- if (!tcp_conn->rx_tfm)
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->rx_hash.flags = 0;
+ if (!tcp_conn->rx_hash.tfm)
goto free_tx_tfm;
return cls_conn;
free_tx_tfm:
- crypto_free_tfm(tcp_conn->tx_tfm);
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
free_tcp_conn:
kfree(tcp_conn);
tcp_conn_alloc_fail:
@@ -1819,10 +1826,10 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
/* now free tcp_conn */
if (digest) {
- if (tcp_conn->tx_tfm)
- crypto_free_tfm(tcp_conn->tx_tfm);
- if (tcp_conn->rx_tfm)
- crypto_free_tfm(tcp_conn->rx_tfm);
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
}
kfree(tcp_conn);
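
The conversion above moves from the old crypto_digest_* calls on a bare crypto_tfm to the crypto_hash interface, where a struct hash_desc bundles the tfm with request flags and the update/digest calls take a byte count rather than a scatterlist entry count. A condensed sketch of the new calling sequence (IS_ERR/error checks trimmed):

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void crc32c_example(void *buf, unsigned int len, u8 *crc)
{
	struct hash_desc desc;
	struct scatterlist sg;

	desc.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
	desc.flags = 0;

	sg_init_one(&sg, buf, len);
	crypto_hash_init(&desc);
	crypto_hash_update(&desc, &sg, len);	/* nbytes now, not an sg count */
	crypto_hash_final(&desc, crc);

	crypto_free_hash(desc.tfm);
}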
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 609f4778d12..32736831790 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -49,6 +49,7 @@
#define ISCSI_SG_TABLESIZE SG_ALL
#define ISCSI_TCP_MAX_CMD_LEN 16
+struct crypto_hash;
struct socket;
/* Socket connection receive helper */
@@ -81,6 +82,7 @@ struct iscsi_tcp_conn {
* stop to terminate */
/* iSCSI connection-wide sequencing */
int hdr_size; /* PDU header size */
+
/* control data */
struct iscsi_tcp_recv in; /* TCP receive context */
int in_progress; /* connection state machine */
@@ -91,8 +93,8 @@ struct iscsi_tcp_conn {
void (*old_write_space)(struct sock *);
/* data and header digests */
- struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */
- struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */
+ struct hash_desc tx_hash; /* CRC32C (Tx) */
+ struct hash_desc rx_hash; /* CRC32C (Rx) */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 73dd6c8deed..427b73a3886 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1256,10 +1256,15 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
swap_buf_le16(id, ATA_ID_WORDS);
/* sanity check */
- if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
- rc = -EINVAL;
- reason = "device reports illegal type";
- goto err_out;
+ rc = -EINVAL;
+ reason = "device reports illegal type";
+
+ if (class == ATA_DEV_ATA) {
+ if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
+ goto err_out;
+ } else {
+ if (ata_id_is_ata(id))
+ goto err_out;
}
if (post_reset && class == ATA_DEV_ATA) {
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 89ef34df5a1..6422de72bf4 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -431,7 +431,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
struct fsc_state *state;
struct Scsi_Host *host;
void *dma_cmd_space;
- unsigned char *clkprop;
+ const unsigned char *clkprop;
int proplen, rc = -ENODEV;
if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 5572981a9f9..592b52afe65 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1850,7 +1850,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
struct device_node *mesh = macio_get_of_node(mdev);
struct pci_dev* pdev = macio_get_pci_dev(mdev);
- int tgt, *cfp, minper;
+ int tgt, minper;
+ const int *cfp;
struct mesh_state *ms;
struct Scsi_Host *mesh_host;
void *dma_cmd_space;
@@ -1939,7 +1940,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
ms->tgts[tgt].current_req = NULL;
}
- if ((cfp = (int *) get_property(mesh, "clock-frequency", NULL)))
+ if ((cfp = get_property(mesh, "clock-frequency", NULL)))
ms->clk_freq = *cfp;
else {
printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 1053c7c76b7..fa38a413d16 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -1961,8 +1961,7 @@ comreset_retry:
timeout = jiffies + msecs_to_jiffies(200);
do {
sata_scr_read(ap, SCR_STATUS, &sstatus);
- sstatus &= 0x3;
- if ((sstatus == 3) || (sstatus == 0))
+ if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
break;
__msleep(1, can_sleep);
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 7d0858095e1..6b70c3c76df 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -268,7 +268,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
/* Match it to a port node */
index = (ap == ap->host_set->ports[0]) ? 0 : 1;
for (np = np->child; np != NULL; np = np->sibling) {
- u32 *reg = (u32 *)get_property(np, "reg", NULL);
+ const u32 *reg = get_property(np, "reg", NULL);
if (!reg)
continue;
if (index == *reg)
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 01d40369a8a..a3727af8b9c 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -77,6 +77,7 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void vt6420_error_handler(struct ata_port *ap);
static const struct pci_device_id svia_pci_tbl[] = {
+ { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
{ 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
{ 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a8ed5a22009..3d355d05461 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -466,7 +466,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
int old_result = scmd->result;
- DECLARE_COMPLETION(done);
+ DECLARE_COMPLETION_ONSTACK(done);
unsigned long timeleft;
unsigned long flags;
unsigned char old_cmnd[MAX_COMMAND_SIZE];
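
DECLARE_COMPLETION_ONSTACK is the variant intended for automatic (stack) lifetime: with lockdep enabled it initialises the completion at runtime so each instance gets its own lock class, while plain DECLARE_COMPLETION remains for static or global completions. A sketch of the usage pattern, with a hypothetical producer:

#include <linux/completion.h>

static void start_async_work(struct completion *done);	/* hypothetical: arranges complete(done) later */

static int issue_and_wait(void)
{
	DECLARE_COMPLETION_ONSTACK(done);	/* lives in this stack frame */

	start_async_work(&done);
	wait_for_completion(&done);		/* typically completed from IRQ context */
	return 0;
}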
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index a1d322f8a16..851e4839d6d 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -458,11 +458,11 @@ static int pci_siig_setup(struct serial_private *priv,
* growing *huge*, we use this function to collapse some 70 entries
* in the PCI table into one, for sanity's and compactness's sake.
*/
-static unsigned short timedia_single_port[] = {
+static const unsigned short timedia_single_port[] = {
0x4025, 0x4027, 0x4028, 0x5025, 0x5027, 0
};
-static unsigned short timedia_dual_port[] = {
+static const unsigned short timedia_dual_port[] = {
0x0002, 0x4036, 0x4037, 0x4038, 0x4078, 0x4079, 0x4085,
0x4088, 0x4089, 0x5037, 0x5078, 0x5079, 0x5085, 0x6079,
0x7079, 0x8079, 0x8137, 0x8138, 0x8237, 0x8238, 0x9079,
@@ -470,35 +470,34 @@ static unsigned short timedia_dual_port[] = {
0xD079, 0
};
-static unsigned short timedia_quad_port[] = {
+static const unsigned short timedia_quad_port[] = {
0x4055, 0x4056, 0x4095, 0x4096, 0x5056, 0x8156, 0x8157,
0x8256, 0x8257, 0x9056, 0x9156, 0x9157, 0x9158, 0x9159,
0x9256, 0x9257, 0xA056, 0xA157, 0xA158, 0xA159, 0xB056,
0xB157, 0
};
-static unsigned short timedia_eight_port[] = {
+static const unsigned short timedia_eight_port[] = {
0x4065, 0x4066, 0x5065, 0x5066, 0x8166, 0x9066, 0x9166,
0x9167, 0x9168, 0xA066, 0xA167, 0xA168, 0
};
static const struct timedia_struct {
int num;
- unsigned short *ids;
+ const unsigned short *ids;
} timedia_data[] = {
{ 1, timedia_single_port },
{ 2, timedia_dual_port },
{ 4, timedia_quad_port },
- { 8, timedia_eight_port },
- { 0, NULL }
+ { 8, timedia_eight_port }
};
static int pci_timedia_init(struct pci_dev *dev)
{
- unsigned short *ids;
+ const unsigned short *ids;
int i, j;
- for (i = 0; timedia_data[i].num; i++) {
+ for (i = 0; i < ARRAY_SIZE(timedia_data); i++) {
ids = timedia_data[i].ids;
for (j = 0; ids[j]; j++)
if (dev->subsystem_device == ids[j])
@@ -936,6 +935,7 @@ enum pci_board_num_t {
pbn_b1_8_1382400,
pbn_b2_1_115200,
+ pbn_b2_2_115200,
pbn_b2_8_115200,
pbn_b2_1_460800,
@@ -1243,6 +1243,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [pbn_b2_2_115200] = {
+ .flags = FL_BASE2,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b2_8_115200] = {
.flags = FL_BASE2,
.num_ports = 8,
@@ -2340,6 +2346,13 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_b0_1_115200 },
/*
+ * IntaShield IS-200
+ */
+ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS200,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0811 */
+ pbn_b2_2_115200 },
+
+ /*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
*/
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index bfd2a22759e..a3b99caf80e 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -1400,8 +1400,8 @@ static struct uart_ops pmz_pops = {
static int __init pmz_init_port(struct uart_pmac_port *uap)
{
struct device_node *np = uap->node;
- char *conn;
- struct slot_names_prop {
+ const char *conn;
+ const struct slot_names_prop {
int count;
char name[1];
} *slots;
@@ -1458,7 +1458,7 @@ no_dma:
uap->flags |= PMACZILOG_FLAG_IS_IRDA;
uap->port_type = PMAC_SCC_ASYNC;
/* 1999 Powerbook G3 has slot-names property instead */
- slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
+ slots = get_property(np, "slot-names", &len);
if (slots && slots->count > 0) {
if (strcmp(slots->name, "IrDA") == 0)
uap->flags |= PMACZILOG_FLAG_IS_IRDA;
@@ -1470,7 +1470,8 @@ no_dma:
if (ZS_IS_INTMODEM(uap)) {
struct device_node* i2c_modem = find_devices("i2c-modem");
if (i2c_modem) {
- char* mid = get_property(i2c_modem, "modem-id", NULL);
+ const char* mid =
+ get_property(i2c_modem, "modem-id", NULL);
if (mid) switch(*mid) {
case 0x04 :
case 0x05 :
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 392bffcf96e..95738a19cde 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -1621,7 +1621,7 @@ static struct s3c24xx_uart_info s3c2412_uart_inf = {
static int s3c2412_serial_probe(struct platform_device *dev)
{
dbg("s3c2440_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
+ return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
}
static struct platform_driver s3c2412_serial_drv = {
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 80ef7d48275..372e47f7d59 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -2377,6 +2377,9 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
return (port1->iobase == port2->iobase) &&
(port1->hub6 == port2->hub6);
case UPIO_MEM:
+ case UPIO_MEM32:
+ case UPIO_AU:
+ case UPIO_TSI:
return (port1->mapbase == port2->mapbase);
}
return 0;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 301573373c3..cbede06cac2 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1579,7 +1579,7 @@ static int __init serial_console_setup(struct console *co, char *options)
h8300_sci_enable(port, sci_enable);
#endif
#elif defined(CONFIG_SUPERH64)
- port->uartclk = current_cpu_info.module_clock * 16;
+ port->uartclk = current_cpu_data.module_clock * 16;
#else
{
struct clk *clk = clk_get("module_clk");
@@ -1720,7 +1720,7 @@ static int __init sci_init(void)
#if defined(__H8300H__) || defined(__H8300S__)
sciport->port.uartclk = CONFIG_CPU_CLOCK;
#elif defined(CONFIG_SUPERH64)
- sciport->port.uartclk = current_cpu_info.module_clock * 16;
+ sciport->port.uartclk = current_cpu_data.module_clock * 16;
#else
struct clk *clk = clk_get("module_clk");
sciport->port.uartclk = clk_get_rate(clk) * 16;
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 4fe1bec1c25..30299c620d9 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -117,6 +117,8 @@ struct eth_dev {
struct usb_ep *in_ep, *out_ep, *status_ep;
const struct usb_endpoint_descriptor
*in, *out, *status;
+
+ spinlock_t req_lock;
struct list_head tx_reqs, rx_reqs;
struct net_device *net;
@@ -1066,21 +1068,31 @@ static void eth_reset_config (struct eth_dev *dev)
*/
if (dev->in) {
usb_ep_disable (dev->in_ep);
+ spin_lock(&dev->req_lock);
while (likely (!list_empty (&dev->tx_reqs))) {
req = container_of (dev->tx_reqs.next,
struct usb_request, list);
list_del (&req->list);
+
+ spin_unlock(&dev->req_lock);
usb_ep_free_request (dev->in_ep, req);
+ spin_lock(&dev->req_lock);
}
+ spin_unlock(&dev->req_lock);
}
if (dev->out) {
usb_ep_disable (dev->out_ep);
+ spin_lock(&dev->req_lock);
while (likely (!list_empty (&dev->rx_reqs))) {
req = container_of (dev->rx_reqs.next,
struct usb_request, list);
list_del (&req->list);
+
+ spin_unlock(&dev->req_lock);
usb_ep_free_request (dev->out_ep, req);
+ spin_lock(&dev->req_lock);
}
+ spin_unlock(&dev->req_lock);
}
if (dev->status) {
@@ -1659,9 +1671,9 @@ enomem:
if (retval) {
DEBUG (dev, "rx submit --> %d\n", retval);
dev_kfree_skb_any (skb);
- spin_lock (&dev->lock);
+ spin_lock(&dev->req_lock);
list_add (&req->list, &dev->rx_reqs);
- spin_unlock (&dev->lock);
+ spin_unlock(&dev->req_lock);
}
return retval;
}
@@ -1730,8 +1742,9 @@ quiesce:
dev_kfree_skb_any (skb);
if (!netif_running (dev->net)) {
clean:
- /* nobody reading rx_reqs, so no dev->lock */
+ spin_lock(&dev->req_lock);
list_add (&req->list, &dev->rx_reqs);
+ spin_unlock(&dev->req_lock);
req = NULL;
}
if (req)
@@ -1782,15 +1795,18 @@ static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags)
{
int status;
+ spin_lock(&dev->req_lock);
status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags);
if (status < 0)
goto fail;
status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags);
if (status < 0)
goto fail;
- return 0;
+ goto done;
fail:
DEBUG (dev, "can't alloc requests\n");
+done:
+ spin_unlock(&dev->req_lock);
return status;
}
@@ -1800,21 +1816,21 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
unsigned long flags;
/* fill unused rxq slots with some skb */
- spin_lock_irqsave (&dev->lock, flags);
+ spin_lock_irqsave(&dev->req_lock, flags);
while (!list_empty (&dev->rx_reqs)) {
req = container_of (dev->rx_reqs.next,
struct usb_request, list);
list_del_init (&req->list);
- spin_unlock_irqrestore (&dev->lock, flags);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
if (rx_submit (dev, req, gfp_flags) < 0) {
defer_kevent (dev, WORK_RX_MEMORY);
return;
}
- spin_lock_irqsave (&dev->lock, flags);
+ spin_lock_irqsave(&dev->req_lock, flags);
}
- spin_unlock_irqrestore (&dev->lock, flags);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
}
static void eth_work (void *_dev)
@@ -1848,9 +1864,9 @@ static void tx_complete (struct usb_ep *ep, struct usb_request *req)
}
dev->stats.tx_packets++;
- spin_lock (&dev->lock);
+ spin_lock(&dev->req_lock);
list_add (&req->list, &dev->tx_reqs);
- spin_unlock (&dev->lock);
+ spin_unlock(&dev->req_lock);
dev_kfree_skb_any (skb);
atomic_dec (&dev->tx_qlen);
@@ -1896,12 +1912,12 @@ static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
}
- spin_lock_irqsave (&dev->lock, flags);
+ spin_lock_irqsave(&dev->req_lock, flags);
req = container_of (dev->tx_reqs.next, struct usb_request, list);
list_del (&req->list);
if (list_empty (&dev->tx_reqs))
netif_stop_queue (net);
- spin_unlock_irqrestore (&dev->lock, flags);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
/* no buffer copies needed, unless the network stack did it
* or the hardware can't use skb buffers.
@@ -1955,11 +1971,11 @@ static int eth_start_xmit (struct sk_buff *skb, struct net_device *net)
drop:
dev->stats.tx_dropped++;
dev_kfree_skb_any (skb);
- spin_lock_irqsave (&dev->lock, flags);
+ spin_lock_irqsave(&dev->req_lock, flags);
if (list_empty (&dev->tx_reqs))
netif_start_queue (net);
list_add (&req->list, &dev->tx_reqs);
- spin_unlock_irqrestore (&dev->lock, flags);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
}
return 0;
}
@@ -2378,6 +2394,7 @@ autoconf_fail:
return status;
dev = netdev_priv(net);
spin_lock_init (&dev->lock);
+ spin_lock_init (&dev->req_lock);
INIT_WORK (&dev->work, eth_work, dev);
INIT_LIST_HEAD (&dev->tx_reqs);
INIT_LIST_HEAD (&dev->rx_reqs);
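
The ether.c hunks above introduce a dedicated req_lock for the tx_reqs/rx_reqs lists and deliberately drop it around usb_ep_free_request(), which may block. A minimal userspace sketch of that drain pattern, using pthreads; the names (req, req_lock, free_request) are stand-ins, not the kernel API:

#include <pthread.h>
#include <stdlib.h>

struct req { struct req *next; };

static struct req *tx_reqs;                 /* list being drained */
static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_request(struct req *r)     /* stands in for the blocking free */
{
	free(r);
}

static void drain_tx_reqs(void)
{
	pthread_mutex_lock(&req_lock);
	while (tx_reqs) {
		struct req *r = tx_reqs;
		tx_reqs = r->next;          /* unlink while holding the lock */

		pthread_mutex_unlock(&req_lock);
		free_request(r);            /* blocking call, lock dropped */
		pthread_mutex_lock(&req_lock);
	}
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {       /* build a small list to drain */
		struct req *r = malloc(sizeof(*r));
		r->next = tx_reqs;
		tx_reqs = r;
	}
	drain_tx_reqs();
	return 0;
}

Re-taking the lock after the blocking call means the loop condition is re-evaluated under the lock each time, so concurrent list users stay safe.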
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 66c3f61bc9d..431e8f31f1a 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -372,7 +372,7 @@ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
* need to change any toggles in this URB */
td = list_entry(urbp->td_list.next, struct uhci_td, list);
if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
- td = list_entry(urbp->td_list.next, struct uhci_td,
+ td = list_entry(urbp->td_list.prev, struct uhci_td,
list);
toggle = uhci_toggle(td_token(td)) ^ 1;
@@ -1348,7 +1348,7 @@ static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
}
uhci_giveback_urb(uhci, qh, urb, regs);
- if (status < 0)
+ if (status < 0 && qh->type != USB_ENDPOINT_XFER_ISOC)
break;
}
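
The first uhci-q.c hunk reads the toggle from the last TD of the URB rather than the first; with a circular list head, head->prev is the last node. A small model under that assumption:

#include <stdio.h>

struct node { struct node *next, *prev; int toggle; };

static int next_toggle(struct node *head)
{
	struct node *last = head->prev;     /* the buggy code used head->next */
	return last->toggle ^ 1;            /* next transfer starts inverted */
}

int main(void)
{
	struct node head, a, b;             /* head <-> a <-> b, circular */
	head.next = &a; a.next = &b; b.next = &head;
	head.prev = &b; b.prev = &a; a.prev = &head;
	a.toggle = 0;
	b.toggle = 1;
	printf("next URB starts with toggle %d\n", next_toggle(&head));
	return 0;
}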
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index 8ea9c915fbf..3305fb6079e 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -1411,17 +1411,54 @@ void hid_init_reports(struct hid_device *hid)
warn("timeout initializing reports");
}
+#define USB_VENDOR_ID_GTCO 0x078c
+#define USB_DEVICE_ID_GTCO_90 0x0090
+#define USB_DEVICE_ID_GTCO_100 0x0100
+#define USB_DEVICE_ID_GTCO_101 0x0101
+#define USB_DEVICE_ID_GTCO_103 0x0103
+#define USB_DEVICE_ID_GTCO_104 0x0104
+#define USB_DEVICE_ID_GTCO_105 0x0105
+#define USB_DEVICE_ID_GTCO_106 0x0106
+#define USB_DEVICE_ID_GTCO_107 0x0107
+#define USB_DEVICE_ID_GTCO_108 0x0108
+#define USB_DEVICE_ID_GTCO_200 0x0200
+#define USB_DEVICE_ID_GTCO_201 0x0201
+#define USB_DEVICE_ID_GTCO_202 0x0202
+#define USB_DEVICE_ID_GTCO_203 0x0203
+#define USB_DEVICE_ID_GTCO_204 0x0204
+#define USB_DEVICE_ID_GTCO_205 0x0205
+#define USB_DEVICE_ID_GTCO_206 0x0206
+#define USB_DEVICE_ID_GTCO_207 0x0207
+#define USB_DEVICE_ID_GTCO_300 0x0300
+#define USB_DEVICE_ID_GTCO_301 0x0301
+#define USB_DEVICE_ID_GTCO_302 0x0302
+#define USB_DEVICE_ID_GTCO_303 0x0303
+#define USB_DEVICE_ID_GTCO_304 0x0304
+#define USB_DEVICE_ID_GTCO_305 0x0305
+#define USB_DEVICE_ID_GTCO_306 0x0306
+#define USB_DEVICE_ID_GTCO_307 0x0307
+#define USB_DEVICE_ID_GTCO_308 0x0308
+#define USB_DEVICE_ID_GTCO_309 0x0309
+#define USB_DEVICE_ID_GTCO_400 0x0400
+#define USB_DEVICE_ID_GTCO_401 0x0401
+#define USB_DEVICE_ID_GTCO_402 0x0402
+#define USB_DEVICE_ID_GTCO_403 0x0403
+#define USB_DEVICE_ID_GTCO_404 0x0404
+#define USB_DEVICE_ID_GTCO_405 0x0405
+#define USB_DEVICE_ID_GTCO_500 0x0500
+#define USB_DEVICE_ID_GTCO_501 0x0501
+#define USB_DEVICE_ID_GTCO_502 0x0502
+#define USB_DEVICE_ID_GTCO_503 0x0503
+#define USB_DEVICE_ID_GTCO_504 0x0504
+#define USB_DEVICE_ID_GTCO_1000 0x1000
+#define USB_DEVICE_ID_GTCO_1001 0x1001
+#define USB_DEVICE_ID_GTCO_1002 0x1002
+#define USB_DEVICE_ID_GTCO_1003 0x1003
+#define USB_DEVICE_ID_GTCO_1004 0x1004
+#define USB_DEVICE_ID_GTCO_1005 0x1005
+#define USB_DEVICE_ID_GTCO_1006 0x1006
+
#define USB_VENDOR_ID_WACOM 0x056a
-#define USB_DEVICE_ID_WACOM_PENPARTNER 0x0000
-#define USB_DEVICE_ID_WACOM_GRAPHIRE 0x0010
-#define USB_DEVICE_ID_WACOM_INTUOS 0x0020
-#define USB_DEVICE_ID_WACOM_PL 0x0030
-#define USB_DEVICE_ID_WACOM_INTUOS2 0x0040
-#define USB_DEVICE_ID_WACOM_VOLITO 0x0060
-#define USB_DEVICE_ID_WACOM_PTU 0x0003
-#define USB_DEVICE_ID_WACOM_INTUOS3 0x00B0
-#define USB_DEVICE_ID_WACOM_CINTIQ 0x003F
-#define USB_DEVICE_ID_WACOM_DTF 0x00C0
#define USB_VENDOR_ID_ACECAD 0x0460
#define USB_DEVICE_ID_ACECAD_FLAIR 0x0004
@@ -1588,6 +1625,51 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_8_IF_KIT, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005, HID_QUIRK_IGNORE },
+ { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY, HID_QUIRK_IGNORE },
@@ -1617,49 +1699,6 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PENPARTNER, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE + 1, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE + 2, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE + 3, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE + 4, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS + 1, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS + 2, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS + 3, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS + 4, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 1, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 2, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 3, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 4, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 5, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 7, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 8, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PL + 9, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS2 + 1, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS2 + 2, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS2 + 3, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS2 + 4, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS2 + 5, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS2 + 7, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_VOLITO, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_VOLITO + 1, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_VOLITO + 2, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_VOLITO + 3, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_VOLITO + 4, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE + 5, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE + 6, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_PTU, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS3, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS3 + 1, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS3 + 2, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS3 + 3, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS3 + 4, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS3 + 5, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_CINTIQ, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_DTF, HID_QUIRK_IGNORE },
- { USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_DTF + 3, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20, HID_QUIRK_IGNORE },
{ USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K, HID_QUIRK_IGNORE },
@@ -1778,6 +1817,10 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
char *rdesc;
int n, len, insize = 0;
+ /* Ignore all Wacom devices */
+ if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
+ return NULL;
+
for (n = 0; hid_blacklist[n].idVendor; n++)
if ((hid_blacklist[n].idVendor == le16_to_cpu(dev->descriptor.idVendor)) &&
(hid_blacklist[n].idProduct == le16_to_cpu(dev->descriptor.idProduct)))
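
The hid-core.c change trades forty-odd per-product Wacom blacklist entries for a single vendor-wide test ahead of the table scan, while the GTCO tablets keep individual entries. A compact sketch of that two-level lookup; the IDs and flag value here are illustrative:

#include <stddef.h>
#include <stdint.h>

#define QUIRK_IGNORE 0x04                   /* illustrative flag value */

struct quirk { uint16_t vendor, product; unsigned flags; };

static const struct quirk blacklist[] = {
	{ 0x078c, 0x0090, QUIRK_IGNORE },   /* a GTCO tablet, per the hunk */
	{ 0, 0, 0 }                         /* terminator, as in hid_blacklist */
};

static unsigned lookup_quirks(uint16_t vendor, uint16_t product)
{
	if (vendor == 0x056a)               /* all Wacom devices, one test */
		return QUIRK_IGNORE;
	for (size_t n = 0; blacklist[n].vendor; n++)
		if (blacklist[n].vendor == vendor &&
		    blacklist[n].product == product)
			return blacklist[n].flags;
	return 0;
}

int main(void)
{
	return lookup_quirks(0x056a, 0x1234) == QUIRK_IGNORE ? 0 : 1;
}

The vendor check also covers products added after the table was written, which a per-ID list cannot.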
diff --git a/drivers/usb/input/usbtouchscreen.c b/drivers/usb/input/usbtouchscreen.c
index 3b175aa482c..a338bf4c2d7 100644
--- a/drivers/usb/input/usbtouchscreen.c
+++ b/drivers/usb/input/usbtouchscreen.c
@@ -286,7 +286,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
static int itm_read_data(unsigned char *pkt, int *x, int *y, int *touch, int *press)
{
*x = ((pkt[0] & 0x1F) << 7) | (pkt[3] & 0x7F);
- *x = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F);
+ *y = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F);
*press = ((pkt[2] & 0x1F) << 7) | (pkt[5] & 0x7F);
*touch = ~pkt[7] & 0x20;
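
The usbtouchscreen fix corrects a copy-paste bug where the Y value was assigned to *x twice. A standalone model of the ITM decode, assuming the same 5-bit-high/7-bit-low packing shown in the hunk:

#include <stdint.h>
#include <stdio.h>

static void itm_decode(const uint8_t *pkt, int *x, int *y, int *press)
{
	*x = ((pkt[0] & 0x1F) << 7) | (pkt[3] & 0x7F);
	*y = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F);   /* was stored in *x */
	*press = ((pkt[2] & 0x1F) << 7) | (pkt[5] & 0x7F);
}

int main(void)
{
	const uint8_t pkt[8] = { 0x01, 0x02, 0x00, 0x10, 0x20, 0x05, 0, 0 };
	int x, y, press;
	itm_decode(pkt, &x, &y, &press);
	printf("x=%d y=%d press=%d\n", x, y, press);     /* x=144 y=288 press=5 */
	return 0;
}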
diff --git a/drivers/usb/input/yealink.c b/drivers/usb/input/yealink.c
index 575a4e672e9..7b45fd3de91 100644
--- a/drivers/usb/input/yealink.c
+++ b/drivers/usb/input/yealink.c
@@ -810,12 +810,9 @@ static int usb_cleanup(struct yealink_dev *yld, int err)
if (yld == NULL)
return err;
- if (yld->urb_irq) {
- usb_kill_urb(yld->urb_irq);
- usb_free_urb(yld->urb_irq);
- }
- if (yld->urb_ctl)
- usb_free_urb(yld->urb_ctl);
+ usb_kill_urb(yld->urb_irq); /* parameter validation in core/urb */
+ usb_kill_urb(yld->urb_ctl); /* parameter validation in core/urb */
+
if (yld->idev) {
if (err)
input_free_device(yld->idev);
@@ -831,6 +828,9 @@ static int usb_cleanup(struct yealink_dev *yld, int err)
if (yld->irq_data)
usb_buffer_free(yld->udev, USB_PKT_LEN,
yld->irq_data, yld->irq_dma);
+
+ usb_free_urb(yld->urb_irq); /* parameter validation in core/urb */
+ usb_free_urb(yld->urb_ctl); /* parameter validation in core/urb */
kfree(yld);
return err;
}
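
The yealink cleanup relies on usb_kill_urb() and usb_free_urb() tolerating NULL (the "parameter validation in core/urb" comments), so the URBs are killed first, the buffers freed, and the URBs freed last. A userspace model of that NULL-tolerant teardown ordering; struct urb here is a stand-in, not the kernel type:

#include <stdlib.h>

struct urb { int pending; };                /* stand-in for the real URB */

static void kill_urb(struct urb *u)         /* NULL-safe, like usb_kill_urb */
{
	if (u)
		u->pending = 0;
}

static void free_urb(struct urb *u)         /* free(NULL) is a no-op */
{
	free(u);
}

static int cleanup(struct urb *irq, struct urb *ctl, int err)
{
	kill_urb(irq);                      /* stop completions first */
	kill_urb(ctl);
	/* ... free DMA buffers while the URBs still exist ... */
	free_urb(irq);                      /* release the URBs last */
	free_urb(ctl);
	return err;
}

int main(void)
{
	struct urb *irq = calloc(1, sizeof(*irq));
	return cleanup(irq, NULL, 0);       /* a NULL ctl is safe by design */
}

NULL-safe helpers let one cleanup routine serve every partially-initialized error path without per-field conditionals.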
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 738bd7c7451..e16582f3733 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3435,6 +3435,8 @@ static void sisusb_disconnect(struct usb_interface *intf)
static struct usb_device_id sisusb_table [] = {
{ USB_DEVICE(0x0711, 0x0900) },
+ { USB_DEVICE(0x0711, 0x0901) },
+ { USB_DEVICE(0x0711, 0x0902) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index a54752ce149..006438069b6 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -131,6 +131,7 @@ struct usb_eth_dev {
#define VENDOR_COREGA 0x07aa
#define VENDOR_DLINK 0x2001
#define VENDOR_ELCON 0x0db7
+#define VENDOR_ELECOM 0x056e
#define VENDOR_ELSA 0x05cc
#define VENDOR_GIGABYTE 0x1044
#define VENDOR_HAWKING 0x0e66
@@ -233,6 +234,8 @@ PEGASUS_DEV( "D-Link DSB-650", VENDOR_DLINK, 0xabc1,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "GOLDPFEIL USB Adapter", VENDOR_ELCON, 0x0002,
DEFAULT_GPIO_RESET | PEGASUS_II | HAS_HOME_PNA )
+PEGASUS_DEV( "ELECOM USB Ethernet LD-USB20", VENDOR_ELECOM, 0x4010,
+ DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "EasiDock Ethernet", VENDOR_MOBILITY, 0x0304,
DEFAULT_GPIO_RESET )
PEGASUS_DEV( "Elsa Micolink USB2Ethernet", VENDOR_ELSA, 0x3000,
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c
index bd09232ce13..a72685b9606 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/usb/net/rtl8150.c
@@ -972,6 +972,7 @@ static void rtl8150_disconnect(struct usb_interface *intf)
if (dev) {
set_bit(RTL8150_UNPLUG, &dev->flags);
tasklet_disable(&dev->tl);
+ tasklet_kill(&dev->tl);
unregister_netdev(dev->netdev);
unlink_all_urbs(dev);
free_all_urbs(dev);
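
rtl8150_disconnect now follows tasklet_disable() with tasklet_kill(); as I read it, the point is to wait until any in-flight or pending run has finished before the device state is torn down, rather than merely flagging the tasklet off. A rough pthreads model of that flag-off versus wait-for-completion distinction (this is not the kernel's tasklet implementation; the handler side would set running and signal cv):

#include <pthread.h>
#include <stdbool.h>

struct tl {
	pthread_mutex_t m;
	pthread_cond_t cv;
	bool disabled;
	bool running;                       /* set/cleared by the handler */
};

static void tl_disable(struct tl *t)        /* flag off: stops future runs */
{
	pthread_mutex_lock(&t->m);
	t->disabled = true;
	pthread_mutex_unlock(&t->m);
}

static void tl_kill(struct tl *t)           /* also waits out an in-flight run */
{
	pthread_mutex_lock(&t->m);
	t->disabled = true;
	while (t->running)
		pthread_cond_wait(&t->cv, &t->m);
	pthread_mutex_unlock(&t->m);
}

int main(void)
{
	struct tl t = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			false, false };
	tl_disable(&t);
	tl_kill(&t);                        /* returns at once: nothing running */
	return 0;
}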
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 15945e806f0..c6115aa1b44 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -506,6 +506,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
{ USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 8888cd80a49..77299996f7e 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -467,6 +467,11 @@
#define TESTO_VID 0x128D
#define TESTO_USB_INTERFACE_PID 0x0001
+/*
+ * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
+ */
+#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
+
/* Commands */
#define FTDI_SIO_RESET 0 /* Reset the port */
#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 12c1694d322..e06a41bd0f3 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -464,8 +464,10 @@ static int serial_read_proc (char *page, char **start, off_t off, int count, int
length += sprintf (page+length, " path:%s", tmp);
length += sprintf (page+length, "\n");
- if ((length + begin) > (off + count))
+ if ((length + begin) > (off + count)) {
+ usb_serial_put(serial);
goto done;
+ }
if ((length + begin) < off) {
begin += length;
length = 0;
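
The usb-serial proc fix adds a usb_serial_put() on the early-exit path, which previously leaked the reference taken for that iteration. A small model of keeping get/put balanced on every path; the names are illustrative:

#include <stdatomic.h>
#include <stddef.h>

struct obj { atomic_int refs; };

static void get(struct obj *o) { atomic_fetch_add(&o->refs, 1); }
static void put(struct obj *o) { atomic_fetch_sub(&o->refs, 1); }

static void scan(struct obj *table[], int n, int stop_at)
{
	for (int i = 0; i < n; i++) {
		struct obj *o = table[i];
		if (!o)
			continue;
		get(o);
		if (i == stop_at) {         /* early exit: still drop the ref */
			put(o);
			return;
		}
		put(o);                     /* normal path */
	}
}

int main(void)
{
	struct obj a = { 1 };
	struct obj *table[] = { &a, NULL, &a };
	scan(table, 3, 2);
	return atomic_load(&a.refs) == 1 ? 0 : 1;
}

A leaked count on any exit path means the serial structure's release callback can never fire, so the early return must mirror the loop's normal put.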
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 4a803d69fa3..b130e170b4a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -241,16 +241,6 @@ UNUSUAL_DEV( 0x0482, 0x0103, 0x0100, 0x0100,
"Finecam S5",
US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY),
-/* Patch for Kyocera Finecam L3
- * Submitted by Michael Krauth <michael.krauth@web.de>
- * and Alessandro Fracchetti <al.fracchetti@tin.it>
- */
-UNUSUAL_DEV( 0x0482, 0x0105, 0x0100, 0x0100,
- "Kyocera",
- "Finecam L3",
- US_SC_SCSI, US_PR_BULK, NULL,
- US_FL_FIX_INQUIRY),
-
/* Reported by Paul Stewart <stewart@wetlogic.net>
* This entry is needed because the device reports Sub=ff */
UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001,
@@ -599,6 +589,13 @@ UNUSUAL_DEV( 0x054c, 0x0099, 0x0000, 0x9999,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
+/* floppy reports multiple luns */
+UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
+ "SAMSUNG",
+ "SFD-321U [FW 0C]",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_SINGLE_LUN ),
+
UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
"Y-E Data",
@@ -1257,6 +1254,13 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_NO_WP_DETECT ),
+/* Reported by Emmanuel Vasilakis <evas@forthnet.gr> */
+UNUSUAL_DEV( 0x0fce, 0xe031, 0x0000, 0x0000,
+ "Sony Ericsson",
+ "M600i",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
* Tested on hardware version 1.10.
* Entry is needed only for the initializer function override.
diff --git a/drivers/video/S3triofb.c b/drivers/video/S3triofb.c
index afd146f5f68..397005eb392 100644
--- a/drivers/video/S3triofb.c
+++ b/drivers/video/S3triofb.c
@@ -349,30 +349,30 @@ static void __init s3triofb_of_init(struct device_node *dp)
s3trio_name[sizeof(s3trio_name)-1] = '\0';
strcpy(fb_fix.id, s3trio_name);
- if((pp = (int *)get_property(dp, "vendor-id", &len)) != NULL
+ if((pp = get_property(dp, "vendor-id", &len)) != NULL
&& *pp!=PCI_VENDOR_ID_S3) {
printk("%s: can't find S3 Trio board\n", dp->full_name);
return;
}
- if((pp = (int *)get_property(dp, "device-id", &len)) != NULL
+ if((pp = get_property(dp, "device-id", &len)) != NULL
&& *pp!=PCI_DEVICE_ID_S3_TRIO) {
printk("%s: can't find S3 Trio board\n", dp->full_name);
return;
}
- if ((pp = (int *)get_property(dp, "depth", &len)) != NULL
+ if ((pp = get_property(dp, "depth", &len)) != NULL
&& len == sizeof(int) && *pp != 8) {
printk("%s: can't use depth = %d\n", dp->full_name, *pp);
return;
}
- if ((pp = (int *)get_property(dp, "width", &len)) != NULL
+ if ((pp = get_property(dp, "width", &len)) != NULL
&& len == sizeof(int))
fb_var.xres = fb_var.xres_virtual = *pp;
- if ((pp = (int *)get_property(dp, "height", &len)) != NULL
+ if ((pp = get_property(dp, "height", &len)) != NULL
&& len == sizeof(int))
fb_var.yres = fb_var.yres_virtual = *pp;
- if ((pp = (int *)get_property(dp, "linebytes", &len)) != NULL
+ if ((pp = get_property(dp, "linebytes", &len)) != NULL
&& len == sizeof(int))
fb_fix.line_length = *pp;
else
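
The S3triofb hunks, like the radeon, nvidia, offb and riva diffs below, drop the explicit casts because get_property() in this series hands back const-qualified data, so callers hold const pointers instead of casting it away. A self-contained sketch of such a const-correct accessor; struct property and get_prop are simplified stand-ins for the device-tree API:

#include <stddef.h>
#include <string.h>

struct property { const char *name; const void *value; int length; };

static const void *get_prop(const struct property *props, size_t n,
			    const char *name, int *lenp)
{
	for (size_t i = 0; i < n; i++)
		if (strcmp(props[i].name, name) == 0) {
			if (lenp)
				*lenp = props[i].length;
			return props[i].value;  /* read-only: no caller cast */
		}
	return NULL;
}

int main(void)
{
	static const int depth = 8;
	const struct property props[] = {
		{ "depth", &depth, sizeof(depth) },
	};
	int len = 0;
	const int *pp = get_prop(props, 1, "depth", &len);  /* no (int *) cast */
	return (pp && len == (int)sizeof(int) && *pp == 8) ? 0 : 1;
}

Returning const data lets the compiler reject accidental writes to firmware-owned property storage, which is exactly what the removed casts used to hide.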
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 3e827e04a2a..276a21530b9 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1801,10 +1801,14 @@ static struct backlight_properties aty128_bl_data = {
static void aty128_bl_set_power(struct fb_info *info, int power)
{
mutex_lock(&info->bl_mutex);
- up(&info->bl_dev->sem);
- info->bl_dev->props->power = power;
- __aty128_bl_update_status(info->bl_dev);
- down(&info->bl_dev->sem);
+
+ if (info->bl_dev) {
+ down(&info->bl_dev->sem);
+ info->bl_dev->props->power = power;
+ __aty128_bl_update_status(info->bl_dev);
+ up(&info->bl_dev->sem);
+ }
+
mutex_unlock(&info->bl_mutex);
}
@@ -1828,7 +1832,7 @@ static void aty128_bl_init(struct aty128fb_par *par)
bd = backlight_device_register(name, par, &aty128_bl_data);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
- printk("aty128: Backlight registration failed\n");
+ printk(KERN_WARNING "aty128: Backlight registration failed\n");
goto error;
}
@@ -1839,11 +1843,11 @@ static void aty128_bl_init(struct aty128fb_par *par)
219 * FB_BACKLIGHT_MAX / MAX_LEVEL);
mutex_unlock(&info->bl_mutex);
- up(&bd->sem);
+ down(&bd->sem);
bd->props->brightness = aty128_bl_data.max_brightness;
bd->props->power = FB_BLANK_UNBLANK;
bd->props->update_status(bd);
- down(&bd->sem);
+ up(&bd->sem);
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
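
The aty128fb hunk fixes two problems at once: bl_set_power() no longer dereferences a NULL bl_dev when backlight registration failed, and the semaphore is now taken before the properties are touched (the old code had up/down inverted). The same correction repeats in the atyfb, radeon, nvidia and riva diffs below. A userspace model with a mutex standing in for the semaphore:

#include <pthread.h>
#include <stddef.h>

struct bl_dev { pthread_mutex_t sem; int power; };

static void bl_set_power(struct bl_dev *dev, int power)
{
	if (!dev)                           /* registration may have failed */
		return;
	pthread_mutex_lock(&dev->sem);      /* the old code unlocked here */
	dev->power = power;
	pthread_mutex_unlock(&dev->sem);
}

int main(void)
{
	struct bl_dev dev = { PTHREAD_MUTEX_INITIALIZER, 0 };
	bl_set_power(&dev, 4);
	bl_set_power(NULL, 0);              /* harmless no-op now */
	return dev.power == 4 ? 0 : 1;
}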
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 053ff63365b..19a71f04578 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2200,10 +2200,14 @@ static struct backlight_properties aty_bl_data = {
static void aty_bl_set_power(struct fb_info *info, int power)
{
mutex_lock(&info->bl_mutex);
- up(&info->bl_dev->sem);
- info->bl_dev->props->power = power;
- __aty_bl_update_status(info->bl_dev);
- down(&info->bl_dev->sem);
+
+ if (info->bl_dev) {
+ down(&info->bl_dev->sem);
+ info->bl_dev->props->power = power;
+ __aty_bl_update_status(info->bl_dev);
+ up(&info->bl_dev->sem);
+ }
+
mutex_unlock(&info->bl_mutex);
}
@@ -2223,7 +2227,7 @@ static void aty_bl_init(struct atyfb_par *par)
bd = backlight_device_register(name, par, &aty_bl_data);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
- printk("aty: Backlight registration failed\n");
+ printk(KERN_WARNING "aty: Backlight registration failed\n");
goto error;
}
@@ -2234,11 +2238,11 @@ static void aty_bl_init(struct atyfb_par *par)
0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
mutex_unlock(&info->bl_mutex);
- up(&bd->sem);
+ down(&bd->sem);
bd->props->brightness = aty_bl_data.max_brightness;
bd->props->power = FB_BLANK_UNBLANK;
bd->props->update_status(bd);
- down(&bd->sem);
+ up(&bd->sem);
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 1755dddf189..585eb7b9e63 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -195,11 +195,11 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo)
217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL);
mutex_unlock(&rinfo->info->bl_mutex);
- up(&bd->sem);
+ down(&bd->sem);
bd->props->brightness = radeon_bl_data.max_brightness;
bd->props->power = FB_BLANK_UNBLANK;
bd->props->update_status(bd);
- down(&bd->sem);
+ up(&bd->sem);
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 8e3400d5dd2..0ed577e7cc2 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -413,11 +413,11 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
{
struct device_node *dp = rinfo->of_node;
- u32 *val;
+ const u32 *val;
if (dp == NULL)
return -ENODEV;
- val = (u32 *) get_property(dp, "ATY,RefCLK", NULL);
+ val = get_property(dp, "ATY,RefCLK", NULL);
if (!val || !*val) {
printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
return -EINVAL;
@@ -425,11 +425,11 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
rinfo->pll.ref_clk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,SCLK", NULL);
+ val = get_property(dp, "ATY,SCLK", NULL);
if (val && *val)
rinfo->pll.sclk = (*val) / 10;
- val = (u32 *) get_property(dp, "ATY,MCLK", NULL);
+ val = get_property(dp, "ATY,MCLK", NULL);
if (val && *val)
rinfo->pll.mclk = (*val) / 10;
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index 98c05bc0de4..ea531a6f45d 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -64,13 +64,13 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
{
static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID",
"EDID1", "EDID2", NULL };
- u8 *pedid = NULL;
- u8 *pmt = NULL;
+ const u8 *pedid = NULL;
+ const u8 *pmt = NULL;
u8 *tmp;
int i, mt = MT_NONE;
RTRACE("analyzing OF properties...\n");
- pmt = (u8 *)get_property(dp, "display-type", NULL);
+ pmt = get_property(dp, "display-type", NULL);
if (!pmt)
return MT_NONE;
RTRACE("display-type: %s\n", pmt);
@@ -89,7 +89,7 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
}
for (i = 0; propnames[i] != NULL; ++i) {
- pedid = (u8 *)get_property(dp, propnames[i], NULL);
+ pedid = get_property(dp, propnames[i], NULL);
if (pedid != NULL)
break;
}
@@ -124,14 +124,14 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
return MT_NONE;
if (rinfo->has_CRTC2) {
- char *pname;
+ const char *pname;
int len, second = 0;
dp = dp->child;
do {
if (!dp)
return MT_NONE;
- pname = (char *)get_property(dp, "name", NULL);
+ pname = get_property(dp, "name", NULL);
if (!pname)
return MT_NONE;
len = strlen(pname);
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index f31e606a2de..e308ed2d249 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -1268,7 +1268,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
0x21320032, 0xa1320032, 0x21320032, 0xffffffff,
0x31320032 };
- u32 *mrtable = default_mrtable;
+ const u32 *mrtable = default_mrtable;
int i, mrtable_size = ARRAY_SIZE(default_mrtable);
mdelay(30);
@@ -1287,7 +1287,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
if (rinfo->of_node != NULL) {
int size;
- mrtable = (u32 *)get_property(rinfo->of_node, "ATY,MRT", &size);
+ mrtable = get_property(rinfo->of_node, "ATY,MRT", &size);
if (mrtable)
mrtable_size = size >> 2;
else
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 390439b3d89..1b4f75d1f8a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3197,11 +3197,11 @@ static void fbcon_exit(void)
return;
#ifdef CONFIG_ATARI
- free_irq(IRQ_AUTO_4, fbcon_vbl_handler);
+ free_irq(IRQ_AUTO_4, fb_vbl_handler);
#endif
#ifdef CONFIG_MAC
if (MACH_IS_MAC && vbl_detected)
- free_irq(IRQ_MAC_VBL, fbcon_vbl_handler);
+ free_irq(IRQ_MAC_VBL, fb_vbl_handler);
#endif
kfree((void *)softback_buf);
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index b45f577094a..5b75ae4e945 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -113,10 +113,14 @@ static struct backlight_properties nvidia_bl_data = {
void nvidia_bl_set_power(struct fb_info *info, int power)
{
mutex_lock(&info->bl_mutex);
- up(&info->bl_dev->sem);
- info->bl_dev->props->power = power;
- __nvidia_bl_update_status(info->bl_dev);
- down(&info->bl_dev->sem);
+
+ if (info->bl_dev) {
+ down(&info->bl_dev->sem);
+ info->bl_dev->props->power = power;
+ __nvidia_bl_update_status(info->bl_dev);
+ up(&info->bl_dev->sem);
+ }
+
mutex_unlock(&info->bl_mutex);
}
@@ -140,7 +144,7 @@ void nvidia_bl_init(struct nvidia_par *par)
bd = backlight_device_register(name, par, &nvidia_bl_data);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
- printk("nvidia: Backlight registration failed\n");
+ printk(KERN_WARNING "nvidia: Backlight registration failed\n");
goto error;
}
@@ -151,11 +155,11 @@ void nvidia_bl_init(struct nvidia_par *par)
0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
mutex_unlock(&info->bl_mutex);
- up(&bd->sem);
+ down(&bd->sem);
bd->props->brightness = nvidia_bl_data.max_brightness;
bd->props->power = FB_BLANK_UNBLANK;
bd->props->update_status(bd);
- down(&bd->sem);
+ up(&bd->sem);
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
diff --git a/drivers/video/nvidia/nv_of.c b/drivers/video/nvidia/nv_of.c
index 8209106e26e..d9af88c2b58 100644
--- a/drivers/video/nvidia/nv_of.c
+++ b/drivers/video/nvidia/nv_of.c
@@ -32,7 +32,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
{
struct nvidia_par *par = info->par;
struct device_node *parent, *dp;
- unsigned char *pedid = NULL;
+ const unsigned char *pedid = NULL;
static char *propnames[] = {
"DFP,EDID", "LCD,EDID", "EDID", "EDID1",
"EDID,B", "EDID,A", NULL };
@@ -42,20 +42,19 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
if (parent == NULL)
return -1;
if (par->twoHeads) {
- char *pname;
+ const char *pname;
int len;
for (dp = NULL;
(dp = of_get_next_child(parent, dp)) != NULL;) {
- pname = (char *)get_property(dp, "name", NULL);
+ pname = get_property(dp, "name", NULL);
if (!pname)
continue;
len = strlen(pname);
if ((pname[len-1] == 'A' && conn == 1) ||
(pname[len-1] == 'B' && conn == 2)) {
for (i = 0; propnames[i] != NULL; ++i) {
- pedid = (unsigned char *)
- get_property(dp, propnames[i],
+ pedid = get_property(dp, propnames[i],
NULL);
if (pedid != NULL)
break;
@@ -67,8 +66,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
}
if (pedid == NULL) {
for (i = 0; propnames[i] != NULL; ++i) {
- pedid = (unsigned char *)
- get_property(parent, propnames[i], NULL);
+ pedid = get_property(parent, propnames[i], NULL);
if (pedid != NULL)
break;
}
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 0013311e056..bad0e98fb3b 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -409,30 +409,30 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
unsigned int flags, rsize, addr_prop = 0;
unsigned long max_size = 0;
u64 rstart, address = OF_BAD_ADDR;
- u32 *pp, *addrp, *up;
+ const u32 *pp, *addrp, *up;
u64 asize;
- pp = (u32 *)get_property(dp, "linux,bootx-depth", &len);
+ pp = get_property(dp, "linux,bootx-depth", &len);
if (pp == NULL)
- pp = (u32 *)get_property(dp, "depth", &len);
+ pp = get_property(dp, "depth", &len);
if (pp && len == sizeof(u32))
depth = *pp;
- pp = (u32 *)get_property(dp, "linux,bootx-width", &len);
+ pp = get_property(dp, "linux,bootx-width", &len);
if (pp == NULL)
- pp = (u32 *)get_property(dp, "width", &len);
+ pp = get_property(dp, "width", &len);
if (pp && len == sizeof(u32))
width = *pp;
- pp = (u32 *)get_property(dp, "linux,bootx-height", &len);
+ pp = get_property(dp, "linux,bootx-height", &len);
if (pp == NULL)
- pp = (u32 *)get_property(dp, "height", &len);
+ pp = get_property(dp, "height", &len);
if (pp && len == sizeof(u32))
height = *pp;
- pp = (u32 *)get_property(dp, "linux,bootx-linebytes", &len);
+ pp = get_property(dp, "linux,bootx-linebytes", &len);
if (pp == NULL)
- pp = (u32 *)get_property(dp, "linebytes", &len);
+ pp = get_property(dp, "linebytes", &len);
if (pp && len == sizeof(u32))
pitch = *pp;
else
@@ -450,9 +450,9 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
* ranges and pick one that is both big enough and if possible encloses
* the "address" property. If none match, we pick the biggest
*/
- up = (u32 *)get_property(dp, "linux,bootx-addr", &len);
+ up = get_property(dp, "linux,bootx-addr", &len);
if (up == NULL)
- up = (u32 *)get_property(dp, "address", &len);
+ up = get_property(dp, "address", &len);
if (up && len == sizeof(u32))
addr_prop = *up;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 76fc9d355eb..67d1e1c8813 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -355,10 +355,14 @@ static struct backlight_properties riva_bl_data = {
static void riva_bl_set_power(struct fb_info *info, int power)
{
mutex_lock(&info->bl_mutex);
- up(&info->bl_dev->sem);
- info->bl_dev->props->power = power;
- __riva_bl_update_status(info->bl_dev);
- down(&info->bl_dev->sem);
+
+ if (info->bl_dev) {
+ down(&info->bl_dev->sem);
+ info->bl_dev->props->power = power;
+ __riva_bl_update_status(info->bl_dev);
+ up(&info->bl_dev->sem);
+ }
+
mutex_unlock(&info->bl_mutex);
}
@@ -382,7 +386,7 @@ static void riva_bl_init(struct riva_par *par)
bd = backlight_device_register(name, par, &riva_bl_data);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
- printk("riva: Backlight registration failed\n");
+ printk(KERN_WARNING "riva: Backlight registration failed\n");
goto error;
}
@@ -393,11 +397,11 @@ static void riva_bl_init(struct riva_par *par)
0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
mutex_unlock(&info->bl_mutex);
- up(&bd->sem);
+ down(&bd->sem);
bd->props->brightness = riva_bl_data.max_brightness;
bd->props->power = FB_BLANK_UNBLANK;
bd->props->update_status(bd);
- down(&bd->sem);
+ up(&bd->sem);
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
@@ -1831,14 +1835,13 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
NVTRACE_ENTER();
dp = pci_device_to_OF_node(pd);
for (; dp != NULL; dp = dp->child) {
- disptype = (unsigned char *)get_property(dp, "display-type", NULL);
+ disptype = get_property(dp, "display-type", NULL);
if (disptype == NULL)
continue;
if (strncmp(disptype, "LCD", 3) != 0)
continue;
for (i = 0; propnames[i] != NULL; ++i) {
- pedid = (unsigned char *)
- get_property(dp, propnames[i], NULL);
+ pedid = get_property(dp, propnames[i], NULL);
if (pedid != NULL) {
par->EDID = pedid;
NVTRACE("LCD found.\n");