author     Jeff Garzik <jgarzik@pobox.com>   2005-08-29 19:06:29 -0400
committer  Jeff Garzik <jgarzik@pobox.com>   2005-08-29 19:06:29 -0400
commit     502c7f1dd566e7ca8aabdb755182664c5fdaebf1 (patch)
tree       31934a4da45c0cb83acd118a181db4c51967ac8a
parent     2cba582a49f1535c1a12a687cfb3dab713c22cc4 (diff)
parent     8f3d17fb7bcb7c255197d11469fb5e9695c9d2f4 (diff)
Merge /spare/repo/linux-2.6/
-rw-r--r--  Documentation/networking/phy.txt | 288
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/signal.c | 11
-rw-r--r--  arch/arm/Kconfig | 39
-rw-r--r--  arch/arm/common/Kconfig | 3
-rw-r--r--  arch/arm/common/Makefile | 1
-rw-r--r--  arch/arm/common/gic.c | 166
-rw-r--r--  arch/arm/kernel/signal.c | 5
-rw-r--r--  arch/arm/mach-sa1100/assabet.c | 7
-rw-r--r--  arch/arm/mach-sa1100/cerf.c | 7
-rw-r--r--  arch/arm/mach-sa1100/generic.c | 5
-rw-r--r--  arch/arm/mach-sa1100/generic.h | 3
-rw-r--r--  arch/arm/mach-sa1100/lart.c | 12
-rw-r--r--  arch/arm/mach-sa1100/shannon.c | 7
-rw-r--r--  arch/arm/mach-sa1100/simpad.c | 7
-rw-r--r--  arch/arm26/kernel/signal.c | 13
-rw-r--r--  arch/cris/arch-v10/kernel/signal.c | 11
-rw-r--r--  arch/cris/arch-v32/kernel/signal.c | 11
-rw-r--r--  arch/frv/kernel/signal.c | 11
-rw-r--r--  arch/h8300/kernel/signal.c | 11
-rw-r--r--  arch/i386/kernel/signal.c | 5
-rw-r--r--  arch/ia64/kernel/signal.c | 15
-rw-r--r--  arch/m32r/kernel/signal.c | 11
-rw-r--r--  arch/m68knommu/kernel/signal.c | 11
-rw-r--r--  arch/mips/kernel/irixsig.c | 11
-rw-r--r--  arch/mips/kernel/signal.c | 11
-rw-r--r--  arch/mips/kernel/signal32.c | 11
-rw-r--r--  arch/parisc/kernel/signal.c | 11
-rw-r--r--  arch/ppc/kernel/signal.c | 11
-rw-r--r--  arch/ppc64/kernel/signal.c | 5
-rw-r--r--  arch/ppc64/kernel/signal32.c | 5
-rw-r--r--  arch/s390/kernel/compat_signal.c | 11
-rw-r--r--  arch/s390/kernel/signal.c | 11
-rw-r--r--  arch/sh/kernel/signal.c | 11
-rw-r--r--  arch/sh64/kernel/signal.c | 11
-rw-r--r--  arch/sparc/kernel/setup.c | 1
-rw-r--r--  arch/sparc/kernel/signal.c | 11
-rw-r--r--  arch/sparc/kernel/tick14.c | 1
-rw-r--r--  arch/sparc/kernel/time.c | 1
-rw-r--r--  arch/sparc/mm/fault.c | 1
-rw-r--r--  arch/sparc/mm/init.c | 1
-rw-r--r--  arch/sparc64/kernel/entry.S | 293
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c | 2
-rw-r--r--  arch/sparc64/kernel/process.c | 2
-rw-r--r--  arch/sparc64/kernel/sbus.c | 2
-rw-r--r--  arch/sparc64/kernel/setup.c | 1
-rw-r--r--  arch/sparc64/kernel/signal.c | 11
-rw-r--r--  arch/sparc64/kernel/signal32.c | 33
-rw-r--r--  arch/sparc64/kernel/smp.c | 30
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 32
-rw-r--r--  arch/sparc64/kernel/traps.c | 269
-rw-r--r--  arch/sparc64/kernel/ttable.S | 27
-rw-r--r--  arch/sparc64/kernel/unaligned.c | 18
-rw-r--r--  arch/sparc64/kernel/winfixup.S | 6
-rw-r--r--  arch/sparc64/lib/Makefile | 2
-rw-r--r--  arch/sparc64/lib/debuglocks.c | 56
-rw-r--r--  arch/sparc64/lib/mb.S | 73
-rw-r--r--  arch/sparc64/solaris/misc.c | 6
-rw-r--r--  arch/um/kernel/signal_kern.c | 6
-rw-r--r--  arch/v850/kernel/signal.c | 11
-rw-r--r--  arch/x86_64/kernel/signal.c | 5
-rw-r--r--  arch/xtensa/kernel/signal.c | 11
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/infiniband/core/Makefile | 2
-rw-r--r--  drivers/infiniband/core/agent.c | 13
-rw-r--r--  drivers/infiniband/core/agent_priv.h | 10
-rw-r--r--  drivers/infiniband/core/cache.c | 6
-rw-r--r--  drivers/infiniband/core/cm.c | 125
-rw-r--r--  drivers/infiniband/core/cm_msgs.h | 194
-rw-r--r--  drivers/infiniband/core/core_priv.h | 2
-rw-r--r--  drivers/infiniband/core/device.c | 1
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 8
-rw-r--r--  drivers/infiniband/core/mad.c | 15
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 10
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c | 311
-rw-r--r--  drivers/infiniband/core/packer.c | 3
-rw-r--r--  drivers/infiniband/core/sa_query.c | 6
-rw-r--r--  drivers/infiniband/core/smi.c | 13
-rw-r--r--  drivers/infiniband/core/sysfs.c | 40
-rw-r--r--  drivers/infiniband/core/ucm.c | 464
-rw-r--r--  drivers/infiniband/core/ucm.h | 13
-rw-r--r--  drivers/infiniband/core/ud_header.c | 11
-rw-r--r--  drivers/infiniband/core/user_mad.c | 10
-rw-r--r--  drivers/infiniband/core/uverbs.h | 11
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 182
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 22
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c | 1
-rw-r--r--  drivers/infiniband/core/verbs.c | 65
-rw-r--r--  drivers/infiniband/hw/mthca/Makefile | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_allocator.c | 116
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c | 28
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 106
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h | 20
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_config_reg.h | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 256
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 52
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_doorbell.h | 13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 63
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 179
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c | 36
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 12
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.h | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 35
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_pd.c | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 115
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 54
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 362
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 591
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h | 11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_wqe.h | 114
-rw-r--r--  drivers/infiniband/ulp/ipoib/Makefile | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_fs.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 33
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 1
-rw-r--r--  drivers/media/dvb/frontends/tda80xx.c | 1
-rw-r--r--  drivers/mfd/Kconfig | 16
-rw-r--r--  drivers/mfd/Makefile | 6
-rw-r--r--  drivers/mfd/mcp-core.c | 255
-rw-r--r--  drivers/mfd/mcp-sa11x0.c | 275
-rw-r--r--  drivers/mfd/mcp.h | 66
-rw-r--r--  drivers/mmc/mmc.c | 29
-rw-r--r--  drivers/mmc/mmc.h | 5
-rw-r--r--  drivers/mmc/mmc_sysfs.c | 90
-rw-r--r--  drivers/mmc/mmci.c | 4
-rw-r--r--  drivers/mmc/wbsd.c | 2
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/Space.c | 12
-rw-r--r--  drivers/net/bonding/bond_alb.c | 17
-rw-r--r--  drivers/net/bonding/bond_main.c | 58
-rw-r--r--  drivers/net/bonding/bonding.h | 3
-rw-r--r--  drivers/net/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/eepro100.c | 8
-rw-r--r--  drivers/net/forcedeth.c | 582
-rw-r--r--  drivers/net/hamradio/Kconfig | 2
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 3
-rw-r--r--  drivers/net/hamradio/baycom_par.c | 3
-rw-r--r--  drivers/net/hamradio/baycom_ser_fdx.c | 3
-rw-r--r--  drivers/net/hamradio/baycom_ser_hdx.c | 3
-rw-r--r--  drivers/net/hamradio/mkiss.c | 1086
-rw-r--r--  drivers/net/ixgb/ixgb.h | 2
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 170
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 59
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h | 9
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 53
-rw-r--r--  drivers/net/jazzsonic.c | 186
-rw-r--r--  drivers/net/loopback.c | 22
-rw-r--r--  drivers/net/macsonic.c | 538
-rw-r--r--  drivers/net/mv643xx_eth.c | 29
-rw-r--r--  drivers/net/mv643xx_eth.h | 4
-rw-r--r--  drivers/net/pci-skeleton.c | 6
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 25
-rw-r--r--  drivers/net/phy/Kconfig | 57
-rw-r--r--  drivers/net/phy/Makefile | 10
-rw-r--r--  drivers/net/phy/cicada.c | 134
-rw-r--r--  drivers/net/phy/davicom.c | 195
-rw-r--r--  drivers/net/phy/lxt.c | 179
-rw-r--r--  drivers/net/phy/marvell.c | 140
-rw-r--r--  drivers/net/phy/mdio_bus.c | 176
-rw-r--r--  drivers/net/phy/phy.c | 871
-rw-r--r--  drivers/net/phy/phy_device.c | 696
-rw-r--r--  drivers/net/phy/qsemi.c | 143
-rw-r--r--  drivers/net/r8169.c | 1
-rw-r--r--  drivers/net/s2io-regs.h | 87
-rw-r--r--  drivers/net/s2io.c | 3085
-rw-r--r--  drivers/net/s2io.h | 364
-rw-r--r--  drivers/net/skge.c | 65
-rw-r--r--  drivers/net/skge.h | 19
-rw-r--r--  drivers/net/smc-ultra.c | 1
-rw-r--r--  drivers/net/sonic.c | 676
-rw-r--r--  drivers/net/sonic.h | 460
-rw-r--r--  drivers/net/tokenring/Kconfig | 4
-rw-r--r--  drivers/net/tokenring/abyss.c | 2
-rw-r--r--  drivers/net/tokenring/madgemc.c | 521
-rw-r--r--  drivers/net/tokenring/proteon.c | 104
-rw-r--r--  drivers/net/tokenring/skisa.c | 104
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 46
-rw-r--r--  drivers/net/tokenring/tms380tr.h | 9
-rw-r--r--  drivers/net/tokenring/tmspci.c | 4
-rw-r--r--  drivers/net/wan/cycx_drv.c | 24
-rw-r--r--  drivers/net/wireless/orinoco.c | 78
-rw-r--r--  drivers/parport/parport_serial.c | 339
-rw-r--r--  drivers/scsi/ahci.c | 52
-rw-r--r--  drivers/scsi/ata_piix.c | 72
-rw-r--r--  drivers/scsi/libata-core.c | 299
-rw-r--r--  drivers/scsi/libata-scsi.c | 119
-rw-r--r--  drivers/scsi/libata.h | 47
-rw-r--r--  drivers/scsi/sata_nv.c | 62
-rw-r--r--  drivers/scsi/sata_promise.c | 45
-rw-r--r--  drivers/scsi/sata_promise.h | 31
-rw-r--r--  drivers/scsi/sata_qstor.c | 43
-rw-r--r--  drivers/scsi/sata_sil.c | 69
-rw-r--r--  drivers/scsi/sata_sis.c | 35
-rw-r--r--  drivers/scsi/sata_svw.c | 43
-rw-r--r--  drivers/scsi/sata_sx4.c | 179
-rw-r--r--  drivers/scsi/sata_uli.c | 35
-rw-r--r--  drivers/scsi/sata_via.c | 64
-rw-r--r--  drivers/scsi/sata_vsc.c | 31
-rw-r--r--  drivers/serial/8250_pci.c | 474
-rw-r--r--  fs/adfs/adfs.h | 4
-rw-r--r--  include/asm-arm/arch-sa1100/mcp.h | 21
-rw-r--r--  include/asm-arm/hardware/gic.h | 41
-rw-r--r--  include/asm-sparc/processor.h | 1
-rw-r--r--  include/asm-sparc/segment.h | 6
-rw-r--r--  include/asm-sparc/system.h | 1
-rw-r--r--  include/asm-sparc64/atomic.h | 8
-rw-r--r--  include/asm-sparc64/bitops.h | 4
-rw-r--r--  include/asm-sparc64/processor.h | 1
-rw-r--r--  include/asm-sparc64/segment.h | 6
-rw-r--r--  include/asm-sparc64/sfafsr.h | 82
-rw-r--r--  include/asm-sparc64/spinlock.h | 42
-rw-r--r--  include/asm-sparc64/system.h | 17
-rw-r--r--  include/linux/8250_pci.h | 39
-rw-r--r--  include/linux/ata.h | 45
-rw-r--r--  include/linux/ethtool.h | 4
-rw-r--r--  include/linux/libata.h | 49
-rw-r--r--  include/linux/mii.h | 9
-rw-r--r--  include/linux/mmc/host.h | 4
-rw-r--r--  include/linux/mod_devicetable.h | 10
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/phy.h | 377
-rw-r--r--  include/linux/serialP.h | 40
-rw-r--r--  include/rdma/ib_cache.h (renamed from drivers/infiniband/include/ib_cache.h) | 4
-rw-r--r--  include/rdma/ib_cm.h (renamed from drivers/infiniband/include/ib_cm.h) | 93
-rw-r--r--  include/rdma/ib_fmr_pool.h (renamed from drivers/infiniband/include/ib_fmr_pool.h) | 2
-rw-r--r--  include/rdma/ib_mad.h (renamed from drivers/infiniband/include/ib_mad.h) | 26
-rw-r--r--  include/rdma/ib_pack.h (renamed from drivers/infiniband/include/ib_pack.h) | 2
-rw-r--r--  include/rdma/ib_sa.h (renamed from drivers/infiniband/include/ib_sa.h) | 22
-rw-r--r--  include/rdma/ib_smi.h (renamed from drivers/infiniband/include/ib_smi.h) | 20
-rw-r--r--  include/rdma/ib_user_cm.h (renamed from drivers/infiniband/include/ib_user_cm.h) | 28
-rw-r--r--  include/rdma/ib_user_mad.h (renamed from drivers/infiniband/include/ib_user_mad.h) | 10
-rw-r--r--  include/rdma/ib_user_verbs.h (renamed from drivers/infiniband/include/ib_user_verbs.h) | 39
-rw-r--r--  include/rdma/ib_verbs.h (renamed from drivers/infiniband/include/ib_verbs.h) | 118
241 files changed, 13874 insertions, 6714 deletions
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
new file mode 100644
index 00000000000..29ccae40903
--- /dev/null
+++ b/Documentation/networking/phy.txt
@@ -0,0 +1,288 @@
+
+-------
+PHY Abstraction Layer
+(Updated 2005-07-21)
+
+Purpose
+
+ Most network devices consist of a set of registers which provide an interface
+ to a MAC layer, which communicates with the physical connection through a
+ PHY. The PHY concerns itself with negotiating link parameters with the link
+ partner on the other side of the network connection (typically, an ethernet
+ cable), and provides a register interface to allow drivers to determine what
+ settings were chosen, and to configure what settings are allowed.
+
+ While these devices are distinct from the network devices, and conform to a
+ standard layout for the registers, it has been common practice to integrate
+ the PHY management code with the network driver. This has resulted in large
+ amounts of redundant code. Also, on embedded systems with multiple (and
+ sometimes quite different) ethernet controllers connected to the same
+ management bus, it is difficult to ensure safe use of the bus.
+
+ Since the PHYs are devices, and the management busses through which they are
+ accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
+ In doing so, it has these goals:
+
+ 1) Increase code-reuse
+ 2) Increase overall code-maintainability
+ 3) Speed development time for new network drivers, and for new systems
+
+ Basically, this layer is meant to provide an interface to PHY devices which
+ allows network driver writers to write as little code as possible, while
+ still providing a full feature set.
+
+The MDIO bus
+
+ Most network devices are connected to a PHY by means of a management bus.
+ Different devices use different busses (though some share common interfaces).
+ In order to take advantage of the PAL, each bus interface needs to be
+ registered as a distinct device.
+
+ 1) read and write functions must be implemented. Their prototypes are:
+
+ int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+ int read(struct mii_bus *bus, int mii_id, int regnum);
+
+ mii_id is the address on the bus for the PHY, and regnum is the register
+ number. These functions are guaranteed not to be called from interrupt
+ time, so it is safe for them to block, waiting for an interrupt to signal
+ the operation is complete.
+
+ 2) A reset function is necessary. This is used to return the bus to an
+ initialized state.
+
+ 3) A probe function is needed. This function should set up anything the bus
+ driver needs, setup the mii_bus structure, and register with the PAL using
+ mdiobus_register. Similarly, there's a remove function to undo all of
+ that (use mdiobus_unregister).
+
+ 4) Like any driver, the device_driver structure must be configured, and
+ init/exit functions are used to register the driver.
+
+ 5) The bus must also be declared somewhere as a device, and registered.
+
+ As an example for how one driver implemented an mdio bus driver, see
+ drivers/net/gianfar_mii.c and arch/ppc/syslib/mpc85xx_devices.c
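+
+ As a rough sketch of the shape such a bus driver takes (the foo_* names, the
+ foo_mdio_priv structure and the hardware helpers below are invented for
+ illustration only; they are not taken from the drivers above):
+
+     static int foo_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+     {
+             struct foo_mdio_priv *priv = bus->priv;
+
+             /* May block until the hardware finishes the transaction;
+              * we are never called from interrupt context. */
+             return foo_hw_mdio_read(priv, mii_id, regnum);
+     }
+
+     static int foo_mdio_write(struct mii_bus *bus, int mii_id,
+                               int regnum, u16 value)
+     {
+             struct foo_mdio_priv *priv = bus->priv;
+
+             return foo_hw_mdio_write(priv, mii_id, regnum, value);
+     }
+
+     static int foo_mdio_reset(struct mii_bus *bus)
+     {
+             /* Return the management interface to a known state. */
+             return 0;
+     }
+
+     static int foo_mdio_probe(struct device *dev)
+     {
+             struct foo_mdio_priv *priv = dev->platform_data;
+             struct mii_bus *bus = &priv->mii_bus;
+
+             bus->name  = "foo MII bus";
+             bus->id    = 0;              /* bus number, the first half of
+                                           * the PHY names described below */
+             bus->read  = foo_mdio_read;
+             bus->write = foo_mdio_write;
+             bus->reset = foo_mdio_reset;
+             bus->irq   = priv->phy_irqs; /* one entry per PHY address,
+                                           * PHY_POLL where none is wired */
+             bus->priv  = priv;
+
+             /* Hand the bus to the PAL, which probes each address and
+              * binds a driver to any PHY it finds. */
+             return mdiobus_register(bus);
+     }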
+
+Connecting to a PHY
+
+ Sometime during startup, the network driver needs to establish a connection
+ between the PHY device and the network device. At this time, the PHY's bus
+ and drivers all need to have been loaded, so it is ready for the connection.
+ At this point, there are several ways to connect to the PHY:
+
+ 1) The PAL handles everything, and only calls the network driver when
+ the link state changes, so it can react.
+
+ 2) The PAL handles everything except interrupts (usually because the
+ controller has the interrupt registers).
+
+ 3) The PAL handles everything, but checks in with the driver every second,
+ allowing the network driver to react first to any changes before the PAL
+ does.
+
+ 4) The PAL serves only as a library of functions, with the network device
+ manually calling functions to update status, and configure the PHY
+
+
+Letting the PHY Abstraction Layer do Everything
+
+ If you choose option 1 (the hope is that every driver can, but the PAL still
+ needs to be useful to drivers that can't), connecting to the PHY is simple:
+
+ First, you need a function to react to changes in the link state. This
+ function follows this protocol:
+
+ static void adjust_link(struct net_device *dev);
+
+ Next, you need to know the device name of the PHY connected to this device.
+ The name will look something like, "phy0:0", where the first number is the
+ bus id, and the second is the PHY's address on that bus.
+
+ Now, to connect, just call this function:
+
+ phydev = phy_connect(dev, phy_name, &adjust_link, flags);
+
+ phydev is a pointer to the phy_device structure which represents the PHY. If
+ phy_connect is successful, it will return the pointer. dev, here, is the
+ pointer to your net_device. Once done, this function will have started the
+ PHY's software state machine, and registered for the PHY's interrupt, if it
+ has one. The phydev structure will be populated with information about the
+ current state, though the PHY will not yet be truly operational at this
+ point.
+
+ flags is a u32 which can optionally contain phy-specific flags.
+ This is useful if the system has put hardware restrictions on
+ the PHY/controller, of which the PHY needs to be aware.
+
+ Now just make sure that phydev->supported and phydev->advertising have any
+ values pruned from them which don't make sense for your controller (a 10/100
+ controller may be connected to a gigabit capable PHY, so you would need to
+ mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions
+ for these bitfields. Note that you should not SET any bits, or the PHY may
+ get put into an unsupported state.
+
+ Lastly, once the controller is ready to handle network traffic, you call
+ phy_start(phydev). This tells the PAL that you are ready, and configures the
+ PHY to connect to the network. If you want to handle your own interrupts,
+ just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start.
+ Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL.
+
+ When you want to disconnect from the network (even if just briefly), you call
+ phy_stop(phydev).
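+
+ As a rough sketch of how the pieces of option 1 fit together in a driver
+ (the foo_* names and the 10/100 feature mask are invented; the ERR_PTR
+ error convention shown should be checked against phy_connect() in
+ drivers/net/phy/phy.c):
+
+     #define FOO_PHY_NAME "phy0:0"   /* bus 0, PHY address 0, as above */
+
+     static void foo_adjust_link(struct net_device *dev)
+     {
+             struct foo_priv *priv = netdev_priv(dev);
+             struct phy_device *phydev = priv->phydev;
+
+             /* Called by the PAL whenever the link state changes; push
+              * the new speed/duplex/link settings into the MAC here. */
+             if (phydev->link)
+                     foo_mac_set_speed(priv, phydev->speed, phydev->duplex);
+     }
+
+     static int foo_open(struct net_device *dev)
+     {
+             struct foo_priv *priv = netdev_priv(dev);
+             struct phy_device *phydev;
+
+             phydev = phy_connect(dev, FOO_PHY_NAME, &foo_adjust_link, 0);
+             if (IS_ERR(phydev))
+                     return PTR_ERR(phydev);
+
+             /* A 10/100 controller: prune the gigabit modes the attached
+              * PHY might otherwise advertise. */
+             phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+                                    SUPPORTED_1000baseT_Full);
+             phydev->advertising = phydev->supported;
+
+             priv->phydev = phydev;
+             phy_start(phydev);      /* ready to handle traffic */
+
+             return 0;
+     }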
+
+Keeping Close Tabs on the PAL
+
+ It is possible that the PAL's built-in state machine needs a little help to
+ keep your network device and the PHY properly in sync. If so, you can
+ register a helper function when connecting to the PHY, which will be called
+ every second before the state machine reacts to any changes. To do this, you
+ need to manually call phy_attach() and phy_prepare_link(), and then call
+ phy_start_machine() with the second argument set to point to your special
+ handler.
+
+ Currently there are no examples of how to use this functionality, and testing
+ on it has been limited because the author does not have any drivers which use
+ it (they all use option 1). So Caveat Emptor.
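+
+ In that spirit, here is a rough, untested sketch (the foo_* names are
+ invented, and the prototype of the helper passed to phy_start_machine(),
+ shown here taking the net_device like adjust_link does, is an assumption
+ that should be checked against drivers/net/phy/phy.c):
+
+     /* Called once a second, just before the state machine reacts. */
+     static void foo_phy_sync(struct net_device *dev)
+     {
+             struct foo_priv *priv = netdev_priv(dev);
+
+             /* Re-sync whatever the MAC needs before the PAL acts. */
+             foo_mac_resync(priv);
+     }
+
+     static int foo_connect_phy(struct net_device *dev)
+     {
+             struct foo_priv *priv = netdev_priv(dev);
+             struct phy_device *phydev;
+
+             phydev = phy_attach(dev, "phy0:0", 0);
+             if (IS_ERR(phydev))
+                     return PTR_ERR(phydev);
+
+             /* Reuse the adjust_link handler from the earlier sketch,
+              * then start the machine with our per-second helper. */
+             phy_prepare_link(phydev, &foo_adjust_link);
+             phy_start_machine(phydev, &foo_phy_sync);
+
+             priv->phydev = phydev;
+             return 0;
+     }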
+
+Doing it all yourself
+
+ There's a remote chance that the PAL's built-in state machine cannot track
+ the complex interactions between the PHY and your network device. If this is
+ so, you can simply call phy_attach(), and not call phy_start_machine or
+ phy_prepare_link(). This will mean that phydev->state is entirely yours to
+ handle (phy_start and phy_stop toggle between some of the states, so you
+ might need to avoid them).
+
+ An effort has been made to make sure that useful functionality can be
+ accessed without the state-machine running, and most of these functions are
+ descended from functions which did not interact with a complex state-machine.
+ However, again, no effort has been made so far to test running without the
+ state machine, so, again, caveat emptor.
+
+ Here is a brief rundown of the functions:
+
+ int phy_read(struct phy_device *phydev, u16 regnum);
+ int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
+
+ Simple read/write primitives. They invoke the bus's read/write function
+ pointers.
+
+ void phy_print_status(struct phy_device *phydev);
+
+ A convenience function to print out the PHY status neatly.
+
+ int phy_clear_interrupt(struct phy_device *phydev);
+ int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+
+ Clear the PHY's interrupt, and configure which ones are allowed,
+ respectively. Currently only supports all on, or all off.
+
+ int phy_enable_interrupts(struct phy_device *phydev);
+ int phy_disable_interrupts(struct phy_device *phydev);
+
+ Functions which enable/disable PHY interrupts, clearing them
+ before and after, respectively.
+
+ int phy_start_interrupts(struct phy_device *phydev);
+ int phy_stop_interrupts(struct phy_device *phydev);
+
+ Requests the IRQ for the PHY interrupts, then enables them for
+ start, or disables then frees them for stop.
+
+ struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
+ u32 flags);
+
+ Attaches a network device to a particular PHY, binding the PHY to a generic
+ driver if none was found during bus initialization. Passes in
+ any phy-specific flags as needed.
+
+ int phy_start_aneg(struct phy_device *phydev);
+
+ Using variables inside the phydev structure, either configures advertising
+ and resets autonegotiation, or disables autonegotiation, and configures
+ forced settings.
+
+ static inline int phy_read_status(struct phy_device *phydev);
+
+ Fills the phydev structure with up-to-date information about the current
+ settings in the PHY.
+
+ void phy_sanitize_settings(struct phy_device *phydev);
+
+ Resolves differences between currently desired settings, and
+ supported settings for the given PHY device. Does not make
+ the changes in the hardware, though.
+
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+
+ Ethtool convenience functions.
+
+ int phy_mii_ioctl(struct phy_device *phydev,
+ struct mii_ioctl_data *mii_data, int cmd);
+
+ The MII ioctl. Note that this function will completely screw up the state
+ machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to
+ use this only to write registers which are not standard, and don't set off
+ a renegotiation.
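+
+ As a small sketch (the foo_* names are invented), a driver that lets the PAL
+ service the standard MII ioctls can simply forward them, using the if_mii()
+ helper from linux/mii.h to reach the mii_ioctl_data inside the ifreq:
+
+     static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+     {
+             struct foo_priv *priv = netdev_priv(dev);
+
+             if (!netif_running(dev))
+                     return -EINVAL;
+
+             /* Per the warning above, user-space writes to BMCR and
+              * friends can still confuse the state machine. */
+             return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+     }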
+
+
+PHY Device Drivers
+
+ With the PHY Abstraction Layer, adding support for new PHYs is
+ quite easy. In some cases, no work is required at all! However,
+ many PHYs require a little hand-holding to get up-and-running.
+
+Generic PHY driver
+
+ If the desired PHY doesn't have any errata, quirks, or special
+ features you want to support, then it may be best to not add
+ support, and let the PHY Abstraction Layer's Generic PHY Driver
+ do all of the work.
+
+Writing a PHY driver
+
+ If you do need to write a PHY driver, the first thing to do is
+ make sure it can be matched with an appropriate PHY device.
+ This is done during bus initialization by reading the device's
+ UID (stored in registers 2 and 3), then comparing it to each
+ driver's phy_id field by ANDing it with each driver's
+ phy_id_mask field. Also, it needs a name. Here's an example:
+
+   static struct phy_driver dm9161_driver = {
+           .phy_id       = 0x0181b880,
+           .name         = "Davicom DM9161E",
+           .phy_id_mask  = 0x0ffffff0,
+           ...
+   };
+
+ Next, you need to specify what features (speed, duplex, autoneg,
+ etc) your PHY device and driver support. Most PHYs support
+ PHY_BASIC_FEATURES, but you can look in include/linux/mii.h for other
+ features.
+
+ Each driver consists of a number of function pointers:
+
+ config_init: configures PHY into a sane state after a reset.
+ For instance, a Davicom PHY requires descrambling disabled.
+ probe: Does any setup needed by the driver
+ suspend/resume: power management
+ config_aneg: Changes the speed/duplex/negotiation settings
+ read_status: Reads the current speed/duplex/negotiation settings
+ ack_interrupt: Clear a pending interrupt
+ config_intr: Enable or disable interrupts
+ remove: Does any driver take-down
+
+ Of these, only config_aneg and read_status are required to be
+ assigned by the driver code. The rest are optional. Also, it is
+ preferred to use the generic phy driver's versions of these two
+ functions if at all possible: genphy_read_status and
+ genphy_config_aneg. If this is not possible, it is likely that
+ you only need to perform some actions before and after invoking
+ these functions, and so your functions will wrap the generic
+ ones.
+
+ Feel free to look at the Marvell, Cicada, and Davicom drivers in
+ drivers/net/phy/ for examples (the lxt and qsemi drivers have
+ not been tested as of this writing).
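+
+ Putting the pieces together, here is a sketch of a minimal driver that leans
+ on the generic functions (the foophy name, UID and the register-16 quirk are
+ invented and do not correspond to any of the drivers above; registration and
+ removal go through phy_driver_register()/phy_driver_unregister() from the
+ module's init/exit functions):
+
+     #include <linux/module.h>
+     #include <linux/phy.h>
+
+     static int foophy_config_init(struct phy_device *phydev)
+     {
+             /* Example of a post-reset quirk: clear a vendor bit in a
+              * (made-up) vendor-specific register. */
+             int val = phy_read(phydev, 16);
+
+             if (val < 0)
+                     return val;
+
+             return phy_write(phydev, 16, val & ~0x0001);
+     }
+
+     static struct phy_driver foophy_driver = {
+             .phy_id         = 0x01234560,   /* made-up UID */
+             .phy_id_mask    = 0x0ffffff0,
+             .name           = "Example FooPHY",
+             .features       = PHY_BASIC_FEATURES,
+             .config_init    = foophy_config_init,
+             /* Use the generic implementations unless the PHY really
+              * needs something special. */
+             .config_aneg    = genphy_config_aneg,
+             .read_status    = genphy_read_status,
+             .driver         = { .owner = THIS_MODULE },
+     };
+
+     static int __init foophy_init(void)
+     {
+             return phy_driver_register(&foophy_driver);
+     }
+
+     static void __exit foophy_exit(void)
+     {
+             phy_driver_unregister(&foophy_driver);
+     }
+
+     module_init(foophy_init);
+     module_exit(foophy_exit);
+
+     MODULE_LICENSE("GPL");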
diff --git a/Makefile b/Makefile
index 5acd1fc68d1..3d84df581cf 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 13
EXTRAVERSION =
-NAME=Woozy Numbat
+NAME=Affluent Albatross
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 08fe8071a7f..2e45e8604e3 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -566,13 +566,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
if (ka->sa.sa_flags & SA_RESETHAND)
ka->sa.sa_handler = SIG_DFL;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
static inline void
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c65c6eb9810..4bf0e8737e1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -635,10 +635,6 @@ config PM
and the Battery Powered Linux mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
- Note that, even if you say N here, Linux on the x86 architecture
- will issue the hlt instruction if nothing is to be done, thereby
- sending the processor to sleep and saving power.
-
config APM
tristate "Advanced Power Management Emulation"
depends on PM
@@ -650,12 +646,6 @@ config APM
battery status information, and user-space programs will receive
notification of APM "events" (e.g. battery status change).
- If you select "Y" here, you can disable actual use of the APM
- BIOS by passing the "apm=off" option to the kernel at boot time.
-
- Note that the APM support is almost completely disabled for
- machines with more than one CPU.
-
In order to use APM, you will need supporting software. For location
and more information, read <file:Documentation/pm.txt> and the
Battery Powered Linux mini-HOWTO, available from
@@ -665,39 +655,12 @@ config APM
manpage ("man 8 hdparm") for that), and it doesn't turn off
VESA-compliant "green" monitors.
- This driver does not support the TI 4000M TravelMate and the ACER
- 486/DX4/75 because they don't have compliant BIOSes. Many "green"
- desktop machines also don't have compliant BIOSes, and this driver
- may cause those machines to panic during the boot phase.
-
Generally, if you don't have a battery in your machine, there isn't
much point in using this driver and you should say N. If you get
random kernel OOPSes or reboots that don't seem to be related to
anything, try disabling/enabling this option (or disabling/enabling
APM in your BIOS).
- Some other things you should try when experiencing seemingly random,
- "weird" problems:
-
- 1) make sure that you have enough swap space and that it is
- enabled.
- 2) pass the "no-hlt" option to the kernel
- 3) switch on floating point emulation in the kernel and pass
- the "no387" option to the kernel
- 4) pass the "floppy=nodma" option to the kernel
- 5) pass the "mem=4M" option to the kernel (thereby disabling
- all but the first 4 MB of RAM)
- 6) make sure that the CPU is not over clocked.
- 7) read the sig11 FAQ at <http://www.bitwizard.nl/sig11/>
- 8) disable the cache from your BIOS settings
- 9) install a fan for the video card or exchange video RAM
- 10) install a better fan for the CPU
- 11) exchange RAM chips
- 12) exchange the motherboard.
-
- To compile this driver as a module, choose M here: the
- module will be called apm.
-
endmenu
source "net/Kconfig"
@@ -752,6 +715,8 @@ source "drivers/hwmon/Kconfig"
source "drivers/misc/Kconfig"
+source "drivers/mfd/Kconfig"
+
source "drivers/media/Kconfig"
source "drivers/video/Kconfig"
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 692af6b5e8f..666ba393575 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,6 +1,9 @@
config ICST525
bool
+config ARM_GIC
+ bool
+
config ICST307
bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 11f20a43ee3..a87886564b1 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,6 +4,7 @@
obj-y += rtctime.o
obj-$(CONFIG_ARM_AMBA) += amba.o
+obj-$(CONFIG_ARM_GIC) += gic.o
obj-$(CONFIG_ICST525) += icst525.o
obj-$(CONFIG_ICST307) += icst307.o
obj-$(CONFIG_SA1111) += sa1111.o
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
new file mode 100644
index 00000000000..51dbf5489b6
--- /dev/null
+++ b/arch/arm/common/gic.c
@@ -0,0 +1,166 @@
+/*
+ * linux/arch/arm/common/gic.c
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ * from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which sends interrupts sent
+ * by the Distributor, and interrupts generated locally, to the
+ * associated CPU.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/mach/irq.h>
+#include <asm/hardware/gic.h>
+
+static void __iomem *gic_dist_base;
+static void __iomem *gic_cpu_base;
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ *
+ * Linux assumes that when we're done with an interrupt we need to
+ * unmask it, in the same way we need to unmask an interrupt when
+ * we first enable it.
+ *
+ * The GIC has a separate notion of "end of interrupt" to re-enable
+ * an interrupt after handling, in order to support hardware
+ * prioritisation.
+ *
+ * We can make the GIC behave in the way that Linux expects by making
+ * our "acknowledge" routine disable the interrupt, then mark it as
+ * complete.
+ */
+static void gic_ack_irq(unsigned int irq)
+{
+ u32 mask = 1 << (irq % 32);
+ writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
+ writel(irq, gic_cpu_base + GIC_CPU_EOI);
+}
+
+static void gic_mask_irq(unsigned int irq)
+{
+ u32 mask = 1 << (irq % 32);
+ writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
+}
+
+static void gic_unmask_irq(unsigned int irq)
+{
+ u32 mask = 1 << (irq % 32);
+ writel(mask, gic_dist_base + GIC_DIST_ENABLE_SET + (irq / 32) * 4);
+}
+
+static void gic_set_cpu(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
+{
+ void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3);
+ unsigned int shift = (irq % 4) * 8;
+ u32 val;
+
+ val = readl(reg) & ~(0xff << shift);
+ val |= 1 << (cpu + shift);
+ writel(val, reg);
+}
+
+static struct irqchip gic_chip = {
+ .ack = gic_ack_irq,
+ .mask = gic_mask_irq,
+ .unmask = gic_unmask_irq,
+#ifdef CONFIG_SMP
+ .set_cpu = gic_set_cpu,
+#endif
+};
+
+void __init gic_dist_init(void __iomem *base)
+{
+ unsigned int max_irq, i;
+ u32 cpumask = 1 << smp_processor_id();
+
+ cpumask |= cpumask << 8;
+ cpumask |= cpumask << 16;
+
+ gic_dist_base = base;
+
+ writel(0, base + GIC_DIST_CTRL);
+
+ /*
+ * Find out how many interrupts are supported.
+ */
+ max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
+ max_irq = (max_irq + 1) * 32;
+
+ /*
+ * The GIC only supports up to 1020 interrupt sources.
+ * Limit this to either the architected maximum, or the
+ * platform maximum.
+ */
+ if (max_irq > max(1020, NR_IRQS))
+ max_irq = max(1020, NR_IRQS);
+
+ /*
+ * Set all global interrupts to be level triggered, active low.
+ */
+ for (i = 32; i < max_irq; i += 16)
+ writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);
+
+ /*
+ * Set all global interrupts to this CPU only.
+ */
+ for (i = 32; i < max_irq; i += 4)
+ writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+
+ /*
+ * Set priority on all interrupts.
+ */
+ for (i = 0; i < max_irq; i += 4)
+ writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+
+ /*
+ * Disable all interrupts.
+ */
+ for (i = 0; i < max_irq; i += 32)
+ writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+
+ /*
+ * Setup the Linux IRQ subsystem.
+ */
+ for (i = 29; i < max_irq; i++) {
+ set_irq_chip(i, &gic_chip);
+ set_irq_handler(i, do_level_IRQ);
+ set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+ }
+
+ writel(1, base + GIC_DIST_CTRL);
+}
+
+void __cpuinit gic_cpu_init(void __iomem *base)
+{
+ gic_cpu_base = base;
+ writel(0xf0, base + GIC_CPU_PRIMASK);
+ writel(1, base + GIC_CPU_CTRL);
+}
+
+#ifdef CONFIG_SMP
+void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
+{
+ unsigned long map = *cpus_addr(cpumask);
+
+ writel(map << 16 | irq, gic_dist_base + GIC_DIST_SOFTINT);
+}
+#endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 5e435e42dac..a94d75fef59 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -658,11 +658,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
/*
* Block the signal if we were unsuccessful.
*/
- if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret != 0) {
spin_lock_irq(&tsk->sighand->siglock);
sigorsets(&tsk->blocked, &tsk->blocked,
&ka->sa.sa_mask);
- sigaddset(&tsk->blocked, sig);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&tsk->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&tsk->sighand->siglock);
}
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 4d4d303ee3a..24687f511bf 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -35,6 +35,7 @@
#include <asm/mach/map.h>
#include <asm/mach/serial_sa1100.h>
#include <asm/arch/assabet.h>
+#include <asm/arch/mcp.h>
#include "generic.h"
@@ -198,6 +199,11 @@ static struct irda_platform_data assabet_irda_data = {
.set_speed = assabet_irda_set_speed,
};
+static struct mcp_plat_data assabet_mcp_data = {
+ .mccr0 = MCCR0_ADM,
+ .sclk_rate = 11981000,
+};
+
static void __init assabet_init(void)
{
/*
@@ -246,6 +252,7 @@ static void __init assabet_init(void)
sa11x0_set_flash_data(&assabet_flash_data, assabet_flash_resources,
ARRAY_SIZE(assabet_flash_resources));
sa11x0_set_irda_data(&assabet_irda_data);
+ sa11x0_set_mcp_data(&assabet_mcp_data);
}
/*
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 0aa918e24c3..9484be7dc67 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -29,6 +29,7 @@
#include <asm/mach/serial_sa1100.h>
#include <asm/arch/cerf.h>
+#include <asm/arch/mcp.h>
#include "generic.h"
static struct resource cerfuart2_resources[] = {
@@ -116,10 +117,16 @@ static void __init cerf_map_io(void)
GPDR |= CERF_GPIO_CF_RESET;
}
+static struct mcp_plat_data cerf_mcp_data = {
+ .mccr0 = MCCR0_ADM,
+ .sclk_rate = 11981000,
+};
+
static void __init cerf_init(void)
{
platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices));
sa11x0_set_flash_data(&cerf_flash_data, &cerf_flash_resource, 1);
+ sa11x0_set_mcp_data(&cerf_mcp_data);
}
MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube")
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 95ae217be1b..3f1e358455e 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -221,6 +221,11 @@ static struct platform_device sa11x0mcp_device = {
.resource = sa11x0mcp_resources,
};
+void sa11x0_set_mcp_data(struct mcp_plat_data *data)
+{
+ sa11x0mcp_device.dev.platform_data = data;
+}
+
static struct resource sa11x0ssp_resources[] = {
[0] = {
.start = 0x80070000,
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index bfe41da9923..279e3afa3c3 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -34,5 +34,8 @@ struct resource;
extern void sa11x0_set_flash_data(struct flash_platform_data *flash,
struct resource *res, int nr);
+struct sa11x0_ssp_plat_ops;
+extern void sa11x0_set_ssp_data(struct sa11x0_ssp_plat_ops *ops);
+
struct irda_platform_data;
void sa11x0_set_irda_data(struct irda_platform_data *irda);
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index 870b488aeda..ed6744d480a 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -13,12 +13,23 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/serial_sa1100.h>
+#include <asm/arch/mcp.h>
#include "generic.h"
#warning "include/asm/arch-sa1100/ide.h needs fixing for lart"
+static struct mcp_plat_data lart_mcp_data = {
+ .mccr0 = MCCR0_ADM,
+ .sclk_rate = 11981000,
+};
+
+static void __init lart_init(void)
+{
+ sa11x0_set_mcp_data(&lart_mcp_data);
+}
+
static struct map_desc lart_io_desc[] __initdata = {
/* virtual physical length type */
{ 0xe8000000, 0x00000000, 0x00400000, MT_DEVICE }, /* main flash memory */
@@ -47,5 +58,6 @@ MACHINE_START(LART, "LART")
.boot_params = 0xc0000100,
.map_io = lart_map_io,
.init_irq = sa1100_init_irq,
+ .init_machine = lart_init,
.timer = &sa1100_timer,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index 43a00359fcd..7482288278d 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -18,6 +18,7 @@
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/mach/serial_sa1100.h>
+#include <asm/arch/mcp.h>
#include <asm/arch/shannon.h>
#include "generic.h"
@@ -52,9 +53,15 @@ static struct resource shannon_flash_resource = {
.flags = IORESOURCE_MEM,
};
+static struct mcp_plat_data shannon_mcp_data = {
+ .mccr0 = MCCR0_ADM,
+ .sclk_rate = 11981000,
+};
+
static void __init shannon_init(void)
{
sa11x0_set_flash_data(&shannon_flash_data, &shannon_flash_resource, 1);
+ sa11x0_set_mcp_data(&shannon_mcp_data);
}
static void __init shannon_map_io(void)
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 77978586b12..07f6d5fd7bb 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -23,6 +23,7 @@
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/mach/serial_sa1100.h>
+#include <asm/arch/mcp.h>
#include <asm/arch/simpad.h>
#include <linux/serial_core.h>
@@ -123,6 +124,11 @@ static struct resource simpad_flash_resources [] = {
}
};
+static struct mcp_plat_data simpad_mcp_data = {
+ .mccr0 = MCCR0_ADM,
+ .sclk_rate = 11981000,
+};
+
static void __init simpad_map_io(void)
@@ -157,6 +163,7 @@ static void __init simpad_map_io(void)
sa11x0_set_flash_data(&simpad_flash_data, simpad_flash_resources,
ARRAY_SIZE(simpad_flash_resources));
+ sa11x0_set_mcp_data(&simpad_mcp_data);
}
static void simpad_power_off(void)
diff --git a/arch/arm26/kernel/signal.c b/arch/arm26/kernel/signal.c
index 356d9809cc0..ce2055bdc9e 100644
--- a/arch/arm26/kernel/signal.c
+++ b/arch/arm26/kernel/signal.c
@@ -454,14 +454,13 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&tsk->sighand->siglock);
- sigorsets(&tsk->blocked, &tsk->blocked,
- &ka->sa.sa_mask);
+ spin_lock_irq(&tsk->sighand->siglock);
+ sigorsets(&tsk->blocked, &tsk->blocked,
+ &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&tsk->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&tsk->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&tsk->sighand->siglock);
return;
}
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index 85e0032e664..693771961f8 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -517,13 +517,12 @@ handle_signal(int canrestart, unsigned long sig,
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index fb4c79d5b76..0a3614dab88 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -568,13 +568,12 @@ handle_signal(int canrestart, unsigned long sig,
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 36a2dffc8eb..d4ccc0728df 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -506,13 +506,12 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
else
setup_frame(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
} /* end handle_signal() */
/*****************************************************************************/
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 5aab87eae1f..f13d5e82d4b 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -488,13 +488,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
else
setup_frame(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 89ef7adc63a..140e340569c 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -577,10 +577,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
else
ret = setup_frame(sig, ka, oldset, regs);
- if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index b8a0a7d257a..774f34b675c 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -467,15 +467,12 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
if (!setup_frame(sig, ka, info, oldset, scr))
return 0;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- {
- sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
- sigaddset(&current->blocked, sig);
- recalc_sigpending();
- }
- spin_unlock_irq(&current->sighand->siglock);
- }
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
return 1;
}
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 5aef7e406ef..71763f7a1d1 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -341,13 +341,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up the stack frame */
setup_rt_frame(sig, ka, info, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index 30dceb59a46..43a2726c0d0 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -732,13 +732,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 40244782a8e..4c114ae2179 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -155,13 +155,12 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
else
setup_irix_frame(ka, regs, sig, oldset);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 65ee15396ff..0209c1dd142 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -425,13 +425,12 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
setup_frame(ka, regs, sig, oldset);
#endif
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index c1a69cf232f..f6875f023a2 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -751,13 +751,12 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
else
setup_frame(ka, regs, sig, oldset);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
int do_signal32(sigset_t *oldset, struct pt_regs *regs)
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 9421bb98ea6..55d71c15e1f 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -517,13 +517,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
return 0;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
return 1;
}
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
index 8aaeb6f4e75..2244bf91e59 100644
--- a/arch/ppc/kernel/signal.c
+++ b/arch/ppc/kernel/signal.c
@@ -759,13 +759,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
else
handle_signal(signr, &ka, &info, oldset, regs, newsp);
- if (!(ka.sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
+ if (!(ka.sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
return 1;
}
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index bf782276984..49a79a55c32 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -481,10 +481,11 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
/* Set up Signal Frame */
ret = setup_rt_frame(sig, ka, info, oldset, regs);
- if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c
index 3c2fa5c284c..46f4d6cc7fc 100644
--- a/arch/ppc64/kernel/signal32.c
+++ b/arch/ppc64/kernel/signal32.c
@@ -976,11 +976,12 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
else
ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp);
- if (ret && !(ka.sa.sa_flags & SA_NODEFER)) {
+ if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked,
&ka.sa.sa_mask);
- sigaddset(&current->blocked, signr);
+ if (!(ka.sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index d05d65ac969..7358cdb8441 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -637,12 +637,11 @@ handle_signal32(unsigned long sig, struct k_sigaction *ka,
else
setup_frame32(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 610c1d03e97..6a3f5b7473a 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -429,13 +429,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
else
setup_frame(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 8022243f017..b475c4d2405 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -546,13 +546,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index c6a14a87c59..3ea8929e483 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -664,13 +664,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
else
setup_frame(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index 55352ed85e8..53c192a4982 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -32,7 +32,6 @@
#include <linux/spinlock.h>
#include <linux/root_dev.h>
-#include <asm/segment.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index 011ff35057a..5f34d7dc2b8 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -1034,13 +1034,12 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
else
setup_frame(&ka->sa, regs, signr, oldset, info);
}
- if (!(ka->sa.sa_flags & SA_NOMASK)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
index fd8005a3e6b..591547af4c6 100644
--- a/arch/sparc/kernel/tick14.c
+++ b/arch/sparc/kernel/tick14.c
@@ -19,7 +19,6 @@
#include <linux/interrupt.h>
#include <asm/oplib.h>
-#include <asm/segment.h>
#include <asm/timer.h>
#include <asm/mostek.h>
#include <asm/system.h>
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 6486cbf2efe..3b759aefc17 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -32,7 +32,6 @@
#include <linux/profile.h>
#include <asm/oplib.h>
-#include <asm/segment.h>
#include <asm/timer.h>
#include <asm/mostek.h>
#include <asm/system.h>
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 37f4107bae6..2bbd53f3caf 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <asm/system.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index ec2e05028a1..c03babaa049 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -25,7 +25,6 @@
#include <linux/bootmem.h>
#include <asm/system.h>
-#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 88332f00094..cecdc0a7521 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -21,6 +21,7 @@
#include <asm/visasm.h>
#include <asm/estate.h>
#include <asm/auxio.h>
+#include <asm/sfafsr.h>
#define curptr g6
@@ -690,14 +691,159 @@ netbsd_syscall:
retl
nop
- /* These next few routines must be sure to clear the
- * SFSR FaultValid bit so that the fast tlb data protection
- * handler does not flush the wrong context and lock up the
- * box.
+ /* We need to carefully read the error status, ACK
+ * the errors, prevent recursive traps, and pass the
+ * information on to C code for logging.
+ *
+ * We pass the AFAR in as-is, and we encode the status
+ * information as described in asm-sparc64/sfafsr.h
+ */
+ .globl __spitfire_access_error
+__spitfire_access_error:
+ /* Disable ESTATE error reporting so that we do not
+ * take recursive traps and RED state the processor.
+ */
+ stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
+ membar #Sync
+
+ mov UDBE_UE, %g1
+ ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
+
+ /* __spitfire_cee_trap branches here with AFSR in %g4 and
+ * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the
+ * ESTATE Error Enable register.
+ */
+__spitfire_cee_trap_continue:
+ ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
+
+ rdpr %tt, %g3
+ and %g3, 0x1ff, %g3 ! Paranoia
+ sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
+ or %g4, %g3, %g4
+ rdpr %tl, %g3
+ cmp %g3, 1
+ mov 1, %g3
+ bleu %xcc, 1f
+ sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
+
+ or %g4, %g3, %g4
+
+ /* Read in the UDB error register state, clearing the
+ * sticky error bits as-needed. We only clear them if
+ * the UE bit is set. Likewise, __spitfire_cee_trap
+ * below will only do so if the CE bit is set.
+ *
+ * NOTE: UltraSparc-I/II have high and low UDB error
+ * registers, corresponding to the two UDB units
+ * present on those chips. UltraSparc-IIi only
+ * has a single UDB, called "SDB" in the manual.
+ * For IIi the upper UDB register always reads
+ * as zero so for our purposes things will just
+ * work with the checks below.
*/
- .globl __do_data_access_exception
- .globl __do_data_access_exception_tl1
-__do_data_access_exception_tl1:
+1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
+ and %g3, 0x3ff, %g7 ! Paranoia
+ sllx %g7, SFSTAT_UDBH_SHIFT, %g7
+ or %g4, %g7, %g4
+ andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
+ be,pn %xcc, 1f
+ nop
+ stxa %g3, [%g0] ASI_UDB_ERROR_W
+ membar #Sync
+
+1: mov 0x18, %g3
+ ldxa [%g3] ASI_UDBL_ERROR_R, %g3
+ and %g3, 0x3ff, %g7 ! Paranoia
+ sllx %g7, SFSTAT_UDBL_SHIFT, %g7
+ or %g4, %g7, %g4
+ andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
+ be,pn %xcc, 1f
+ nop
+ mov 0x18, %g7
+ stxa %g3, [%g7] ASI_UDB_ERROR_W
+ membar #Sync
+
+1: /* Ok, now that we've latched the error state,
+ * clear the sticky bits in the AFSR.
+ */
+ stxa %g4, [%g0] ASI_AFSR
+ membar #Sync
+
+ rdpr %tl, %g2
+ cmp %g2, 1
+ rdpr %pil, %g2
+ bleu,pt %xcc, 1f
+ wrpr %g0, 15, %pil
+
+ ba,pt %xcc, etraptl1
+ rd %pc, %g7
+
+ ba,pt %xcc, 2f
+ nop
+
+1: ba,pt %xcc, etrap_irq
+ rd %pc, %g7
+
+2: mov %l4, %o1
+ mov %l5, %o2
+ call spitfire_access_error
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ clr %l6
+
+ /* This is the trap handler entry point for ECC correctable
+ * errors. They are corrected, but we listen for the trap
+ * so that the event can be logged.
+ *
+ * Disrupting errors are either:
+ * 1) single-bit ECC errors during UDB reads to system
+ * memory
+ * 2) data parity errors during write-back events
+ *
+ * As far as I can make out from the manual, the CEE trap
+ * is only for correctable errors during memory read
+ * accesses by the front-end of the processor.
+ *
+ * The code below is only for trap level 1 CEE events,
+ * as it is the only situation where we can safely record
+ * and log. For trap level >1 we just clear the CE bit
+ * in the AFSR and return.
+ *
+ * This is just like __spitfire_access_error above, but it
+ * specifically handles correctable errors. If an
+ * uncorrectable error is indicated in the AFSR we
+ * will branch directly above to __spitfire_access_error
+ * to handle it instead. Uncorrectable therefore takes
+ * priority over correctable, and the error logging
+ * C code will notice this case by inspecting the
+ * trap type.
+ */
+ .globl __spitfire_cee_trap
+__spitfire_cee_trap:
+ ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
+ mov 1, %g3
+ sllx %g3, SFAFSR_UE_SHIFT, %g3
+ andcc %g4, %g3, %g0 ! Check for UE
+ bne,pn %xcc, __spitfire_access_error
+ nop
+
+ /* Ok, in this case we only have a correctable error.
+ * Indicate we only wish to capture that state in register
+ * %g1, and we only disable CE error reporting unlike UE
+ * handling which disables all errors.
+ */
+ ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
+ andn %g3, ESTATE_ERR_CE, %g3
+ stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
+ membar #Sync
+
+ /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
+ ba,pt %xcc, __spitfire_cee_trap_continue
+ mov UDBE_CE, %g1
+
+ .globl __spitfire_data_access_exception
+ .globl __spitfire_data_access_exception_tl1
+__spitfire_data_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -706,9 +852,25 @@ __do_data_access_exception_tl1:
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
+ rdpr %tt, %g3
+ cmp %g3, 0x80 ! first win spill/fill trap
+ blu,pn %xcc, 1f
+ cmp %g3, 0xff ! last win spill/fill trap
+ bgu,pn %xcc, 1f
+ nop
ba,pt %xcc, winfix_dax
rdpr %tpc, %g3
-__do_data_access_exception:
+1: sethi %hi(109f), %g7
+ ba,pt %xcc, etraptl1
+109: or %g7, %lo(109b), %g7
+ mov %l4, %o1
+ mov %l5, %o2
+ call spitfire_data_access_exception_tl1
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ clr %l6
+
+__spitfire_data_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
@@ -722,20 +884,19 @@ __do_data_access_exception:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call data_access_exception
+ call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
- .globl __do_instruction_access_exception
- .globl __do_instruction_access_exception_tl1
-__do_instruction_access_exception_tl1:
+ .globl __spitfire_insn_access_exception
+ .globl __spitfire_insn_access_exception_tl1
+__spitfire_insn_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
- mov DMMU_SFAR, %g5
- ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
- ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
+ ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
+ rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
@@ -743,18 +904,17 @@ __do_instruction_access_exception_tl1:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call instruction_access_exception_tl1
+ call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-__do_instruction_access_exception:
+__spitfire_insn_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
- mov DMMU_SFAR, %g5
- ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
- ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
+ ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
+ rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
@@ -762,102 +922,11 @@ __do_instruction_access_exception:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call instruction_access_exception
+ call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
- /* This is the trap handler entry point for ECC correctable
- * errors. They are corrected, but we listen for the trap
- * so that the event can be logged.
- *
- * Disrupting errors are either:
- * 1) single-bit ECC errors during UDB reads to system
- * memory
- * 2) data parity errors during write-back events
- *
- * As far as I can make out from the manual, the CEE trap
- * is only for correctable errors during memory read
- * accesses by the front-end of the processor.
- *
- * The code below is only for trap level 1 CEE events,
- * as it is the only situation where we can safely record
- * and log. For trap level >1 we just clear the CE bit
- * in the AFSR and return.
- */
-
- /* Our trap handling infrastructure allows us to preserve
- * two 64-bit values during etrap for arguments to
- * subsequent C code. Therefore we encode the information
- * as follows:
- *
- * value 1) Full 64-bits of AFAR
- * value 2) Low 33-bits of AFSR, then bits 33-->42
- * are UDBL error status and bits 43-->52
- * are UDBH error status
- */
- .align 64
- .globl cee_trap
-cee_trap:
- ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR
- ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR
- sllx %g1, 31, %g1 ! Clear reserved bits
- srlx %g1, 31, %g1 ! in AFSR
-
- /* NOTE: UltraSparc-I/II have high and low UDB error
- * registers, corresponding to the two UDB units
- * present on those chips. UltraSparc-IIi only
- * has a single UDB, called "SDB" in the manual.
- * For IIi the upper UDB register always reads
- * as zero so for our purposes things will just
- * work with the checks below.
- */
- ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status
- andcc %g3, (1 << 8), %g4 ! Check CE bit
- sllx %g3, (64 - 10), %g3 ! Clear reserved bits
- srlx %g3, (64 - 10), %g3 ! in UDB-Low error status
-
- sllx %g3, (33 + 0), %g3 ! Shift up to encoding area
- or %g1, %g3, %g1 ! Or it in
- be,pn %xcc, 1f ! Branch if CE bit was clear
- nop
- stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL
- membar #Sync ! Synchronize ASI stores
-1: mov 0x18, %g5 ! Addr of UDB-High error status
- ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it
-
- andcc %g3, (1 << 8), %g4 ! Check CE bit
- sllx %g3, (64 - 10), %g3 ! Clear reserved bits
- srlx %g3, (64 - 10), %g3 ! in UDB-High error status
- sllx %g3, (33 + 10), %g3 ! Shift up to encoding area
- or %g1, %g3, %g1 ! Or it in
- be,pn %xcc, 1f ! Branch if CE bit was clear
- nop
- nop
-
- stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH
- membar #Sync ! Synchronize ASI stores
-1: mov 1, %g5 ! AFSR CE bit is
- sllx %g5, 20, %g5 ! bit 20
- stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR
- membar #Sync ! Synchronize ASI stores
- sllx %g2, (64 - 41), %g2 ! Clear reserved bits
- srlx %g2, (64 - 41), %g2 ! in latched AFAR
-
- andn %g2, 0x0f, %g2 ! Finish resv bit clearing
- mov %g1, %g4 ! Move AFSR+UDB* into save reg
- mov %g2, %g5 ! Move AFAR into save reg
- rdpr %pil, %g2
- wrpr %g0, 15, %pil
- ba,pt %xcc, etrap_irq
- rd %pc, %g7
- mov %l4, %o0
-
- mov %l5, %o1
- call cee_log
- add %sp, PTREGS_OFF, %o2
- ba,a,pt %xcc, rtrap_irq
-
/* Capture I/D/E-cache state into per-cpu error scoreboard.
*
* %g1: (TL>=0) ? 1 : 0
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 2803bc7c2c7..425c60cfea1 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -466,7 +466,7 @@ do_flush_sync:
if (!limit)
break;
udelay(1);
- membar("#LoadLoad");
+ rmb();
}
if (!limit)
printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 07424b07593..66255434128 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -103,7 +103,7 @@ void cpu_idle(void)
* other cpus see our increasing idleness for the buddy
* redistribution algorithm. -DaveM
*/
- membar("#StoreStore | #StoreLoad");
+ membar_storeload_storestore();
}
}
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 89f5e019f24..e09ddf92765 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -147,7 +147,7 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
if (!limit)
break;
udelay(1);
- membar("#LoadLoad");
+ rmb();
}
if (!limit)
printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index b7e6a91952b..fbdfed3798d 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -33,7 +33,6 @@
#include <linux/cpu.h>
#include <linux/initrd.h>
-#include <asm/segment.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index b27934671c3..60f5dfabb1e 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -574,13 +574,12 @@ static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
{
setup_rt_frame(ka, regs, signr, oldset,
(ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
- if (!(ka->sa.sa_flags & SA_NOMASK)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index f28428f4170..aecccd0df1d 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -877,11 +877,12 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long page = (unsigned long)
page_address(pte_page(*ptep));
- __asm__ __volatile__(
- " membar #StoreStore\n"
- " flush %0 + %1"
- : : "r" (page), "r" (address & (PAGE_SIZE - 1))
- : "memory");
+ wmb();
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (page),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
}
pte_unmap(ptep);
preempt_enable();
@@ -1292,11 +1293,12 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long page = (unsigned long)
page_address(pte_page(*ptep));
- __asm__ __volatile__(
- " membar #StoreStore\n"
- " flush %0 + %1"
- : : "r" (page), "r" (address & (PAGE_SIZE - 1))
- : "memory");
+ wmb();
+ __asm__ __volatile__("flush %0 + %1"
+ : /* no outputs */
+ : "r" (page),
+ "r" (address & (PAGE_SIZE - 1))
+ : "memory");
}
pte_unmap(ptep);
preempt_enable();
@@ -1325,13 +1327,12 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
else
setup_frame32(&ka->sa, regs, signr, oldset, info);
}
- if (!(ka->sa.sa_flags & SA_NOMASK)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b9b42491e11..b4fc6a5462b 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -144,7 +144,7 @@ void __init smp_callin(void)
current->active_mm = &init_mm;
while (!cpu_isset(cpuid, smp_commenced_mask))
- membar("#LoadLoad");
+ rmb();
cpu_set(cpuid, cpu_online_map);
}
@@ -184,11 +184,11 @@ static inline long get_delta (long *rt, long *master)
for (i = 0; i < NUM_ITERS; i++) {
t0 = tick_ops->get_tick();
go[MASTER] = 1;
- membar("#StoreLoad");
+ membar_storeload();
while (!(tm = go[SLAVE]))
- membar("#LoadLoad");
+ rmb();
go[SLAVE] = 0;
- membar("#StoreStore");
+ wmb();
t1 = tick_ops->get_tick();
if (t1 - t0 < best_t1 - best_t0)
@@ -221,7 +221,7 @@ void smp_synchronize_tick_client(void)
go[MASTER] = 1;
while (go[MASTER])
- membar("#LoadLoad");
+ rmb();
local_irq_save(flags);
{
@@ -273,21 +273,21 @@ static void smp_synchronize_one_tick(int cpu)
/* wait for client to be ready */
while (!go[MASTER])
- membar("#LoadLoad");
+ rmb();
/* now let the client proceed into his loop */
go[MASTER] = 0;
- membar("#StoreLoad");
+ membar_storeload();
spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
- membar("#LoadLoad");
+ rmb();
go[MASTER] = 0;
- membar("#StoreStore");
+ wmb();
go[SLAVE] = tick_ops->get_tick();
- membar("#StoreLoad");
+ membar_storeload();
}
}
spin_unlock_irqrestore(&itc_sync_lock, flags);
@@ -927,11 +927,11 @@ void smp_capture(void)
smp_processor_id());
#endif
penguins_are_doing_time = 1;
- membar("#StoreStore | #LoadStore");
+ membar_storestore_loadstore();
atomic_inc(&smp_capture_registry);
smp_cross_call(&xcall_capture, 0, 0, 0);
while (atomic_read(&smp_capture_registry) != ncpus)
- membar("#LoadLoad");
+ rmb();
#ifdef CAPTURE_DEBUG
printk("done\n");
#endif
@@ -947,7 +947,7 @@ void smp_release(void)
smp_processor_id());
#endif
penguins_are_doing_time = 0;
- membar("#StoreStore | #StoreLoad");
+ membar_storeload_storestore();
atomic_dec(&smp_capture_registry);
}
}
@@ -970,9 +970,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
save_alternate_globals(global_save);
prom_world(1);
atomic_inc(&smp_capture_registry);
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
while (penguins_are_doing_time)
- membar("#LoadLoad");
+ rmb();
restore_alternate_globals(global_save);
atomic_dec(&smp_capture_registry);
prom_world(0);
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9202d925a9c..a3ea697f1ad 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -99,17 +99,6 @@ extern int __ashrdi3(int, int);
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
-#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
-extern void _do_spin_lock (spinlock_t *lock, char *str);
-extern void _do_spin_unlock (spinlock_t *lock);
-extern int _spin_trylock (spinlock_t *lock);
-extern void _do_read_lock(rwlock_t *rw, char *str);
-extern void _do_read_unlock(rwlock_t *rw, char *str);
-extern void _do_write_lock(rwlock_t *rw, char *str);
-extern void _do_write_unlock(rwlock_t *rw);
-extern int _do_write_trylock(rwlock_t *rw, char *str);
-#endif
-
extern unsigned long phys_base;
extern unsigned long pfn_base;
@@ -152,18 +141,6 @@ EXPORT_SYMBOL(_mcount);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(phys_cpu_present_map);
-/* Spinlock debugging library, optional. */
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_do_spin_lock);
-EXPORT_SYMBOL(_do_spin_unlock);
-EXPORT_SYMBOL(_spin_trylock);
-EXPORT_SYMBOL(_do_read_lock);
-EXPORT_SYMBOL(_do_read_unlock);
-EXPORT_SYMBOL(_do_write_lock);
-EXPORT_SYMBOL(_do_write_unlock);
-EXPORT_SYMBOL(_do_write_trylock);
-#endif
-
EXPORT_SYMBOL(smp_call_function);
#endif /* CONFIG_SMP */
@@ -429,3 +406,12 @@ EXPORT_SYMBOL(xor_vis_4);
EXPORT_SYMBOL(xor_vis_5);
EXPORT_SYMBOL(prom_palette);
+
+/* memory barriers */
+EXPORT_SYMBOL(mb);
+EXPORT_SYMBOL(rmb);
+EXPORT_SYMBOL(wmb);
+EXPORT_SYMBOL(membar_storeload);
+EXPORT_SYMBOL(membar_storeload_storestore);
+EXPORT_SYMBOL(membar_storeload_loadload);
+EXPORT_SYMBOL(membar_storestore_loadstore);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 0c9e54b2f0c..b280b2ef674 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -33,6 +33,7 @@
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
+#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
@@ -143,8 +144,7 @@ void do_BUG(const char *file, int line)
}
#endif
-void instruction_access_exception(struct pt_regs *regs,
- unsigned long sfsr, unsigned long sfar)
+void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
siginfo_t info;
@@ -153,8 +153,8 @@ void instruction_access_exception(struct pt_regs *regs,
return;
if (regs->tstate & TSTATE_PRIV) {
- printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
- sfsr, sfar);
+ printk("spitfire_insn_access_exception: SFSR[%016lx] "
+ "SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Iax", regs);
}
if (test_thread_flag(TIF_32BIT)) {
@@ -169,19 +169,17 @@ void instruction_access_exception(struct pt_regs *regs,
force_sig_info(SIGSEGV, &info, current);
}
-void instruction_access_exception_tl1(struct pt_regs *regs,
- unsigned long sfsr, unsigned long sfar)
+void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
0, 0x8, SIGTRAP) == NOTIFY_STOP)
return;
dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
- instruction_access_exception(regs, sfsr, sfar);
+ spitfire_insn_access_exception(regs, sfsr, sfar);
}
-void data_access_exception(struct pt_regs *regs,
- unsigned long sfsr, unsigned long sfar)
+void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
siginfo_t info;
@@ -207,8 +205,8 @@ void data_access_exception(struct pt_regs *regs,
return;
}
/* Shit... */
- printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
- sfsr, sfar);
+ printk("spitfire_data_access_exception: SFSR[%016lx] "
+ "SFAR[%016lx], going.\n", sfsr, sfar);
die_if_kernel("Dax", regs);
}
@@ -220,6 +218,16 @@ void data_access_exception(struct pt_regs *regs,
force_sig_info(SIGSEGV, &info, current);
}
+void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
+{
+ if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
+ 0, 0x30, SIGTRAP) == NOTIFY_STOP)
+ return;
+
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ spitfire_data_access_exception(regs, sfsr, sfar);
+}
+
#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
@@ -253,54 +261,13 @@ static void spitfire_clean_and_reenable_l1_caches(void)
: "memory");
}
-void do_iae(struct pt_regs *regs)
+static void spitfire_enable_estate_errors(void)
{
- siginfo_t info;
-
- spitfire_clean_and_reenable_l1_caches();
-
- if (notify_die(DIE_TRAP, "instruction access exception", regs,
- 0, 0x8, SIGTRAP) == NOTIFY_STOP)
- return;
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_OBJERR;
- info.si_addr = (void *)0;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
-}
-
-void do_dae(struct pt_regs *regs)
-{
- siginfo_t info;
-
-#ifdef CONFIG_PCI
- if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
- spitfire_clean_and_reenable_l1_caches();
-
- pci_poke_faulted = 1;
-
- /* Why the fuck did they have to change this? */
- if (tlb_type == cheetah || tlb_type == cheetah_plus)
- regs->tpc += 4;
-
- regs->tnpc = regs->tpc + 4;
- return;
- }
-#endif
- spitfire_clean_and_reenable_l1_caches();
-
- if (notify_die(DIE_TRAP, "data access exception", regs,
- 0, 0x30, SIGTRAP) == NOTIFY_STOP)
- return;
-
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_OBJERR;
- info.si_addr = (void *)0;
- info.si_trapno = 0;
- force_sig_info(SIGBUS, &info, current);
+ __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (ESTATE_ERR_ALL),
+ "i" (ASI_ESTATE_ERROR_EN));
}
static char ecc_syndrome_table[] = {
@@ -338,65 +305,15 @@ static char ecc_syndrome_table[] = {
0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
-/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
- * in the following format. The AFAR is left as is, with
- * reserved bits cleared, and is a raw 40-bit physical
- * address.
- */
-#define CE_STATUS_UDBH_UE (1UL << (43 + 9))
-#define CE_STATUS_UDBH_CE (1UL << (43 + 8))
-#define CE_STATUS_UDBH_ESYNDR (0xffUL << 43)
-#define CE_STATUS_UDBH_SHIFT 43
-#define CE_STATUS_UDBL_UE (1UL << (33 + 9))
-#define CE_STATUS_UDBL_CE (1UL << (33 + 8))
-#define CE_STATUS_UDBL_ESYNDR (0xffUL << 33)
-#define CE_STATUS_UDBL_SHIFT 33
-#define CE_STATUS_AFSR_MASK (0x1ffffffffUL)
-#define CE_STATUS_AFSR_ME (1UL << 32)
-#define CE_STATUS_AFSR_PRIV (1UL << 31)
-#define CE_STATUS_AFSR_ISAP (1UL << 30)
-#define CE_STATUS_AFSR_ETP (1UL << 29)
-#define CE_STATUS_AFSR_IVUE (1UL << 28)
-#define CE_STATUS_AFSR_TO (1UL << 27)
-#define CE_STATUS_AFSR_BERR (1UL << 26)
-#define CE_STATUS_AFSR_LDP (1UL << 25)
-#define CE_STATUS_AFSR_CP (1UL << 24)
-#define CE_STATUS_AFSR_WP (1UL << 23)
-#define CE_STATUS_AFSR_EDP (1UL << 22)
-#define CE_STATUS_AFSR_UE (1UL << 21)
-#define CE_STATUS_AFSR_CE (1UL << 20)
-#define CE_STATUS_AFSR_ETS (0xfUL << 16)
-#define CE_STATUS_AFSR_ETS_SHIFT 16
-#define CE_STATUS_AFSR_PSYND (0xffffUL << 0)
-#define CE_STATUS_AFSR_PSYND_SHIFT 0
-
-/* Layout of Ecache TAG Parity Syndrome of AFSR */
-#define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */
-#define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */
-#define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */
-#define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */
-
static char *syndrome_unknown = "<Unknown>";
-asmlinkage void cee_log(unsigned long ce_status,
- unsigned long afar,
- struct pt_regs *regs)
+static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
- char memmod_str[64];
- char *p;
- unsigned short scode, udb_reg;
+ unsigned short scode;
+ char memmod_str[64], *p;
- printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
- "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
- smp_processor_id(),
- (ce_status & CE_STATUS_AFSR_MASK),
- afar,
- ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
- ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
-
- udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
- if (udb_reg & (1 << 8)) {
- scode = ecc_syndrome_table[udb_reg & 0xff];
+ if (udbl & bit) {
+ scode = ecc_syndrome_table[udbl & 0xff];
if (prom_getunumber(scode, afar,
memmod_str, sizeof(memmod_str)) == -1)
p = syndrome_unknown;
@@ -407,9 +324,8 @@ asmlinkage void cee_log(unsigned long ce_status,
smp_processor_id(), scode, p);
}
- udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
- if (udb_reg & (1 << 8)) {
- scode = ecc_syndrome_table[udb_reg & 0xff];
+ if (udbh & bit) {
+ scode = ecc_syndrome_table[udbh & 0xff];
if (prom_getunumber(scode, afar,
memmod_str, sizeof(memmod_str)) == -1)
p = syndrome_unknown;
@@ -419,6 +335,127 @@ asmlinkage void cee_log(unsigned long ce_status,
"Memory Module \"%s\"\n",
smp_processor_id(), scode, p);
}
+
+}
+
+static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
+{
+
+ printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
+ "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
+ smp_processor_id(), afsr, afar, udbl, udbh, tl1);
+
+ spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
+
+ /* We always log it, even if someone is listening for this
+ * trap.
+ */
+ notify_die(DIE_TRAP, "Correctable ECC Error", regs,
+ 0, TRAP_TYPE_CEE, SIGTRAP);
+
+ /* The Correctable ECC Error trap does not disable I/D caches. So
+ * we only have to restore the ESTATE Error Enable register.
+ */
+ spitfire_enable_estate_errors();
+}
+
+static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
+ "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
+ smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
+
+ /* XXX add more human friendly logging of the error status
+ * XXX as is implemented for cheetah
+ */
+
+ spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
+
+ /* We always log it, even if someone is listening for this
+ * trap.
+ */
+ notify_die(DIE_TRAP, "Uncorrectable Error", regs,
+ 0, tt, SIGTRAP);
+
+ if (regs->tstate & TSTATE_PRIV) {
+ if (tl1)
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+ die_if_kernel("UE", regs);
+ }
+
+ /* XXX need more intelligent processing here, such as is implemented
+ * XXX for cheetah errors, in fact if the E-cache still holds the
+ * XXX line with bad parity this will loop
+ */
+
+ spitfire_clean_and_reenable_l1_caches();
+ spitfire_enable_estate_errors();
+
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc &= 0xffffffff;
+ regs->tnpc &= 0xffffffff;
+ }
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_OBJERR;
+ info.si_addr = (void *)0;
+ info.si_trapno = 0;
+ force_sig_info(SIGBUS, &info, current);
+}
+
+void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
+{
+ unsigned long afsr, tt, udbh, udbl;
+ int tl1;
+
+ afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
+ tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
+ tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
+ udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
+ udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
+
+#ifdef CONFIG_PCI
+ if (tt == TRAP_TYPE_DAE &&
+ pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+ spitfire_clean_and_reenable_l1_caches();
+ spitfire_enable_estate_errors();
+
+ pci_poke_faulted = 1;
+ regs->tnpc = regs->tpc + 4;
+ return;
+ }
+#endif
+
+ if (afsr & SFAFSR_UE)
+ spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
+
+ if (tt == TRAP_TYPE_CEE) {
+ /* Handle the case where we took a CEE trap, but ACK'd
+ * only the UE state in the UDB error registers.
+ */
+ if (afsr & SFAFSR_UE) {
+ if (udbh & UDBE_CE) {
+ __asm__ __volatile__(
+ "stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (udbh & UDBE_CE),
+ "r" (0x0), "i" (ASI_UDB_ERROR_W));
+ }
+ if (udbl & UDBE_CE) {
+ __asm__ __volatile__(
+ "stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (udbl & UDBE_CE),
+ "r" (0x18), "i" (ASI_UDB_ERROR_W));
+ }
+ }
+
+ spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
+ }
}
int cheetah_pcache_forced_on;
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 491bb3681f9..8365bc1f81f 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -18,9 +18,10 @@ sparc64_ttable_tl0:
tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
tl0_iax: membar #Sync
- TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
+ TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
tl0_resv009: BTRAP(0x9)
-tl0_iae: TRAP(do_iae)
+tl0_iae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
tl0_ill: membar #Sync
TRAP_7INSNS(do_illegal_instruction)
@@ -36,9 +37,10 @@ tl0_cwin: CLEAN_WINDOW
tl0_div0: TRAP(do_div0)
tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
tl0_resv02f: BTRAP(0x2f)
-tl0_dax: TRAP_NOSAVE(__do_data_access_exception)
+tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception)
tl0_resv031: BTRAP(0x31)
-tl0_dae: TRAP(do_dae)
+tl0_dae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl0_resv033: BTRAP(0x33)
tl0_mna: TRAP_NOSAVE(do_mna)
tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
@@ -73,7 +75,8 @@ tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
tl0_ivec: TRAP_IVEC
tl0_paw: TRAP(do_paw)
tl0_vaw: TRAP(do_vaw)
-tl0_cee: TRAP_NOSAVE(cee_trap)
+tl0_cee: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
tl0_iamiss:
#include "itlb_base.S"
tl0_damiss:
@@ -175,9 +178,10 @@ tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
sparc64_ttable_tl1:
tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
-tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1)
+tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
tl1_resv009: BTRAPTL1(0x9)
-tl1_iae: TRAPTL1(do_iae_tl1)
+tl1_iae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
tl1_ill: TRAPTL1(do_ill_tl1)
tl1_privop: BTRAPTL1(0x11)
@@ -193,9 +197,10 @@ tl1_cwin: CLEAN_WINDOW
tl1_div0: TRAPTL1(do_div0_tl1)
tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
-tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1)
+tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
tl1_resv031: BTRAPTL1(0x31)
-tl1_dae: TRAPTL1(do_dae_tl1)
+tl1_dae: membar #Sync
+ TRAP_NOSAVE_7INSNS(__spitfire_access_error)
tl1_resv033: BTRAPTL1(0x33)
tl1_mna: TRAP_NOSAVE(do_mna)
tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
@@ -219,8 +224,8 @@ tl1_paw: TRAPTL1(do_paw_tl1)
tl1_vaw: TRAPTL1(do_vaw_tl1)
/* The grotty trick to save %g1 into current->thread.cee_stuff
- * is because when we take this trap we could be interrupting trap
- * code already using the trap alternate global registers.
+ * is because when we take this trap we could be interrupting
+ * trap code already using the trap alternate global registers.
*
* We cross our fingers and pray that this store/load does
* not cause yet another CEE trap.
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index 11c3e88732e..da9739f0d43 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -349,9 +349,9 @@ int handle_popc(u32 insn, struct pt_regs *regs)
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
-extern void data_access_exception(struct pt_regs *regs,
- unsigned long sfsr,
- unsigned long sfar);
+extern void spitfire_data_access_exception(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
@@ -394,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
break;
}
default:
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (put_user (first >> 32, (u32 __user *)addr) ||
__put_user ((u32)first, (u32 __user *)(addr + 4)) ||
__put_user (second >> 32, (u32 __user *)(addr + 8)) ||
__put_user ((u32)second, (u32 __user *)(addr + 12))) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
} else {
@@ -414,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
do_privact(regs);
return 1;
} else if (asi > ASI_SNFL) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
switch (insn & 0x180000) {
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
}
if (err && !(asi & 0x2 /* NF */)) {
- data_access_exception(regs, 0, addr);
+ spitfire_data_access_exception(regs, 0, addr);
return 1;
}
if (asi & 0x8) /* Little */ {
@@ -534,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
*(u64 *)(f->regs + freg) = value;
current_thread_info()->fpsaved[0] |= flag;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
+daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
@@ -578,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
__put_user ((u32)value, (u32 __user *)(sfar + 4)))
goto daex;
} else {
-daex: data_access_exception(regs, sfsr, sfar);
+daex: spitfire_data_access_exception(regs, sfsr, sfar);
return;
}
advance(regs);
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index dfbc7e0dcf7..99c809a1e5a 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -318,7 +318,7 @@ fill_fixup_dax:
nop
rdpr %pstate, %l1 ! Prepare to change globals.
mov %g4, %o1 ! Setup args for
- mov %g5, %o2 ! final call to data_access_exception.
+ mov %g5, %o2 ! final call to spitfire_data_access_exception.
andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
mov %g6, %o7 ! Stash away current.
@@ -330,7 +330,7 @@ fill_fixup_dax:
mov TSB_REG, %g1
ldxa [%g1] ASI_IMMU, %g5
#endif
- call data_access_exception
+ call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
b,pt %xcc, rtrap
@@ -391,7 +391,7 @@ window_dax_from_user_common:
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
- call data_access_exception
+ call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d..6201f104098 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
copy_in_user.o user_fixup.o memmove.o \
- mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+ mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o mb.o
lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c
index f03344cf784..f5f0b5586f0 100644
--- a/arch/sparc64/lib/debuglocks.c
+++ b/arch/sparc64/lib/debuglocks.c
@@ -12,8 +12,6 @@
#ifdef CONFIG_SMP
-#define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC))
-
static inline void show (char *str, spinlock_t *lock, unsigned long caller)
{
int cpu = smp_processor_id();
@@ -51,20 +49,19 @@ static inline void show_write (char *str, rwlock_t *lock, unsigned long caller)
#undef INIT_STUCK
#define INIT_STUCK 100000000
-void _do_spin_lock(spinlock_t *lock, char *str)
+void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
again:
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (val) {
while (lock->lock) {
if (!--stuck) {
@@ -72,7 +69,7 @@ again:
show(str, lock, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
goto again;
}
@@ -84,17 +81,16 @@ again:
put_cpu();
}
-int _do_spin_trylock(spinlock_t *lock)
+int _do_spin_trylock(spinlock_t *lock, unsigned long caller)
{
- unsigned long val, caller;
+ unsigned long val;
int cpu = get_cpu();
- GET_CALLER(caller);
__asm__ __volatile__("ldstub [%1], %0"
: "=r" (val)
: "r" (&(lock->lock))
: "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (!val) {
lock->owner_pc = ((unsigned int)caller);
lock->owner_cpu = cpu;
@@ -111,21 +107,20 @@ void _do_spin_unlock(spinlock_t *lock)
{
lock->owner_pc = 0;
lock->owner_cpu = NO_PROC_ID;
- membar("#StoreStore | #LoadStore");
+ membar_storestore_loadstore();
lock->lock = 0;
current->thread.smp_lock_count--;
}
/* Keep INIT_STUCK the same... */
-void _do_read_lock (rwlock_t *rw, char *str)
+void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
wlock_again:
/* Wait for any writer to go away. */
while (((long)(rw->lock)) < 0) {
@@ -134,7 +129,7 @@ wlock_again:
show_read(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
/* Try once to increment the counter. */
__asm__ __volatile__(
@@ -147,7 +142,7 @@ wlock_again:
"2:" : "=r" (val)
: "0" (&(rw->lock))
: "g1", "g7", "memory");
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
if (val)
goto wlock_again;
rw->reader_pc[cpu] = ((unsigned int)caller);
@@ -157,15 +152,13 @@ wlock_again:
put_cpu();
}
-void _do_read_unlock (rwlock_t *rw, char *str)
+void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
-
/* Drop our identity _first_. */
rw->reader_pc[cpu] = 0;
current->thread.smp_lock_count--;
@@ -193,14 +186,13 @@ runlock_again:
put_cpu();
}
-void _do_write_lock (rwlock_t *rw, char *str)
+void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int cpu = get_cpu();
int shown = 0;
- GET_CALLER(caller);
wlock_again:
/* Spin while there is another writer. */
while (((long)rw->lock) < 0) {
@@ -209,7 +201,7 @@ wlock_again:
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
/* Try to acquire the write bit.
@@ -264,7 +256,7 @@ wlock_again:
show_write(str, rw, caller);
stuck = INIT_STUCK;
}
- membar("#LoadLoad");
+ rmb();
}
goto wlock_again;
}
@@ -278,14 +270,12 @@ wlock_again:
put_cpu();
}
-void _do_write_unlock(rwlock_t *rw)
+void _do_write_unlock(rwlock_t *rw, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int stuck = INIT_STUCK;
int shown = 0;
- GET_CALLER(caller);
-
/* Drop our identity _first_ */
rw->writer_pc = 0;
rw->writer_cpu = NO_PROC_ID;
@@ -313,13 +303,11 @@ wlock_again:
}
}
-int _do_write_trylock (rwlock_t *rw, char *str)
+int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller)
{
- unsigned long caller, val;
+ unsigned long val;
int cpu = get_cpu();
- GET_CALLER(caller);
-
/* Try to acquire the write bit. */
__asm__ __volatile__(
" mov 1, %%g3\n"
diff --git a/arch/sparc64/lib/mb.S b/arch/sparc64/lib/mb.S
new file mode 100644
index 00000000000..4004f748619
--- /dev/null
+++ b/arch/sparc64/lib/mb.S
@@ -0,0 +1,73 @@
+/* mb.S: Out of line memory barriers.
+ *
+ * Copyright (C) 2005 David S. Miller (davem@davemloft.net)
+ */
+
+ /* These are here in an effort to more fully work around
+ * Spitfire Errata #51. Essentially, if a memory barrier
+ * occurs soon after a mispredicted branch, the chip can stop
+ * executing instructions until a trap occurs. Therefore, if
+ * interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be
+ * right in the delay slot, but a case has been traced
+ * recently wherein the memory barrier was one instruction
+ * after the branch delay slot and the chip still hung. The
+ * offending sequence was the following in sym_wakeup_done()
+ * of the sym53c8xx_2 driver:
+ *
+ * call sym_ccb_from_dsa, 0
+ * movge %icc, 0, %l0
+ * brz,pn %o0, .LL1303
+ * mov %o0, %l2
+ * membar #LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.
+ * Therefore, we put the memory barrier explicitly into a
+ * "branch always, predicted taken" delay slot to avoid the
+ * problem case.
+ */
+
+ .text
+
+99: retl
+ nop
+
+ .globl mb
+mb: ba,pt %xcc, 99b
+ membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad
+ .size mb, .-mb
+
+ .globl rmb
+rmb: ba,pt %xcc, 99b
+ membar #LoadLoad
+ .size rmb, .-rmb
+
+ .globl wmb
+wmb: ba,pt %xcc, 99b
+ membar #StoreStore
+ .size wmb, .-wmb
+
+ .globl membar_storeload
+membar_storeload:
+ ba,pt %xcc, 99b
+ membar #StoreLoad
+ .size membar_storeload, .-membar_storeload
+
+ .globl membar_storeload_storestore
+membar_storeload_storestore:
+ ba,pt %xcc, 99b
+ membar #StoreLoad | #StoreStore
+ .size membar_storeload_storestore, .-membar_storeload_storestore
+
+ .globl membar_storeload_loadload
+membar_storeload_loadload:
+ ba,pt %xcc, 99b
+ membar #StoreLoad | #LoadLoad
+ .size membar_storeload_loadload, .-membar_storeload_loadload
+
+ .globl membar_storestore_loadstore
+membar_storestore_loadstore:
+ ba,pt %xcc, 99b
+ membar #StoreStore | #LoadStore
+ .size membar_storestore_loadstore, .-membar_storestore_loadstore
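
The new out-of-line routines are called from C just like the generic barrier macros, so every call site inherits the branch-delay-slot placement described in the comment above. A minimal usage sketch under that assumption, mirroring the pci_strbuf_flush()/sbus_strbuf_flush() conversions earlier in this patch (the flushflag variable and timeout below are hypothetical):

	/* Publish a flag, then poll it: wmb() orders the store before the
	 * hardware kick, rmb() forces a fresh load on every iteration.
	 */
	static volatile unsigned long flushflag;	/* hypothetical */

	static void wait_for_flush(void)
	{
		int limit = 100000;

		flushflag = 0;
		wmb();
		/* ... tell the device to set flushflag when it is done ... */
		while (!flushflag) {
			if (!--limit)
				break;		/* caller logs a timeout */
			udelay(1);		/* from <asm/delay.h> */
			rmb();
		}
	}
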
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index 15b4cfe0755..302efbcba70 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -737,7 +737,8 @@ MODULE_LICENSE("GPL");
extern u32 tl0_solaris[8];
#define update_ttable(x) \
tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000; \
- __asm__ __volatile__ ("membar #StoreStore; flush %0" : : "r" (&tl0_solaris[3]))
+ wmb(); \
+ __asm__ __volatile__ ("flush %0" : : "r" (&tl0_solaris[3]))
#else
#endif
@@ -761,7 +762,8 @@ int init_module(void)
entry64_personality_patch |=
(offsetof(struct task_struct, personality) +
(sizeof(unsigned long) - 1));
- __asm__ __volatile__("membar #StoreStore; flush %0"
+ wmb();
+ __asm__ __volatile__("flush %0"
: : "r" (&entry64_personality_patch));
return 0;
}
diff --git a/arch/um/kernel/signal_kern.c b/arch/um/kernel/signal_kern.c
index 7807a3e8c42..03618bd13d5 100644
--- a/arch/um/kernel/signal_kern.c
+++ b/arch/um/kernel/signal_kern.c
@@ -87,12 +87,12 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
force_sigsegv(signr, current);
- }
- else if(!(ka->sa.sa_flags & SA_NODEFER)){
+ } else {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked,
&ka->sa.sa_mask);
- sigaddset(&current->blocked, signr);
+ if(!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
diff --git a/arch/v850/kernel/signal.c b/arch/v850/kernel/signal.c
index 37061e32e1a..633e4e1b825 100644
--- a/arch/v850/kernel/signal.c
+++ b/arch/v850/kernel/signal.c
@@ -462,13 +462,12 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
else
setup_frame(sig, ka, oldset, regs);
- if (!(ka->sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
}
/*
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 98590a989f3..d642fbf3da2 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -394,10 +394,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
#endif
ret = setup_rt_frame(sig, ka, info, oldset, regs);
- if (ret && !(ka->sa.sa_flags & SA_NODEFER)) {
+ if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- sigaddset(&current->blocked,sig);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index df6e1e17b09..dc42cede939 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -702,12 +702,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
if (ka.sa.sa_flags & SA_ONESHOT)
ka.sa.sa_handler = SIG_DFL;
- if (!(ka.sa.sa_flags & SA_NODEFER)) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
+ if (!(ka.sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, signr);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
return 1;
}
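
Every per-arch signal hunk above makes the same change: the handler's sa_mask is now always folded into current->blocked under the siglock, and only the addition of the delivered signal itself remains conditional on SA_NODEFER (SA_NOMASK on sparc). The common shape, written out once in C with the handle_signal() arguments abbreviated:

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
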
diff --git a/drivers/Kconfig b/drivers/Kconfig
index cecab0acc3f..46d655fab11 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -48,6 +48,8 @@ source "drivers/hwmon/Kconfig"
source "drivers/misc/Kconfig"
+source "drivers/mfd/Kconfig"
+
source "drivers/media/Kconfig"
source "drivers/video/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 126a851d565..9663132ed82 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_FB_INTEL) += video/intelfb/
obj-$(CONFIG_SERIO) += input/serio/
obj-y += serial/
obj-$(CONFIG_PARPORT) += parport/
-obj-y += base/ block/ misc/ net/ media/
+obj-y += base/ block/ misc/ mfd/ net/ media/
obj-$(CONFIG_NUBUS) += nubus/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_PPC_PMAC) += macintosh/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 10be36731ed..678a7e097f3 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -Idrivers/infiniband/include
-
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
ib_cm.o ib_umad.o ib_ucm.o
obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 729f0b0d983..5ac86f566dc 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -1,9 +1,10 @@
/*
- * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -40,7 +41,7 @@
#include <asm/bug.h>
-#include <ib_smi.h>
+#include <rdma/ib_smi.h>
#include "smi.h"
#include "agent_priv.h"
diff --git a/drivers/infiniband/core/agent_priv.h b/drivers/infiniband/core/agent_priv.h
index 17435af1e91..2ec6d7f1b7d 100644
--- a/drivers/infiniband/core/agent_priv.h
+++ b/drivers/infiniband/core/agent_priv.h
@@ -1,9 +1,9 @@
/*
- * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3042360c97e..f014e639088 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1,5 +1,8 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -32,12 +35,11 @@
* $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
#include "core_priv.h"
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 403ed125d8f..4de93ba274a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -43,8 +43,8 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <ib_cache.h>
-#include <ib_cm.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
@@ -83,7 +83,7 @@ struct cm_port {
struct cm_device {
struct list_head list;
struct ib_device *device;
- u64 ca_guid;
+ __be64 ca_guid;
struct cm_port port[0];
};
@@ -100,8 +100,8 @@ struct cm_work {
struct list_head list;
struct cm_port *port;
struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
- u32 local_id; /* Established / timewait */
- u32 remote_id;
+ __be32 local_id; /* Established / timewait */
+ __be32 remote_id;
struct ib_cm_event cm_event;
struct ib_sa_path_rec path[0];
};
@@ -110,8 +110,8 @@ struct cm_timewait_info {
struct cm_work work; /* Must be first. */
struct rb_node remote_qp_node;
struct rb_node remote_id_node;
- u64 remote_ca_guid;
- u32 remote_qpn;
+ __be64 remote_ca_guid;
+ __be32 remote_qpn;
u8 inserted_remote_qp;
u8 inserted_remote_id;
};
@@ -132,11 +132,11 @@ struct cm_id_private {
struct cm_av alt_av;
void *private_data;
- u64 tid;
- u32 local_qpn;
- u32 remote_qpn;
- u32 sq_psn;
- u32 rq_psn;
+ __be64 tid;
+ __be32 local_qpn;
+ __be32 remote_qpn;
+ __be32 sq_psn;
+ __be32 rq_psn;
int timeout_ms;
enum ib_mtu path_mtu;
u8 private_data_len;
@@ -253,7 +253,7 @@ static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
u16 dlid, u8 sl, u16 src_path_bits)
{
memset(ah_attr, 0, sizeof ah_attr);
- ah_attr->dlid = be16_to_cpu(dlid);
+ ah_attr->dlid = dlid;
ah_attr->sl = sl;
ah_attr->src_path_bits = src_path_bits;
ah_attr->port_num = port_num;
@@ -264,7 +264,7 @@ static void cm_init_av_for_response(struct cm_port *port,
{
av->port = port;
av->pkey_index = wc->pkey_index;
- cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid),
+ cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
wc->sl, wc->dlid_path_bits);
}
@@ -295,8 +295,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
return ret;
av->port = port;
- cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
- path->sl, path->slid & 0x7F);
+ cm_set_ah_attr(&av->ah_attr, av->port->port_num,
+ be16_to_cpu(path->dlid), path->sl,
+ be16_to_cpu(path->slid) & 0x7F);
av->packet_life_time = path->packet_life_time;
return 0;
}
@@ -309,26 +310,26 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
do {
spin_lock_irqsave(&cm.lock, flags);
ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
- (int *) &cm_id_priv->id.local_id);
+ (__force int *) &cm_id_priv->id.local_id);
spin_unlock_irqrestore(&cm.lock, flags);
} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
return ret;
}
-static void cm_free_id(u32 local_id)
+static void cm_free_id(__be32 local_id)
{
unsigned long flags;
spin_lock_irqsave(&cm.lock, flags);
- idr_remove(&cm.local_id_table, (int) local_id);
+ idr_remove(&cm.local_id_table, (__force int) local_id);
spin_unlock_irqrestore(&cm.lock, flags);
}
-static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
- cm_id_priv = idr_find(&cm.local_id_table, (int) local_id);
+ cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
@@ -339,7 +340,7 @@ static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
return cm_id_priv;
}
-static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id)
+static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
unsigned long flags;
@@ -356,8 +357,8 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
struct rb_node **link = &cm.listen_service_table.rb_node;
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
- u64 service_id = cm_id_priv->id.service_id;
- u64 service_mask = cm_id_priv->id.service_mask;
+ __be64 service_id = cm_id_priv->id.service_id;
+ __be64 service_mask = cm_id_priv->id.service_mask;
while (*link) {
parent = *link;
@@ -376,7 +377,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
return NULL;
}
-static struct cm_id_private * cm_find_listen(u64 service_id)
+static struct cm_id_private * cm_find_listen(__be64 service_id)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
@@ -400,8 +401,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
struct rb_node **link = &cm.remote_id_table.rb_node;
struct rb_node *parent = NULL;
struct cm_timewait_info *cur_timewait_info;
- u64 remote_ca_guid = timewait_info->remote_ca_guid;
- u32 remote_id = timewait_info->work.remote_id;
+ __be64 remote_ca_guid = timewait_info->remote_ca_guid;
+ __be32 remote_id = timewait_info->work.remote_id;
while (*link) {
parent = *link;
@@ -424,8 +425,8 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
return NULL;
}
-static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid,
- u32 remote_id)
+static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
+ __be32 remote_id)
{
struct rb_node *node = cm.remote_id_table.rb_node;
struct cm_timewait_info *timewait_info;
@@ -453,8 +454,8 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
struct rb_node **link = &cm.remote_qp_table.rb_node;
struct rb_node *parent = NULL;
struct cm_timewait_info *cur_timewait_info;
- u64 remote_ca_guid = timewait_info->remote_ca_guid;
- u32 remote_qpn = timewait_info->remote_qpn;
+ __be64 remote_ca_guid = timewait_info->remote_ca_guid;
+ __be32 remote_qpn = timewait_info->remote_qpn;
while (*link) {
parent = *link;
@@ -484,7 +485,7 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
union ib_gid *port_gid = &cm_id_priv->av.dgid;
- u32 remote_id = cm_id_priv->id.remote_id;
+ __be32 remote_id = cm_id_priv->id.remote_id;
while (*link) {
parent = *link;
@@ -598,7 +599,7 @@ static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
spin_unlock_irqrestore(&cm.lock, flags);
}
-static struct cm_timewait_info * cm_create_timewait_info(u32 local_id)
+static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
struct cm_timewait_info *timewait_info;
@@ -715,14 +716,15 @@ retest:
EXPORT_SYMBOL(ib_destroy_cm_id);
int ib_cm_listen(struct ib_cm_id *cm_id,
- u64 service_id,
- u64 service_mask)
+ __be64 service_id,
+ __be64 service_mask)
{
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
unsigned long flags;
int ret = 0;
- service_mask = service_mask ? service_mask : ~0ULL;
+ service_mask = service_mask ? service_mask :
+ __constant_cpu_to_be64(~0ULL);
service_id &= service_mask;
if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
(service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -735,8 +737,8 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm.lock, flags);
if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
- cm_id->service_id = __cpu_to_be64(cm.listen_service_id++);
- cm_id->service_mask = ~0ULL;
+ cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
+ cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
} else {
cm_id->service_id = service_id;
cm_id->service_mask = service_mask;
@@ -752,18 +754,19 @@ int ib_cm_listen(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_listen);
-static u64 cm_form_tid(struct cm_id_private *cm_id_priv,
- enum cm_msg_sequence msg_seq)
+static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
+ enum cm_msg_sequence msg_seq)
{
u64 hi_tid, low_tid;
hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
- low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
+ low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
+ (msg_seq << 30));
return cpu_to_be64(hi_tid | low_tid);
}
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
- enum cm_msg_attr_id attr_id, u64 tid)
+ __be16 attr_id, __be64 tid)
{
hdr->base_version = IB_MGMT_BASE_VERSION;
hdr->mgmt_class = IB_MGMT_CLASS_CM;
@@ -896,7 +899,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto error1;
}
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~0ULL;
+ cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
cm_id_priv->timeout_ms = cm_convert_to_ms(
param->primary_path->packet_life_time) * 2 +
cm_convert_to_ms(
@@ -963,7 +966,7 @@ static int cm_issue_rej(struct cm_port *port,
rej_msg->remote_comm_id = rcv_msg->local_comm_id;
rej_msg->local_comm_id = rcv_msg->remote_comm_id;
cm_rej_set_msg_rejected(rej_msg, msg_rejected);
- rej_msg->reason = reason;
+ rej_msg->reason = cpu_to_be16(reason);
if (ari && ari_length) {
cm_rej_set_reject_info_len(rej_msg, ari_length);
@@ -977,8 +980,8 @@ static int cm_issue_rej(struct cm_port *port,
return ret;
}
-static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid,
- u32 local_qpn, u32 remote_qpn)
+static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
+ __be32 local_qpn, __be32 remote_qpn)
{
return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
((local_ca_guid == remote_ca_guid) &&
@@ -1137,7 +1140,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
break;
}
- rej_msg->reason = reason;
+ rej_msg->reason = cpu_to_be16(reason);
if (ari && ari_length) {
cm_rej_set_reject_info_len(rej_msg, ari_length);
memcpy(rej_msg->ari, ari, ari_length);
@@ -1276,7 +1279,7 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = listen_cm_id_priv->id.context;
cm_id_priv->id.service_id = req_msg->service_id;
- cm_id_priv->id.service_mask = ~0ULL;
+ cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
@@ -1969,7 +1972,7 @@ static void cm_format_rej_event(struct cm_work *work)
param = &work->cm_event.param.rej_rcvd;
param->ari = rej_msg->ari;
param->ari_length = cm_rej_get_reject_info_len(rej_msg);
- param->reason = rej_msg->reason;
+ param->reason = __be16_to_cpu(rej_msg->reason);
work->cm_event.private_data = &rej_msg->private_data;
}
@@ -1978,20 +1981,20 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
unsigned long flags;
- u32 remote_id;
+ __be32 remote_id;
remote_id = rej_msg->local_comm_id;
- if (rej_msg->reason == IB_CM_REJ_TIMEOUT) {
+ if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
spin_lock_irqsave(&cm.lock, flags);
- timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari),
+ timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
remote_id);
if (!timewait_info) {
spin_unlock_irqrestore(&cm.lock, flags);
return NULL;
}
cm_id_priv = idr_find(&cm.local_id_table,
- (int) timewait_info->work.local_id);
+ (__force int) timewait_info->work.local_id);
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
@@ -2032,7 +2035,7 @@ static int cm_rej_handler(struct cm_work *work)
/* fall through */
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
- if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
+ if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
cm_enter_timewait(cm_id_priv);
else
cm_reset_to_idle(cm_id_priv);
@@ -2553,7 +2556,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
sidr_req_msg->request_id = cm_id_priv->id.local_id;
- sidr_req_msg->pkey = param->pkey;
+ sidr_req_msg->pkey = cpu_to_be16(param->pkey);
sidr_req_msg->service_id = param->service_id;
if (param->private_data && param->private_data_len)
@@ -2580,7 +2583,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
goto out;
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~0ULL;
+ cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
cm_id_priv->timeout_ms = param->timeout_ms;
cm_id_priv->max_cm_retries = param->max_cm_retries;
ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2621,7 +2624,7 @@ static void cm_format_sidr_req_event(struct cm_work *work,
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_req_rcvd;
- param->pkey = sidr_req_msg->pkey;
+ param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
param->listen_id = listen_id;
param->device = work->port->mad_agent->device;
param->port = work->port->port_num;
@@ -2645,7 +2648,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
wc = work->mad_recv_wc->wc;
- cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
+ cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
&cm_id_priv->av);
@@ -2673,7 +2676,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = cur_cm_id_priv->id.context;
cm_id_priv->id.service_id = sidr_req_msg->service_id;
- cm_id_priv->id.service_mask = ~0ULL;
+ cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
@@ -3175,10 +3178,10 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
-static u64 cm_get_ca_guid(struct ib_device *device)
+static __be64 cm_get_ca_guid(struct ib_device *device)
{
struct ib_device_attr *device_attr;
- u64 guid;
+ __be64 guid;
int ret;
device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
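The cm.c hunks above are mostly sparse endianness annotations: on-the-wire fields become __be16/__be32/__be64, byte swaps happen exactly once at the point a host-order value is needed (hence the dropped be16_to_cpu(dlid) double swap), and __force casts mark the places where the idr deliberately treats a big-endian id as an opaque int. A small standalone sketch of the rule being enforced, using userspace htons/ntohs and invented helper names rather than the kernel API:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t be16_t;	/* stands in for the kernel's __be16 */

static be16_t wire_from_host(uint16_t host) { return htons(host); }
static uint16_t host_from_wire(be16_t wire) { return ntohs(wire); }

int main(void)
{
	uint16_t dlid_host = 0x1234;	/* LID in CPU byte order */
	be16_t dlid_wire = wire_from_host(dlid_host);

	/* Correct: convert exactly once, where a host-order value is used. */
	printf("dlid = 0x%04x\n", (unsigned)host_from_wire(dlid_wire));

	/*
	 * The bug class the patch removes: swapping a value that is already
	 * in CPU order.  On a little-endian machine this prints 0x3412.
	 */
	printf("double-swapped dlid = 0x%04x\n", (unsigned)ntohs(dlid_host));
	return 0;
}

With the kernel's __be16 typedef and sparse endian checking enabled, the second call would be flagged at build time, which is what the annotations in the hunks above buy.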
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 15a309a77b2..813ab70bf6d 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -34,7 +34,7 @@
#if !defined(CM_MSGS_H)
#define CM_MSGS_H
-#include <ib_mad.h>
+#include <rdma/ib_mad.h>
/*
* Parameters to routines below should be in network-byte order, and values
@@ -43,19 +43,17 @@
#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
-enum cm_msg_attr_id {
- CM_REQ_ATTR_ID = __constant_htons(0x0010),
- CM_MRA_ATTR_ID = __constant_htons(0x0011),
- CM_REJ_ATTR_ID = __constant_htons(0x0012),
- CM_REP_ATTR_ID = __constant_htons(0x0013),
- CM_RTU_ATTR_ID = __constant_htons(0x0014),
- CM_DREQ_ATTR_ID = __constant_htons(0x0015),
- CM_DREP_ATTR_ID = __constant_htons(0x0016),
- CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
- CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
- CM_LAP_ATTR_ID = __constant_htons(0x0019),
- CM_APR_ATTR_ID = __constant_htons(0x001A)
-};
+#define CM_REQ_ATTR_ID __constant_htons(0x0010)
+#define CM_MRA_ATTR_ID __constant_htons(0x0011)
+#define CM_REJ_ATTR_ID __constant_htons(0x0012)
+#define CM_REP_ATTR_ID __constant_htons(0x0013)
+#define CM_RTU_ATTR_ID __constant_htons(0x0014)
+#define CM_DREQ_ATTR_ID __constant_htons(0x0015)
+#define CM_DREP_ATTR_ID __constant_htons(0x0016)
+#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
+#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
+#define CM_LAP_ATTR_ID __constant_htons(0x0019)
+#define CM_APR_ATTR_ID __constant_htons(0x001A)
enum cm_msg_sequence {
CM_MSG_SEQUENCE_REQ,
@@ -67,35 +65,35 @@ enum cm_msg_sequence {
struct cm_req_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 rsvd4;
- u64 service_id;
- u64 local_ca_guid;
- u32 rsvd24;
- u32 local_qkey;
+ __be32 local_comm_id;
+ __be32 rsvd4;
+ __be64 service_id;
+ __be64 local_ca_guid;
+ __be32 rsvd24;
+ __be32 local_qkey;
/* local QPN:24, responder resources:8 */
- u32 offset32;
+ __be32 offset32;
/* local EECN:24, initiator depth:8 */
- u32 offset36;
+ __be32 offset36;
/*
* remote EECN:24, remote CM response timeout:5,
* transport service type:2, end-to-end flow control:1
*/
- u32 offset40;
+ __be32 offset40;
/* starting PSN:24, local CM response timeout:5, retry count:3 */
- u32 offset44;
- u16 pkey;
+ __be32 offset44;
+ __be16 pkey;
/* path MTU:4, RDC exists:1, RNR retry count:3. */
u8 offset50;
/* max CM Retries:4, SRQ:1, rsvd:3 */
u8 offset51;
- u16 primary_local_lid;
- u16 primary_remote_lid;
+ __be16 primary_local_lid;
+ __be16 primary_remote_lid;
union ib_gid primary_local_gid;
union ib_gid primary_remote_gid;
/* flow label:20, rsvd:6, packet rate:6 */
- u32 primary_offset88;
+ __be32 primary_offset88;
u8 primary_traffic_class;
u8 primary_hop_limit;
/* SL:4, subnet local:1, rsvd:3 */
@@ -103,12 +101,12 @@ struct cm_req_msg {
/* local ACK timeout:5, rsvd:3 */
u8 primary_offset95;
- u16 alt_local_lid;
- u16 alt_remote_lid;
+ __be16 alt_local_lid;
+ __be16 alt_remote_lid;
union ib_gid alt_local_gid;
union ib_gid alt_remote_gid;
/* flow label:20, rsvd:6, packet rate:6 */
- u32 alt_offset132;
+ __be32 alt_offset132;
u8 alt_traffic_class;
u8 alt_hop_limit;
/* SL:4, subnet local:1, rsvd:3 */
@@ -120,12 +118,12 @@ struct cm_req_msg {
} __attribute__ ((packed));
-static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}
-static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
+static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(req_msg->offset32) &
@@ -208,13 +206,13 @@ static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
0xFFFFFFFE));
}
-static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}
static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
- u32 starting_psn)
+ __be32 starting_psn)
{
req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
(be32_to_cpu(req_msg->offset44) & 0x000000FF));
@@ -288,13 +286,13 @@ static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
((srq & 0x1) << 3));
}
-static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
- return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
+ return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}
static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
- u32 flow_label)
+ __be32 flow_label)
{
req_msg->primary_offset88 = cpu_to_be32(
(be32_to_cpu(req_msg->primary_offset88) &
@@ -350,13 +348,13 @@ static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_m
(local_ack_timeout << 3));
}
-static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
+static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
- return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
+ return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}
static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
- u32 flow_label)
+ __be32 flow_label)
{
req_msg->alt_offset132 = cpu_to_be32(
(be32_to_cpu(req_msg->alt_offset132) &
@@ -422,8 +420,8 @@ enum cm_msg_response {
struct cm_mra_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
/* message MRAed:2, rsvd:6 */
u8 offset8;
/* service timeout:5, rsvd:3 */
@@ -458,13 +456,13 @@ static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
struct cm_rej_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
/* message REJected:2, rsvd:6 */
u8 offset8;
/* reject info length:7, rsvd:1. */
u8 offset9;
- u16 reason;
+ __be16 reason;
u8 ari[IB_CM_REJ_ARI_LENGTH];
u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
@@ -495,45 +493,45 @@ static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
struct cm_rep_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
- u32 local_qkey;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
+ __be32 local_qkey;
/* local QPN:24, rsvd:8 */
- u32 offset12;
+ __be32 offset12;
/* local EECN:24, rsvd:8 */
- u32 offset16;
+ __be32 offset16;
/* starting PSN:24 rsvd:8 */
- u32 offset20;
+ __be32 offset20;
u8 resp_resources;
u8 initiator_depth;
/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
u8 offset26;
/* RNR retry count:3, SRQ:1, rsvd:5 */
u8 offset27;
- u64 local_ca_guid;
+ __be64 local_ca_guid;
u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
-static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}
-static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
+static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}
-static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
+static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}
static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
- u32 starting_psn)
+ __be32 starting_psn)
{
rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
(be32_to_cpu(rep_msg->offset20) & 0x000000FF));
@@ -600,8 +598,8 @@ static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
struct cm_rtu_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
@@ -610,21 +608,21 @@ struct cm_rtu_msg {
struct cm_dreq_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
/* remote QPN/EECN:24, rsvd:8 */
- u32 offset8;
+ __be32 offset8;
u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
-static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
+static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}
-static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
+static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
@@ -633,8 +631,8 @@ static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
struct cm_drep_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
@@ -643,37 +641,37 @@ struct cm_drep_msg {
struct cm_lap_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
- u32 rsvd8;
+ __be32 rsvd8;
/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
- u32 offset12;
- u32 rsvd16;
+ __be32 offset12;
+ __be32 rsvd16;
- u16 alt_local_lid;
- u16 alt_remote_lid;
+ __be16 alt_local_lid;
+ __be16 alt_remote_lid;
union ib_gid alt_local_gid;
union ib_gid alt_remote_gid;
/* flow label:20, rsvd:4, traffic class:8 */
- u32 offset56;
+ __be32 offset56;
u8 alt_hop_limit;
/* rsvd:2, packet rate:6 */
- uint8_t offset61;
+ u8 offset61;
/* SL:4, subnet local:1, rsvd:3 */
- uint8_t offset62;
+ u8 offset62;
/* local ACK timeout:5, rsvd:3 */
- uint8_t offset63;
+ u8 offset63;
u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
-static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}
-static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn)
+static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(lap_msg->offset12) &
@@ -693,17 +691,17 @@ static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
0xFFFFFF07));
}
-static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
+static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
- return be32_to_cpu(lap_msg->offset56) >> 12;
+ return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}
static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
- u32 flow_label)
+ __be32 flow_label)
{
- lap_msg->offset56 = cpu_to_be32((flow_label << 12) |
- (be32_to_cpu(lap_msg->offset56) &
- 0x00000FFF));
+ lap_msg->offset56 = cpu_to_be32(
+ (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
+ (be32_to_cpu(flow_label) << 12));
}
static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
@@ -766,8 +764,8 @@ static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
struct cm_apr_msg {
struct ib_mad_hdr hdr;
- u32 local_comm_id;
- u32 remote_comm_id;
+ __be32 local_comm_id;
+ __be32 remote_comm_id;
u8 info_length;
u8 ap_status;
@@ -779,10 +777,10 @@ struct cm_apr_msg {
struct cm_sidr_req_msg {
struct ib_mad_hdr hdr;
- u32 request_id;
- u16 pkey;
- u16 rsvd;
- u64 service_id;
+ __be32 request_id;
+ __be16 pkey;
+ __be16 rsvd;
+ __be64 service_id;
u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
@@ -790,26 +788,26 @@ struct cm_sidr_req_msg {
struct cm_sidr_rep_msg {
struct ib_mad_hdr hdr;
- u32 request_id;
+ __be32 request_id;
u8 status;
u8 info_length;
- u16 rsvd;
+ __be16 rsvd;
/* QPN:24, rsvd:8 */
- u32 offset8;
- u64 service_id;
- u32 qkey;
+ __be32 offset8;
+ __be64 service_id;
+ __be32 qkey;
u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
-static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
+static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}
static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
- u32 qpn)
+ __be32 qpn)
{
sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
(be32_to_cpu(sidr_rep_msg->offset8) &
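cm_msgs.h stores several subfields per big-endian word (for example the 24-bit QPN in offset32) and wraps each one in a get/set helper that converts, shifts and masks in one place. A standalone sketch of that read-modify-write pattern, with invented names and the QPN handled in host order for clarity:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t be32_t;	/* stands in for __be32 */

/* QPN occupies bits 31..8 of the wire word; bits 7..0 belong to another field. */
static be32_t qpn_set(be32_t word32, uint32_t qpn_host)
{
	return htonl((qpn_host << 8) | (ntohl(word32) & 0x000000FF));
}

static uint32_t qpn_get(be32_t word32)
{
	return ntohl(word32) >> 8;
}

int main(void)
{
	be32_t word = htonl(0x000000AB);	/* low byte already populated */

	word = qpn_set(word, 0x123456);
	assert(qpn_get(word) == 0x123456);
	assert((ntohl(word) & 0xFF) == 0xAB);	/* neighbouring field preserved */
	printf("wire word = 0x%08x\n", (unsigned)ntohl(word));
	return 0;
}

The kernel helpers do the same thing but keep the QPN argument itself in __be32, so callers never see a host-order value escape the accessor.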
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 797049626ff..7ad47a4b166 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>
int ib_device_register_sysfs(struct ib_device *device);
void ib_device_unregister_sysfs(struct ib_device *device);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9197e92d708..d3cf84e0158 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7763b31abba..d34a6f1c4f4 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -39,7 +39,7 @@
#include <linux/jhash.h>
#include <linux/kthread.h>
-#include <ib_fmr_pool.h>
+#include <rdma/ib_fmr_pool.h>
#include "core_priv.h"
@@ -334,6 +334,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
struct ib_pool_fmr *fmr;
struct ib_pool_fmr *tmp;
+ LIST_HEAD(fmr_list);
int i;
kthread_stop(pool->thread);
@@ -341,6 +342,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
i = 0;
list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+ if (fmr->remap_count) {
+ INIT_LIST_HEAD(&fmr_list);
+ list_add_tail(&fmr->fmr->list, &fmr_list);
+ ib_unmap_fmr(&fmr_list);
+ }
ib_dealloc_fmr(fmr->fmr);
list_del(&fmr->list);
kfree(fmr);
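The fmr_pool.c hunk makes ib_destroy_fmr_pool() unmap any FMR that is still mapped (remap_count != 0) by putting it on a temporary list and handing that list to ib_unmap_fmr() before deallocating it. A standalone sketch of the collect-onto-a-temporary-list idea, using an invented minimal intrusive list and a stand-in unmap routine (the patch itself unmaps one FMR at a time):

#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *head) { head->prev = head->next = head; }
static int list_empty(const struct list_node *head) { return head->next == head; }
static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fmr {
	int remap_count;
	struct list_node list;
};

/* Stand-in for ib_unmap_fmr(): invalidate every entry on the list. */
static void unmap_batch(struct list_node *head)
{
	struct list_node *n;

	for (n = head->next; n != head; n = n->next)
		container_of(n, struct fmr, list)->remap_count = 0;
}

int main(void)
{
	struct fmr pool[3] = { { .remap_count = 2 }, { .remap_count = 0 },
			       { .remap_count = 1 } };
	struct list_node batch;
	int i;

	list_init(&batch);
	/* Collect only the regions that are still mapped, as the patch does. */
	for (i = 0; i < 3; i++)
		if (pool[i].remap_count)
			list_add_tail(&pool[i].list, &batch);

	if (!list_empty(&batch))
		unmap_batch(&batch);

	for (i = 0; i < 3; i++)
		printf("fmr %d remap_count=%d\n", i, pool[i].remap_count);
	return 0;
}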
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b97e210ce9c..a4a4d9c1eef 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -693,7 +693,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
goto out;
}
- build_smp_wc(send_wr->wr_id, smp->dr_slid, send_wr->wr.ud.pkey_index,
+ build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+ send_wr->wr.ud.pkey_index,
send_wr->wr.ud.port_num, &mad_wc);
/* No GRH for DR SMP */
@@ -1554,7 +1555,7 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
}
struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
{
struct ib_mad_send_wr_private *mad_send_wr;
@@ -1597,7 +1598,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_send_wr_private *mad_send_wr;
struct ib_mad_send_wc mad_send_wc;
unsigned long flags;
- u64 tid;
+ __be64 tid;
INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -2165,7 +2166,8 @@ static void local_completions(void *data)
* Defined behavior is to complete response
* before request
*/
- build_smp_wc(local->wr_id, IB_LID_PERMISSIVE,
+ build_smp_wc(local->wr_id,
+ be16_to_cpu(IB_LID_PERMISSIVE),
0 /* pkey index */,
recv_mad_agent->agent.port_num, &wc);
@@ -2294,7 +2296,7 @@ static void timeout_sends(void *data)
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
-static void ib_mad_thread_completion_handler(struct ib_cq *cq)
+static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
struct ib_mad_port_private *port_priv = cq->cq_context;
@@ -2574,8 +2576,7 @@ static int ib_mad_port_open(struct ib_device *device,
cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
port_priv->cq = ib_create_cq(port_priv->device,
- (ib_comp_handler)
- ib_mad_thread_completion_handler,
+ ib_mad_thread_completion_handler,
NULL, port_priv, cq_size);
if (IS_ERR(port_priv->cq)) {
printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
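The ib_create_cq() hunk above drops a function-pointer cast by giving ib_mad_thread_completion_handler() the second argument the completion-handler prototype expects. A standalone sketch of why matching the callback signature beats casting; the callback type and handler names below are made up for the example:

#include <stdio.h>

/* Hypothetical callback type taking a context pointer and an argument. */
typedef void (*comp_handler_t)(void *cq, void *arg);

/*
 * Wrong shape: only one parameter.  Casting it to comp_handler_t compiles,
 * but calling through the cast pointer is undefined behaviour.
 */
static void handler_one_arg(void *cq)
{
	printf("cq=%p\n", cq);
}

/*
 * Right shape: accept (and, if unused, simply ignore) the second argument,
 * exactly as the patch does for ib_mad_thread_completion_handler().
 */
static void handler_two_args(void *cq, void *arg)
{
	(void)arg;
	printf("cq=%p\n", cq);
}

static void run(comp_handler_t handler, void *cq, void *arg)
{
	handler(cq, arg);
}

int main(void)
{
	int dummy_cq;

	(void)handler_one_arg;
	/* run((comp_handler_t)handler_one_arg, &dummy_cq, NULL);  undefined */
	run(handler_two_args, &dummy_cq, NULL);	/* well-defined */
	return 0;
}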
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 568da10b05a..f1ba794e0da 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -40,8 +40,8 @@
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
-#include <ib_mad.h>
-#include <ib_smi.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
#define PFX "ib_mad: "
@@ -121,7 +121,7 @@ struct ib_mad_send_wr_private {
struct ib_send_wr send_wr;
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
u64 wr_id; /* client WR ID */
- u64 tid;
+ __be64 tid;
unsigned long timeout;
int retries;
int retry;
@@ -144,7 +144,7 @@ struct ib_mad_local_private {
struct ib_send_wr send_wr;
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
u64 wr_id; /* client WR ID */
- u64 tid;
+ __be64 tid;
};
struct ib_mad_mgmt_method_table {
@@ -210,7 +210,7 @@ extern kmem_cache_t *ib_mad_cache;
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
struct ib_mad_send_wc *mad_send_wc);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 8f1eb80e421..43fd805e026 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -61,7 +61,7 @@ struct mad_rmpp_recv {
int seg_num;
int newwin;
- u64 tid;
+ __be64 tid;
u32 src_qp;
u16 slid;
u8 mgmt_class;
@@ -100,6 +100,121 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
}
}
+static int data_offset(u8 mgmt_class)
+{
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+ return offsetof(struct ib_sa_mad, data);
+ else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+ (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+ return offsetof(struct ib_vendor_mad, data);
+ else
+ return offsetof(struct ib_rmpp_mad, data);
+}
+
+static void format_ack(struct ib_rmpp_mad *ack,
+ struct ib_rmpp_mad *data,
+ struct mad_rmpp_recv *rmpp_recv)
+{
+ unsigned long flags;
+
+ memcpy(&ack->mad_hdr, &data->mad_hdr,
+ data_offset(data->mad_hdr.mgmt_class));
+
+ ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+ ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
+ ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+
+ spin_lock_irqsave(&rmpp_recv->lock, flags);
+ rmpp_recv->last_ack = rmpp_recv->seg_num;
+ ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
+ ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
+ spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+}
+
+static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
+ struct ib_mad_recv_wc *recv_wc)
+{
+ struct ib_mad_send_buf *msg;
+ struct ib_send_wr *bad_send_wr;
+ int hdr_len, ret;
+
+ hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+ msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
+ recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
+ hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
+ GFP_KERNEL);
+ if (!msg)
+ return;
+
+ format_ack((struct ib_rmpp_mad *) msg->mad,
+ (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
+ ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
+ &bad_send_wr);
+ if (ret)
+ ib_free_send_mad(msg);
+}
+
+static int alloc_response_msg(struct ib_mad_agent *agent,
+ struct ib_mad_recv_wc *recv_wc,
+ struct ib_mad_send_buf **msg)
+{
+ struct ib_mad_send_buf *m;
+ struct ib_ah *ah;
+ int hdr_len;
+
+ ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
+ recv_wc->recv_buf.grh, agent->port_num);
+ if (IS_ERR(ah))
+ return PTR_ERR(ah);
+
+ hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+ m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
+ recv_wc->wc->pkey_index, ah, 1, hdr_len,
+ sizeof(struct ib_rmpp_mad) - hdr_len,
+ GFP_KERNEL);
+ if (IS_ERR(m)) {
+ ib_destroy_ah(ah);
+ return PTR_ERR(m);
+ }
+ *msg = m;
+ return 0;
+}
+
+static void free_msg(struct ib_mad_send_buf *msg)
+{
+ ib_destroy_ah(msg->send_wr.wr.ud.ah);
+ ib_free_send_mad(msg);
+}
+
+static void nack_recv(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
+{
+ struct ib_mad_send_buf *msg;
+ struct ib_rmpp_mad *rmpp_mad;
+ struct ib_send_wr *bad_send_wr;
+ int ret;
+
+ ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
+ if (ret)
+ return;
+
+ rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+ memcpy(rmpp_mad, recv_wc->recv_buf.mad,
+ data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
+
+ rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+ rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
+ rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
+ ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+ rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
+ rmpp_mad->rmpp_hdr.seg_num = 0;
+ rmpp_mad->rmpp_hdr.paylen_newwin = 0;
+
+ ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
+ if (ret)
+ free_msg(msg);
+}
+
static void recv_timeout_handler(void *data)
{
struct mad_rmpp_recv *rmpp_recv = data;
@@ -115,8 +230,8 @@ static void recv_timeout_handler(void *data)
list_del(&rmpp_recv->list);
spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
- /* TODO: send abort. */
rmpp_wc = rmpp_recv->rmpp_wc;
+ nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
destroy_rmpp_recv(rmpp_recv);
ib_free_recv_mad(rmpp_wc);
}
@@ -230,60 +345,6 @@ insert_rmpp_recv(struct ib_mad_agent_private *agent,
return cur_rmpp_recv;
}
-static int data_offset(u8 mgmt_class)
-{
- if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
- return offsetof(struct ib_sa_mad, data);
- else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
- (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
- return offsetof(struct ib_vendor_mad, data);
- else
- return offsetof(struct ib_rmpp_mad, data);
-}
-
-static void format_ack(struct ib_rmpp_mad *ack,
- struct ib_rmpp_mad *data,
- struct mad_rmpp_recv *rmpp_recv)
-{
- unsigned long flags;
-
- memcpy(&ack->mad_hdr, &data->mad_hdr,
- data_offset(data->mad_hdr.mgmt_class));
-
- ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
- ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
- ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
-
- spin_lock_irqsave(&rmpp_recv->lock, flags);
- rmpp_recv->last_ack = rmpp_recv->seg_num;
- ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
- ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
- spin_unlock_irqrestore(&rmpp_recv->lock, flags);
-}
-
-static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
- struct ib_mad_recv_wc *recv_wc)
-{
- struct ib_mad_send_buf *msg;
- struct ib_send_wr *bad_send_wr;
- int hdr_len, ret;
-
- hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
- msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
- recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
- hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
- GFP_KERNEL);
- if (!msg)
- return;
-
- format_ack((struct ib_rmpp_mad *) msg->mad,
- (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
- ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
- &bad_send_wr);
- if (ret)
- ib_free_send_mad(msg);
-}
-
static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
struct ib_rmpp_mad *rmpp_mad;
@@ -559,6 +620,34 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
return ib_send_mad(mad_send_wr);
}
+static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
+ u8 rmpp_status)
+{
+ struct ib_mad_send_wr_private *mad_send_wr;
+ struct ib_mad_send_wc wc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&agent->lock, flags);
+ mad_send_wr = ib_find_send_mad(agent, tid);
+ if (!mad_send_wr)
+ goto out; /* Unmatched send */
+
+ if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+ (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+ goto out; /* Send is already done */
+
+ ib_mark_mad_done(mad_send_wr);
+ spin_unlock_irqrestore(&agent->lock, flags);
+
+ wc.status = IB_WC_REM_ABORT_ERR;
+ wc.vendor_err = rmpp_status;
+ wc.wr_id = mad_send_wr->wr_id;
+ ib_mad_complete_send_wr(mad_send_wr, &wc);
+ return;
+out:
+ spin_unlock_irqrestore(&agent->lock, flags);
+}
+
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
@@ -568,11 +657,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
int seg_num, newwin, ret;
rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
- if (rmpp_mad->rmpp_hdr.rmpp_status)
+ if (rmpp_mad->rmpp_hdr.rmpp_status) {
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
return;
+ }
seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+ if (newwin < seg_num) {
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ IB_MGMT_RMPP_STATUS_W2S);
+ nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
+ return;
+ }
spin_lock_irqsave(&agent->lock, flags);
mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
@@ -583,8 +682,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
(!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
goto out; /* Send is already done */
- if (seg_num > mad_send_wr->total_seg)
- goto out; /* Bad ACK */
+ if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
+ spin_unlock_irqrestore(&agent->lock, flags);
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ IB_MGMT_RMPP_STATUS_S2B);
+ nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
+ return;
+ }
if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
goto out; /* Old ACK */
@@ -628,6 +732,72 @@ out:
spin_unlock_irqrestore(&agent->lock, flags);
}
+static struct ib_mad_recv_wc *
+process_rmpp_data(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ struct ib_rmpp_hdr *rmpp_hdr;
+ u8 rmpp_status;
+
+ rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;
+
+ if (rmpp_hdr->rmpp_status) {
+ rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
+ goto bad;
+ }
+
+ if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+ if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
+ rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+ goto bad;
+ }
+ return start_rmpp(agent, mad_recv_wc);
+ } else {
+ if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
+ rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+ goto bad;
+ }
+ return continue_rmpp(agent, mad_recv_wc);
+ }
+bad:
+ nack_recv(agent, mad_recv_wc, rmpp_status);
+ ib_free_recv_mad(mad_recv_wc);
+ return NULL;
+}
+
+static void process_rmpp_stop(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ struct ib_rmpp_mad *rmpp_mad;
+
+ rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+ if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ } else
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
+static void process_rmpp_abort(struct ib_mad_agent_private *agent,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ struct ib_rmpp_mad *rmpp_mad;
+
+ rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+ if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
+ rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+ } else
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
@@ -638,23 +808,29 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
return mad_recv_wc;
- if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
+ if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ IB_MGMT_RMPP_STATUS_UNV);
+ nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
goto out;
+ }
switch (rmpp_mad->rmpp_hdr.rmpp_type) {
case IB_MGMT_RMPP_TYPE_DATA:
- if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
- return start_rmpp(agent, mad_recv_wc);
- else
- return continue_rmpp(agent, mad_recv_wc);
+ return process_rmpp_data(agent, mad_recv_wc);
case IB_MGMT_RMPP_TYPE_ACK:
process_rmpp_ack(agent, mad_recv_wc);
break;
case IB_MGMT_RMPP_TYPE_STOP:
+ process_rmpp_stop(agent, mad_recv_wc);
+ break;
case IB_MGMT_RMPP_TYPE_ABORT:
- /* TODO: process_rmpp_nack(agent, mad_recv_wc); */
+ process_rmpp_abort(agent, mad_recv_wc);
break;
default:
+ abort_send(agent, rmpp_mad->mad_hdr.tid,
+ IB_MGMT_RMPP_STATUS_BADT);
+ nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
break;
}
out:
@@ -714,7 +890,10 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
msg = (struct ib_mad_send_buf *) (unsigned long)
mad_send_wc->wr_id;
- ib_free_send_mad(msg);
+ if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
+ ib_free_send_mad(msg);
+ else
+ free_msg(msg);
return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
}
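The new process_rmpp_ack() checks above abort the transfer when the granted window is smaller than the acknowledged segment (W2S) or when an ACK names a segment that was never sent or lies beyond the granted window (S2B), and silently drop stale ACKs. A standalone model of that decision order, with invented field and enum names:

#include <assert.h>

enum ack_action { ACK_ABORT_W2S, ACK_ABORT_S2B, ACK_IGNORE_OLD, ACK_ACCEPT };

struct send_state {
	int total_seg;	/* number of segments in the outbound MAD */
	int last_ack;	/* highest segment acknowledged so far */
	int newwin;	/* window the receiver last granted us */
};

static enum ack_action classify_ack(const struct send_state *s,
				    int seg_num, int newwin)
{
	if (newwin < seg_num)
		return ACK_ABORT_W2S;	/* window smaller than ACKed segment */
	if (seg_num > s->total_seg || seg_num > s->newwin)
		return ACK_ABORT_S2B;	/* ACK for a segment never (yet) sent */
	if (newwin < s->newwin || seg_num < s->last_ack)
		return ACK_IGNORE_OLD;	/* stale ACK, drop it */
	return ACK_ACCEPT;
}

int main(void)
{
	struct send_state s = { .total_seg = 10, .last_ack = 3, .newwin = 5 };

	assert(classify_ack(&s, 4, 3) == ACK_ABORT_W2S);
	assert(classify_ack(&s, 11, 12) == ACK_ABORT_S2B);
	assert(classify_ack(&s, 2, 5) == ACK_IGNORE_OLD);
	assert(classify_ack(&s, 5, 8) == ACK_ACCEPT);
	return 0;
}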
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index eb5ff54c10d..35df5010e72 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -32,7 +33,7 @@
* $Id: packer.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <ib_pack.h>
+#include <rdma/ib_pack.h>
static u64 value_read(int offset, int size, void *structure)
{
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 795184931c8..126ac80db7b 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -44,8 +44,8 @@
#include <linux/kref.h>
#include <linux/idr.h>
-#include <ib_pack.h>
-#include <ib_sa.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_sa.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index b4b284324a3..35852e794e2 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -1,9 +1,10 @@
/*
- * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -36,7 +37,7 @@
* $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
*/
-#include <ib_smi.h>
+#include <rdma/ib_smi.h>
#include "smi.h"
/*
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 90d51b179ab..fae1c2dcee5 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,7 @@
#include "core_priv.h"
-#include <ib_mad.h>
+#include <rdma/ib_mad.h>
struct ib_port {
struct kobject kobj;
@@ -253,14 +255,14 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
return ret;
return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((u16 *) gid.raw)[0]),
- be16_to_cpu(((u16 *) gid.raw)[1]),
- be16_to_cpu(((u16 *) gid.raw)[2]),
- be16_to_cpu(((u16 *) gid.raw)[3]),
- be16_to_cpu(((u16 *) gid.raw)[4]),
- be16_to_cpu(((u16 *) gid.raw)[5]),
- be16_to_cpu(((u16 *) gid.raw)[6]),
- be16_to_cpu(((u16 *) gid.raw)[7]));
+ be16_to_cpu(((__be16 *) gid.raw)[0]),
+ be16_to_cpu(((__be16 *) gid.raw)[1]),
+ be16_to_cpu(((__be16 *) gid.raw)[2]),
+ be16_to_cpu(((__be16 *) gid.raw)[3]),
+ be16_to_cpu(((__be16 *) gid.raw)[4]),
+ be16_to_cpu(((__be16 *) gid.raw)[5]),
+ be16_to_cpu(((__be16 *) gid.raw)[6]),
+ be16_to_cpu(((__be16 *) gid.raw)[7]));
}
static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
@@ -332,11 +334,11 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
break;
case 16:
ret = sprintf(buf, "%u\n",
- be16_to_cpup((u16 *)(out_mad->data + 40 + offset / 8)));
+ be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
break;
case 32:
ret = sprintf(buf, "%u\n",
- be32_to_cpup((u32 *)(out_mad->data + 40 + offset / 8)));
+ be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
break;
default:
ret = 0;
@@ -598,10 +600,10 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
return ret;
return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((u16 *) &attr.sys_image_guid)[0]),
- be16_to_cpu(((u16 *) &attr.sys_image_guid)[1]),
- be16_to_cpu(((u16 *) &attr.sys_image_guid)[2]),
- be16_to_cpu(((u16 *) &attr.sys_image_guid)[3]));
+ be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
+ be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
+ be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
+ be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
}
static ssize_t show_node_guid(struct class_device *cdev, char *buf)
@@ -615,10 +617,10 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf)
return ret;
return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((u16 *) &attr.node_guid)[0]),
- be16_to_cpu(((u16 *) &attr.node_guid)[1]),
- be16_to_cpu(((u16 *) &attr.node_guid)[2]),
- be16_to_cpu(((u16 *) &attr.node_guid)[3]));
+ be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
+ be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
+ be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
+ be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
}
static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
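show_port_gid() above prints a 16-byte GID as eight big-endian 16-bit groups. A standalone sketch of the same formatting, using memcpy plus ntohs in place of the kernel's __be16 pointer cast:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void print_gid(const uint8_t raw[16])
{
	uint16_t group;
	int i;

	for (i = 0; i < 8; i++) {
		memcpy(&group, raw + 2 * i, sizeof(group));
		printf("%04x%s", (unsigned)ntohs(group), i < 7 ? ":" : "\n");
	}
}

int main(void)
{
	/* fe80::2:c903:1:2345, stored in wire (big-endian) order */
	const uint8_t gid[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
				  0x00, 0x02, 0xc9, 0x03, 0x00, 0x01, 0x23, 0x45 };

	print_gid(gid);
	return 0;
}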
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 61d07c732f4..79595826ccc 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -73,14 +74,18 @@ static struct semaphore ctx_id_mutex;
static struct idr ctx_id_table;
static int ctx_id_rover = 0;
-static struct ib_ucm_context *ib_ucm_ctx_get(int id)
+static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
{
struct ib_ucm_context *ctx;
down(&ctx_id_mutex);
ctx = idr_find(&ctx_id_table, id);
- if (ctx)
- ctx->ref++;
+ if (!ctx)
+ ctx = ERR_PTR(-ENOENT);
+ else if (ctx->file != file)
+ ctx = ERR_PTR(-EINVAL);
+ else
+ atomic_inc(&ctx->ref);
up(&ctx_id_mutex);
return ctx;
@@ -88,21 +93,37 @@ static struct ib_ucm_context *ib_ucm_ctx_get(int id)
static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
{
+ if (atomic_dec_and_test(&ctx->ref))
+ wake_up(&ctx->wait);
+}
+
+static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
+{
+ struct ib_ucm_context *ctx;
struct ib_ucm_event *uevent;
down(&ctx_id_mutex);
-
- ctx->ref--;
- if (!ctx->ref)
+ ctx = idr_find(&ctx_id_table, id);
+ if (!ctx)
+ ctx = ERR_PTR(-ENOENT);
+ else if (ctx->file != file)
+ ctx = ERR_PTR(-EINVAL);
+ else
idr_remove(&ctx_id_table, ctx->id);
-
up(&ctx_id_mutex);
- if (ctx->ref)
- return;
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- down(&ctx->file->mutex);
+ atomic_dec(&ctx->ref);
+ wait_event(ctx->wait, !atomic_read(&ctx->ref));
+
+ /* No new events will be generated after destroying the cm_id. */
+ if (!IS_ERR(ctx->cm_id))
+ ib_destroy_cm_id(ctx->cm_id);
+ /* Cleanup events not yet reported to the user. */
+ down(&file->mutex);
list_del(&ctx->file_list);
while (!list_empty(&ctx->events)) {
@@ -117,13 +138,10 @@ static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
kfree(uevent);
}
+ up(&file->mutex);
- up(&ctx->file->mutex);
-
- ucm_dbg("Destroyed CM ID <%d>\n", ctx->id);
-
- ib_destroy_cm_id(ctx->cm_id);
kfree(ctx);
+ return 0;
}
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
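The ucm.c hunks above replace the mutex-protected ctx->ref counter with an atomic reference count and a wait queue: lookups take a reference, ib_ucm_ctx_put() wakes the destroyer on the final drop, and ib_ucm_destroy_ctx() unpublishes the id, drops its own reference and waits for the count to reach zero before tearing the context down. A standalone pthreads sketch of that pattern (kernel primitives replaced by a mutex and condition variable; names invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int ref;		/* held references, starts at 1 (creator) */
	int published;		/* still visible to lookups? */
	pthread_mutex_t lock;
	pthread_cond_t idle;	/* signalled when ref reaches 0 */
};

static struct ctx *ctx_create(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		abort();
	c->ref = 1;
	c->published = 1;
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->idle, NULL);
	return c;
}

/* Lookup path: only hand out references while the object is published. */
static struct ctx *ctx_get(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	if (!c->published) {
		pthread_mutex_unlock(&c->lock);
		return NULL;
	}
	c->ref++;
	pthread_mutex_unlock(&c->lock);
	return c;
}

static void ctx_put(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	if (--c->ref == 0)
		pthread_cond_signal(&c->idle);	/* wake a waiting destroy */
	pthread_mutex_unlock(&c->lock);
}

/* Destroy path: unpublish, drop the creator's reference, wait for idle. */
static void ctx_destroy(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->published = 0;
	c->ref--;
	while (c->ref > 0)
		pthread_cond_wait(&c->idle, &c->lock);
	pthread_mutex_unlock(&c->lock);

	pthread_cond_destroy(&c->idle);
	pthread_mutex_destroy(&c->lock);
	free(c);
}

int main(void)
{
	struct ctx *c = ctx_create();
	struct ctx *user = ctx_get(c);	/* e.g. an in-flight event handler */

	ctx_put(user);			/* handler finishes first ... */
	ctx_destroy(c);			/* ... so destroy does not block */
	printf("destroyed cleanly\n");
	return 0;
}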
@@ -135,11 +153,11 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
if (!ctx)
return NULL;
- ctx->ref = 1; /* user reference */
+ atomic_set(&ctx->ref, 1);
+ init_waitqueue_head(&ctx->wait);
ctx->file = file;
INIT_LIST_HEAD(&ctx->events);
- init_MUTEX(&ctx->mutex);
list_add_tail(&ctx->file_list, &file->ctxs);
@@ -177,8 +195,8 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
if (!kpath || !upath)
return;
- memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid));
- memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid));
+ memcpy(upath->dgid, kpath->dgid.raw, sizeof *upath->dgid);
+ memcpy(upath->sgid, kpath->sgid.raw, sizeof *upath->sgid);
upath->dlid = kpath->dlid;
upath->slid = kpath->slid;
@@ -201,10 +219,11 @@ static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
kpath->packet_life_time_selector;
}
-static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
+static void ib_ucm_event_req_get(struct ib_ucm_context *ctx,
+ struct ib_ucm_req_event_resp *ureq,
struct ib_cm_req_event_param *kreq)
{
- ureq->listen_id = (long)kreq->listen_id->context;
+ ureq->listen_id = ctx->id;
ureq->remote_ca_guid = kreq->remote_ca_guid;
ureq->remote_qkey = kreq->remote_qkey;
@@ -240,34 +259,11 @@ static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
urep->srq = krep->srq;
}
-static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej,
- struct ib_cm_rej_event_param *krej)
-{
- urej->reason = krej->reason;
-}
-
-static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra,
- struct ib_cm_mra_event_param *kmra)
-{
- umra->timeout = kmra->service_timeout;
-}
-
-static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap,
- struct ib_cm_lap_event_param *klap)
-{
- ib_ucm_event_path_get(&ulap->path, klap->alternate_path);
-}
-
-static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr,
- struct ib_cm_apr_event_param *kapr)
-{
- uapr->status = kapr->ap_status;
-}
-
-static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq,
+static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx,
+ struct ib_ucm_sidr_req_event_resp *ureq,
struct ib_cm_sidr_req_event_param *kreq)
{
- ureq->listen_id = (long)kreq->listen_id->context;
+ ureq->listen_id = ctx->id;
ureq->pkey = kreq->pkey;
}
@@ -279,19 +275,18 @@ static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
urep->qpn = krep->qpn;
};
-static int ib_ucm_event_process(struct ib_cm_event *evt,
+static int ib_ucm_event_process(struct ib_ucm_context *ctx,
+ struct ib_cm_event *evt,
struct ib_ucm_event *uvt)
{
void *info = NULL;
- int result;
switch (evt->event) {
case IB_CM_REQ_RECEIVED:
- ib_ucm_event_req_get(&uvt->resp.u.req_resp,
+ ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp,
&evt->param.req_rcvd);
uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
- uvt->resp.present |= (evt->param.req_rcvd.primary_path ?
- IB_UCM_PRES_PRIMARY : 0);
+ uvt->resp.present = IB_UCM_PRES_PRIMARY;
uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
IB_UCM_PRES_ALTERNATE : 0);
break;
@@ -299,57 +294,46 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
&evt->param.rep_rcvd);
uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
-
break;
case IB_CM_RTU_RECEIVED:
uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
uvt->resp.u.send_status = evt->param.send_status;
-
break;
case IB_CM_DREQ_RECEIVED:
uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
uvt->resp.u.send_status = evt->param.send_status;
-
break;
case IB_CM_DREP_RECEIVED:
uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
uvt->resp.u.send_status = evt->param.send_status;
-
break;
case IB_CM_MRA_RECEIVED:
- ib_ucm_event_mra_get(&uvt->resp.u.mra_resp,
- &evt->param.mra_rcvd);
+ uvt->resp.u.mra_resp.timeout =
+ evt->param.mra_rcvd.service_timeout;
uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
-
break;
case IB_CM_REJ_RECEIVED:
- ib_ucm_event_rej_get(&uvt->resp.u.rej_resp,
- &evt->param.rej_rcvd);
+ uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
uvt->info_len = evt->param.rej_rcvd.ari_length;
info = evt->param.rej_rcvd.ari;
-
break;
case IB_CM_LAP_RECEIVED:
- ib_ucm_event_lap_get(&uvt->resp.u.lap_resp,
- &evt->param.lap_rcvd);
+ ib_ucm_event_path_get(&uvt->resp.u.lap_resp.path,
+ evt->param.lap_rcvd.alternate_path);
uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
- uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ?
- IB_UCM_PRES_ALTERNATE : 0);
+ uvt->resp.present = IB_UCM_PRES_ALTERNATE;
break;
case IB_CM_APR_RECEIVED:
- ib_ucm_event_apr_get(&uvt->resp.u.apr_resp,
- &evt->param.apr_rcvd);
+ uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
uvt->info_len = evt->param.apr_rcvd.info_len;
info = evt->param.apr_rcvd.apr_info;
-
break;
case IB_CM_SIDR_REQ_RECEIVED:
- ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp,
+ ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp,
&evt->param.sidr_req_rcvd);
uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
-
break;
case IB_CM_SIDR_REP_RECEIVED:
ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
@@ -357,43 +341,35 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
info = evt->param.sidr_rep_rcvd.info;
-
break;
default:
uvt->resp.u.send_status = evt->param.send_status;
-
break;
}
- if (uvt->data_len && evt->private_data) {
-
+ if (uvt->data_len) {
uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
- if (!uvt->data) {
- result = -ENOMEM;
- goto error;
- }
+ if (!uvt->data)
+ goto err1;
memcpy(uvt->data, evt->private_data, uvt->data_len);
uvt->resp.present |= IB_UCM_PRES_DATA;
}
- if (uvt->info_len && info) {
-
+ if (uvt->info_len) {
uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
- if (!uvt->info) {
- result = -ENOMEM;
- goto error;
- }
+ if (!uvt->info)
+ goto err2;
memcpy(uvt->info, info, uvt->info_len);
uvt->resp.present |= IB_UCM_PRES_INFO;
}
-
return 0;
-error:
- kfree(uvt->info);
+
+err2:
kfree(uvt->data);
- return result;
+err1:
+ return -ENOMEM;
}
static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
@@ -403,63 +379,42 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
struct ib_ucm_context *ctx;
int result = 0;
int id;
- /*
- * lookup correct context based on event type.
- */
- switch (event->event) {
- case IB_CM_REQ_RECEIVED:
- id = (long)event->param.req_rcvd.listen_id->context;
- break;
- case IB_CM_SIDR_REQ_RECEIVED:
- id = (long)event->param.sidr_req_rcvd.listen_id->context;
- break;
- default:
- id = (long)cm_id->context;
- break;
- }
- ucm_dbg("Event. CM ID <%d> event <%d>\n", id, event->event);
-
- ctx = ib_ucm_ctx_get(id);
- if (!ctx)
- return -ENOENT;
+ ctx = cm_id->context;
if (event->event == IB_CM_REQ_RECEIVED ||
event->event == IB_CM_SIDR_REQ_RECEIVED)
id = IB_UCM_CM_ID_INVALID;
+ else
+ id = ctx->id;
uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
- if (!uevent) {
- result = -ENOMEM;
- goto done;
- }
+ if (!uevent)
+ goto err1;
memset(uevent, 0, sizeof(*uevent));
-
uevent->resp.id = id;
uevent->resp.event = event->event;
- result = ib_ucm_event_process(event, uevent);
+ result = ib_ucm_event_process(ctx, event, uevent);
if (result)
- goto done;
+ goto err2;
uevent->ctx = ctx;
- uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED ||
- event->event == IB_CM_SIDR_REQ_RECEIVED ) ?
- cm_id : NULL);
+ uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL;
down(&ctx->file->mutex);
-
list_add_tail(&uevent->file_list, &ctx->file->events);
list_add_tail(&uevent->ctx_list, &ctx->events);
-
wake_up_interruptible(&ctx->file->poll_wait);
-
up(&ctx->file->mutex);
-done:
- ctx->error = result;
- ib_ucm_ctx_put(ctx); /* func reference */
- return result;
+ return 0;
+
+err2:
+ kfree(uevent);
+err1:
+ /* Destroy new cm_id's */
+ return (id == IB_UCM_CM_ID_INVALID);
}
static ssize_t ib_ucm_event(struct ib_ucm_file *file,
@@ -517,9 +472,8 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
goto done;
}
- ctx->cm_id = uevent->cm_id;
- ctx->cm_id->cm_handler = ib_ucm_event_handler;
- ctx->cm_id->context = (void *)(unsigned long)ctx->id;
+ ctx->cm_id = uevent->cm_id;
+ ctx->cm_id->context = ctx;
uevent->resp.id = ctx->id;
@@ -585,30 +539,29 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ down(&file->mutex);
ctx = ib_ucm_ctx_alloc(file);
+ up(&file->mutex);
if (!ctx)
return -ENOMEM;
- ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler,
- (void *)(unsigned long)ctx->id);
- if (!ctx->cm_id) {
- result = -ENOMEM;
- goto err_cm;
+ ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
+ if (IS_ERR(ctx->cm_id)) {
+ result = PTR_ERR(ctx->cm_id);
+ goto err;
}
resp.id = ctx->id;
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp))) {
result = -EFAULT;
- goto err_ret;
+ goto err;
}
return 0;
-err_ret:
- ib_destroy_cm_id(ctx->cm_id);
-err_cm:
- ib_ucm_ctx_put(ctx); /* user reference */
+err:
+ ib_ucm_destroy_ctx(file, ctx->id);
return result;
}
@@ -617,19 +570,11 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
int in_len, int out_len)
{
struct ib_ucm_destroy_id cmd;
- struct ib_ucm_context *ctx;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx)
- return -ENOENT;
-
- ib_ucm_ctx_put(ctx); /* user reference */
- ib_ucm_ctx_put(ctx); /* func reference */
-
- return 0;
+ return ib_ucm_destroy_ctx(file, cmd.id);
}
static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
@@ -647,15 +592,9 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx)
- return -ENOENT;
-
- down(&ctx->file->mutex);
- if (ctx->file != file) {
- result = -EINVAL;
- goto done;
- }
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
resp.service_id = ctx->cm_id->service_id;
resp.service_mask = ctx->cm_id->service_mask;
@@ -666,9 +605,7 @@ static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
&resp, sizeof(resp)))
result = -EFAULT;
-done:
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
+ ib_ucm_ctx_put(ctx);
return result;
}
@@ -683,19 +620,12 @@ static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx)
- return -ENOENT;
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
- result = ib_cm_listen(ctx->cm_id, cmd.service_id,
- cmd.service_mask);
-
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
+ result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
+ ib_ucm_ctx_put(ctx);
return result;
}
@@ -710,18 +640,12 @@ static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx)
- return -ENOENT;
-
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
- result = ib_cm_establish(ctx->cm_id);
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
+ result = ib_cm_establish(ctx->cm_id);
+ ib_ucm_ctx_put(ctx);
return result;
}
@@ -768,8 +692,8 @@ static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
return -EFAULT;
}
- memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid));
- memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid));
+ memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof sa_path->dgid);
+ memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof sa_path->sgid);
sa_path->dlid = ucm_path.dlid;
sa_path->slid = ucm_path.slid;
@@ -839,25 +763,17 @@ static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
param.max_cm_retries = cmd.max_cm_retries;
param.srq = cmd.srq;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
-
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
result = ib_send_cm_req(ctx->cm_id, &param);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
done:
kfree(param.private_data);
kfree(param.primary_path);
kfree(param.alternate_path);
-
return result;
}
@@ -890,23 +806,14 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
param.rnr_retry_count = cmd.rnr_retry_count;
param.srq = cmd.srq;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
-
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
result = ib_send_cm_rep(ctx->cm_id, &param);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
-done:
kfree(param.private_data);
-
return result;
}
@@ -928,23 +835,14 @@ static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
if (result)
return result;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
-
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
result = func(ctx->cm_id, private_data, cmd.len);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
-done:
kfree(private_data);
-
return result;
}
@@ -995,26 +893,17 @@ static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
if (result)
goto done;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
-
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
- result = func(ctx->cm_id, cmd.status,
- info, cmd.info_len,
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
+ result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
data, cmd.data_len);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
done:
kfree(data);
kfree(info);
-
return result;
}
@@ -1048,24 +937,14 @@ static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
if (result)
return result;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
+ result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
- result = ib_send_cm_mra(ctx->cm_id, cmd.timeout,
- data, cmd.len);
-
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
-done:
kfree(data);
-
return result;
}
@@ -1090,24 +969,16 @@ static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
if (result)
goto done;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
-
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
done:
kfree(data);
kfree(path);
-
return result;
}
@@ -1140,24 +1011,16 @@ static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
param.max_cm_retries = cmd.max_cm_retries;
param.pkey = cmd.pkey;
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
-
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
result = ib_send_cm_sidr_req(ctx->cm_id, &param);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
done:
kfree(param.private_data);
kfree(param.path);
-
return result;
}
@@ -1184,30 +1047,22 @@ static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
if (result)
goto done;
- param.qp_num = cmd.qpn;
- param.qkey = cmd.qkey;
- param.status = cmd.status;
- param.info_length = cmd.info_len;
- param.private_data_len = cmd.data_len;
-
- ctx = ib_ucm_ctx_get(cmd.id);
- if (!ctx) {
- result = -ENOENT;
- goto done;
- }
+ param.qp_num = cmd.qpn;
+ param.qkey = cmd.qkey;
+ param.status = cmd.status;
+ param.info_length = cmd.info_len;
+ param.private_data_len = cmd.data_len;
- down(&ctx->file->mutex);
- if (ctx->file != file)
- result = -EINVAL;
- else
+ ctx = ib_ucm_ctx_get(file, cmd.id);
+ if (!IS_ERR(ctx)) {
result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
+ ib_ucm_ctx_put(ctx);
+ } else
+ result = PTR_ERR(ctx);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* func reference */
done:
kfree(param.private_data);
kfree(param.info);
-
return result;
}
@@ -1305,22 +1160,17 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
struct ib_ucm_context *ctx;
down(&file->mutex);
-
while (!list_empty(&file->ctxs)) {
ctx = list_entry(file->ctxs.next,
struct ib_ucm_context, file_list);
- up(&ctx->file->mutex);
- ib_ucm_ctx_put(ctx); /* user reference */
+ up(&file->mutex);
+ ib_ucm_destroy_ctx(file, ctx->id);
down(&file->mutex);
}
-
up(&file->mutex);
-
kfree(file);
-
- ucm_dbg("Deleted struct\n");
return 0;
}
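
The ucm.c rework above drops the old ref/error fields and per-context semaphore in favour of an atomic reference count plus wait queue, and every command path now resolves its context through ib_ucm_ctx_get(file, id) / ib_ucm_ctx_put(). The helpers themselves are outside this hunk; a minimal sketch of the pattern they imply is below, assuming a global ctx_id_mutex and ctx_id_table idr (both names are assumptions, not taken from this diff):

	static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
	{
		struct ib_ucm_context *ctx;

		down(&ctx_id_mutex);			/* assumed global idr lock */
		ctx = idr_find(&ctx_id_table, id);	/* assumed id -> context idr */
		if (!ctx)
			ctx = ERR_PTR(-ENOENT);
		else if (ctx->file != file)
			ctx = ERR_PTR(-EINVAL);		/* not owned by this fd */
		else
			atomic_inc(&ctx->ref);
		up(&ctx_id_mutex);

		return ctx;
	}

	static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
	{
		if (atomic_dec_and_test(&ctx->ref))
			wake_up(&ctx->wait);		/* destroy path waits for ref to drop */
	}

This is why the per-command ownership checks and down(&ctx->file->mutex)/up() pairs could be deleted: ownership is verified inside the lookup, and lifetime is covered by the reference count.
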
diff --git a/drivers/infiniband/core/ucm.h b/drivers/infiniband/core/ucm.h
index 6d36606151b..c8819b928a1 100644
--- a/drivers/infiniband/core/ucm.h
+++ b/drivers/infiniband/core/ucm.h
@@ -40,17 +40,15 @@
#include <linux/cdev.h>
#include <linux/idr.h>
-#include <ib_cm.h>
-#include <ib_user_cm.h>
+#include <rdma/ib_cm.h>
+#include <rdma/ib_user_cm.h>
#define IB_UCM_CM_ID_INVALID 0xffffffff
struct ib_ucm_file {
struct semaphore mutex;
struct file *filp;
- /*
- * list of pending events
- */
+
struct list_head ctxs; /* list of active connections */
struct list_head events; /* list of pending events */
wait_queue_head_t poll_wait;
@@ -58,12 +56,11 @@ struct ib_ucm_file {
struct ib_ucm_context {
int id;
- int ref;
- int error;
+ wait_queue_head_t wait;
+ atomic_t ref;
struct ib_ucm_file *file;
struct ib_cm_id *cm_id;
- struct semaphore mutex;
struct list_head events; /* list of pending events. */
struct list_head file_list; /* member in file ctx list */
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index dc4eb1db5e9..527b23450ab 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +35,7 @@
#include <linux/errno.h>
-#include <ib_pack.h>
+#include <rdma/ib_pack.h>
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
@@ -194,6 +195,7 @@ void ib_ud_header_init(int payload_bytes,
struct ib_ud_header *header)
{
int header_len;
+ u16 packet_length;
memset(header, 0, sizeof *header);
@@ -208,7 +210,7 @@ void ib_ud_header_init(int payload_bytes,
header->lrh.link_version = 0;
header->lrh.link_next_header =
grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
- header->lrh.packet_length = (IB_LRH_BYTES +
+ packet_length = (IB_LRH_BYTES +
IB_BTH_BYTES +
IB_DETH_BYTES +
payload_bytes +
@@ -217,8 +219,7 @@ void ib_ud_header_init(int payload_bytes,
header->grh_present = grh_present;
if (grh_present) {
- header->lrh.packet_length += IB_GRH_BYTES / 4;
-
+ packet_length += IB_GRH_BYTES / 4;
header->grh.ip_version = 6;
header->grh.payload_length =
cpu_to_be16((IB_BTH_BYTES +
@@ -229,7 +230,7 @@ void ib_ud_header_init(int payload_bytes,
header->grh.next_header = 0x1b;
}
- cpu_to_be16s(&header->lrh.packet_length);
+ header->lrh.packet_length = cpu_to_be16(packet_length);
if (header->immediate_present)
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 2e38792df53..7c2f03057dd 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -49,8 +49,8 @@
#include <asm/uaccess.h>
#include <asm/semaphore.h>
-#include <ib_mad.h>
-#include <ib_user_mad.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_user_mad.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
@@ -271,7 +271,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
struct ib_send_wr *bad_wr;
struct ib_rmpp_mad *rmpp_mad;
u8 method;
- u64 *tid;
+ __be64 *tid;
int ret, length, hdr_len, data_len, rmpp_hdr_size;
int rmpp_active = 0;
@@ -316,7 +316,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
if (packet->mad.hdr.grh_present) {
ah_attr.ah_flags = IB_AH_GRH;
memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
- ah_attr.grh.flow_label = packet->mad.hdr.flow_label;
+ ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
}
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 7696022f9a4..180b3d4765e 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -1,6 +1,8 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -43,8 +45,8 @@
#include <linux/kref.h>
#include <linux/idr.h>
-#include <ib_verbs.h>
-#include <ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
struct ib_uverbs_device {
int devnum;
@@ -97,10 +99,12 @@ extern struct idr ib_uverbs_mw_idr;
extern struct idr ib_uverbs_ah_idr;
extern struct idr ib_uverbs_cq_idr;
extern struct idr ib_uverbs_qp_idr;
+extern struct idr ib_uverbs_srq_idr;
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
void *addr, size_t size, int write);
@@ -129,5 +133,8 @@ IB_UVERBS_DECLARE_CMD(modify_qp);
IB_UVERBS_DECLARE_CMD(destroy_qp);
IB_UVERBS_DECLARE_CMD(attach_mcast);
IB_UVERBS_DECLARE_CMD(detach_mcast);
+IB_UVERBS_DECLARE_CMD(create_srq);
+IB_UVERBS_DECLARE_CMD(modify_srq);
+IB_UVERBS_DECLARE_CMD(destroy_srq);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5f2bbcda4c7..ebccf9f38af 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -724,6 +724,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_cq *scq, *rcq;
+ struct ib_srq *srq;
struct ib_qp *qp;
struct ib_qp_init_attr attr;
int ret;
@@ -747,10 +748,12 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
+ srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;
if (!pd || pd->uobject->context != file->ucontext ||
!scq || scq->uobject->context != file->ucontext ||
- !rcq || rcq->uobject->context != file->ucontext) {
+ !rcq || rcq->uobject->context != file->ucontext ||
+ (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
ret = -EINVAL;
goto err_up;
}
@@ -759,7 +762,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
attr.qp_context = file;
attr.send_cq = scq;
attr.recv_cq = rcq;
- attr.srq = NULL;
+ attr.srq = srq;
attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
attr.qp_type = cmd.qp_type;
@@ -1004,3 +1007,178 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
return ret ? ret : in_len;
}
+
+ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_create_srq cmd;
+ struct ib_uverbs_create_srq_resp resp;
+ struct ib_udata udata;
+ struct ib_uobject *uobj;
+ struct ib_pd *pd;
+ struct ib_srq *srq;
+ struct ib_srq_init_attr attr;
+ int ret;
+
+ if (out_len < sizeof resp)
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ INIT_UDATA(&udata, buf + sizeof cmd,
+ (unsigned long) cmd.response + sizeof resp,
+ in_len - sizeof cmd, out_len - sizeof resp);
+
+ uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
+ if (!uobj)
+ return -ENOMEM;
+
+ down(&ib_uverbs_idr_mutex);
+
+ pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
+
+ if (!pd || pd->uobject->context != file->ucontext) {
+ ret = -EINVAL;
+ goto err_up;
+ }
+
+ attr.event_handler = ib_uverbs_srq_event_handler;
+ attr.srq_context = file;
+ attr.attr.max_wr = cmd.max_wr;
+ attr.attr.max_sge = cmd.max_sge;
+ attr.attr.srq_limit = cmd.srq_limit;
+
+ uobj->user_handle = cmd.user_handle;
+ uobj->context = file->ucontext;
+
+ srq = pd->device->create_srq(pd, &attr, &udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err_up;
+ }
+
+ srq->device = pd->device;
+ srq->pd = pd;
+ srq->uobject = uobj;
+ srq->event_handler = attr.event_handler;
+ srq->srq_context = attr.srq_context;
+ atomic_inc(&pd->usecnt);
+ atomic_set(&srq->usecnt, 0);
+
+ memset(&resp, 0, sizeof resp);
+
+retry:
+ if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err_destroy;
+ }
+
+ ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id);
+
+ if (ret == -EAGAIN)
+ goto retry;
+ if (ret)
+ goto err_destroy;
+
+ resp.srq_handle = uobj->id;
+
+ spin_lock_irq(&file->ucontext->lock);
+ list_add_tail(&uobj->list, &file->ucontext->srq_list);
+ spin_unlock_irq(&file->ucontext->lock);
+
+ if (copy_to_user((void __user *) (unsigned long) cmd.response,
+ &resp, sizeof resp)) {
+ ret = -EFAULT;
+ goto err_list;
+ }
+
+ up(&ib_uverbs_idr_mutex);
+
+ return in_len;
+
+err_list:
+ spin_lock_irq(&file->ucontext->lock);
+ list_del(&uobj->list);
+ spin_unlock_irq(&file->ucontext->lock);
+
+err_destroy:
+ ib_destroy_srq(srq);
+
+err_up:
+ up(&ib_uverbs_idr_mutex);
+
+ kfree(uobj);
+ return ret;
+}
+
+ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_modify_srq cmd;
+ struct ib_srq *srq;
+ struct ib_srq_attr attr;
+ int ret;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ down(&ib_uverbs_idr_mutex);
+
+ srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
+ if (!srq || srq->uobject->context != file->ucontext) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ attr.max_wr = cmd.max_wr;
+ attr.max_sge = cmd.max_sge;
+ attr.srq_limit = cmd.srq_limit;
+
+ ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
+
+out:
+ up(&ib_uverbs_idr_mutex);
+
+ return ret ? ret : in_len;
+}
+
+ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_destroy_srq cmd;
+ struct ib_srq *srq;
+ struct ib_uobject *uobj;
+ int ret = -EINVAL;
+
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+ down(&ib_uverbs_idr_mutex);
+
+ srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
+ if (!srq || srq->uobject->context != file->ucontext)
+ goto out;
+
+ uobj = srq->uobject;
+
+ ret = ib_destroy_srq(srq);
+ if (ret)
+ goto out;
+
+ idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
+
+ spin_lock_irq(&file->ucontext->lock);
+ list_del(&uobj->list);
+ spin_unlock_irq(&file->ucontext->lock);
+
+ kfree(uobj);
+
+out:
+ up(&ib_uverbs_idr_mutex);
+
+ return ret ? ret : in_len;
+}
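
The create path above also shows the 2.6-era idr allocation dance: idr_pre_get() preallocates under GFP_KERNEL, but idr_get_new() can still return -EAGAIN if a concurrent allocator consumed the preallocated node, so the caller loops. A hedged sketch of the same pattern wrapped as a helper (the helper name is hypothetical):

	static int uverbs_idr_add(struct idr *idr, void *obj, int *id)
	{
		int ret;

		do {
			if (!idr_pre_get(idr, GFP_KERNEL))
				return -ENOMEM;		/* preallocation failed */
			ret = idr_get_new(idr, obj, id);
		} while (ret == -EAGAIN);		/* raced; preallocate again */

		return ret;
	}
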
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f6e9ea29cd..09caf5b1ef3 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1,6 +1,8 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +69,7 @@ DEFINE_IDR(ib_uverbs_mw_idr);
DEFINE_IDR(ib_uverbs_ah_idr);
DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
+DEFINE_IDR(ib_uverbs_srq_idr);
static spinlock_t map_lock;
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -91,6 +94,9 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
[IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
[IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
+ [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
+ [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
+ [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
};
static struct vfsmount *uverbs_event_mnt;
@@ -125,7 +131,14 @@ static int ib_dealloc_ucontext(struct ib_ucontext *context)
kfree(uobj);
}
- /* XXX Free SRQs */
+ list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
+ struct ib_srq *srq = idr_find(&ib_uverbs_srq_idr, uobj->id);
+ idr_remove(&ib_uverbs_srq_idr, uobj->id);
+ ib_destroy_srq(srq);
+ list_del(&uobj->list);
+ kfree(uobj);
+ }
+
/* XXX Free MWs */
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
@@ -344,6 +357,13 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
event->event);
}
+void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
+{
+ ib_uverbs_async_handler(context_ptr,
+ event->element.srq->uobject->user_handle,
+ event->event);
+}
+
static void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index ed550f6595b..36a32c31566 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 506fdf1f2a2..5081d903e56 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -4,6 +4,7 @@
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -40,8 +41,8 @@
#include <linux/errno.h>
#include <linux/err.h>
-#include <ib_verbs.h>
-#include <ib_cache.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
/* Protection domains */
@@ -153,6 +154,66 @@ int ib_destroy_ah(struct ib_ah *ah)
}
EXPORT_SYMBOL(ib_destroy_ah);
+/* Shared receive queues */
+
+struct ib_srq *ib_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr)
+{
+ struct ib_srq *srq;
+
+ if (!pd->device->create_srq)
+ return ERR_PTR(-ENOSYS);
+
+ srq = pd->device->create_srq(pd, srq_init_attr, NULL);
+
+ if (!IS_ERR(srq)) {
+ srq->device = pd->device;
+ srq->pd = pd;
+ srq->uobject = NULL;
+ srq->event_handler = srq_init_attr->event_handler;
+ srq->srq_context = srq_init_attr->srq_context;
+ atomic_inc(&pd->usecnt);
+ atomic_set(&srq->usecnt, 0);
+ }
+
+ return srq;
+}
+EXPORT_SYMBOL(ib_create_srq);
+
+int ib_modify_srq(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask)
+{
+ return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
+}
+EXPORT_SYMBOL(ib_modify_srq);
+
+int ib_query_srq(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr)
+{
+ return srq->device->query_srq ?
+ srq->device->query_srq(srq, srq_attr) : -ENOSYS;
+}
+EXPORT_SYMBOL(ib_query_srq);
+
+int ib_destroy_srq(struct ib_srq *srq)
+{
+ struct ib_pd *pd;
+ int ret;
+
+ if (atomic_read(&srq->usecnt))
+ return -EBUSY;
+
+ pd = srq->pd;
+
+ ret = srq->device->destroy_srq(srq);
+ if (!ret)
+ atomic_dec(&pd->usecnt);
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_destroy_srq);
+
/* Queue pairs */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
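
verbs.c now exports a kernel-side SRQ API mirroring the other verbs: ib_create_srq() returns ERR_PTR on failure, ib_modify_srq() hands the attribute mask straight to the driver, and ib_destroy_srq() refuses with -EBUSY while any QP still references the SRQ (usecnt). A hedged usage sketch follows; the protection domain, event handler, queue sizes and the IB_SRQ_LIMIT mask value are illustrative assumptions, not taken from this diff:

	static struct ib_srq *example_create_srq(struct ib_pd *pd,
						 void (*handler)(struct ib_event *, void *))
	{
		struct ib_srq_init_attr init = {
			.event_handler = handler,	/* receives SRQ limit/error events */
			.srq_context   = NULL,
			.attr = {
				.max_wr    = 256,	/* outstanding receive WRs */
				.max_sge   = 4,
				.srq_limit = 16,	/* low-watermark for the limit event */
			},
		};

		return ib_create_srq(pd, &init);	/* check with IS_ERR() */
	}

	static int example_rearm_limit(struct ib_srq *srq)
	{
		struct ib_srq_attr attr = { .srq_limit = 16 };

		return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
	}

Teardown is the reverse: destroy any QPs attached to the SRQ first, then call ib_destroy_srq(), which drops the PD use count on success.
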
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index 5dcbd43073e..c44f7bae542 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -Idrivers/infiniband/include
-
ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
EXTRA_CFLAGS += -DDEBUG
endif
@@ -9,4 +7,4 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
- mthca_provider.o mthca_memfree.o mthca_uar.o
+ mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index b1db48dd91d..9ba3211cef7 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent)
kfree(array->page_list);
}
+
+/*
+ * Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+ union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+ int hca_write, struct mthca_mr *mr)
+{
+ int err = -ENOMEM;
+ int npages, shift;
+ u64 *dma_list = NULL;
+ dma_addr_t t;
+ int i;
+
+ if (size <= max_direct) {
+ *is_direct = 1;
+ npages = 1;
+ shift = get_order(size) + PAGE_SHIFT;
+
+ buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ pci_unmap_addr_set(&buf->direct, mapping, t);
+
+ memset(buf->direct.buf, 0, size);
+
+ while (t & ((1 << shift) - 1)) {
+ --shift;
+ npages *= 2;
+ }
+
+ dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ if (!dma_list)
+ goto err_free;
+
+ for (i = 0; i < npages; ++i)
+ dma_list[i] = t + i * (1 << shift);
+ } else {
+ *is_direct = 0;
+ npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ shift = PAGE_SHIFT;
+
+ dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+ if (!dma_list)
+ return -ENOMEM;
+
+ buf->page_list = kmalloc(npages * sizeof *buf->page_list,
+ GFP_KERNEL);
+ if (!buf->page_list)
+ goto err_out;
+
+ for (i = 0; i < npages; ++i)
+ buf->page_list[i].buf = NULL;
+
+ for (i = 0; i < npages; ++i) {
+ buf->page_list[i].buf =
+ dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ &t, GFP_KERNEL);
+ if (!buf->page_list[i].buf)
+ goto err_free;
+
+ dma_list[i] = t;
+ pci_unmap_addr_set(&buf->page_list[i], mapping, t);
+
+ memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+ }
+ }
+
+ err = mthca_mr_alloc_phys(dev, pd->pd_num,
+ dma_list, shift, npages,
+ 0, size,
+ MTHCA_MPT_FLAG_LOCAL_READ |
+ (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
+ mr);
+ if (err)
+ goto err_free;
+
+ kfree(dma_list);
+
+ return 0;
+
+err_free:
+ mthca_buf_free(dev, size, buf, *is_direct, NULL);
+
+err_out:
+ kfree(dma_list);
+
+ return err;
+}
+
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+ int is_direct, struct mthca_mr *mr)
+{
+ int i;
+
+ if (mr)
+ mthca_free_mr(dev, mr);
+
+ if (is_direct)
+ dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+ pci_unmap_addr(&buf->direct, mapping));
+ else {
+ for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ buf->page_list[i].buf,
+ pci_unmap_addr(&buf->page_list[i],
+ mapping));
+ kfree(buf->page_list);
+ }
+}
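
One subtlety in mthca_buf_alloc() above is the alignment fix-up for the direct case: the MR built over the buffer needs equal-sized, naturally aligned chunks, so the loop shrinks the reported chunk size until the DMA address is aligned to it, doubling the chunk count at each step. A small worked illustration (the address is hypothetical):

	dma_addr_t t = 0x101000;		/* assume only 4 KB aligned */
	int shift   = 14;			/* direct buffer of 16 KB */
	int npages  = 1;

	while (t & ((1 << shift) - 1)) {	/* 0x101000 & 0x3fff = 0x1000 */
		--shift;
		npages *= 2;
	}
	/* ends with shift == 12 (4 KB chunks) and npages == 4 */
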
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index d58dcbe6648..889e8509673 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -35,22 +35,22 @@
#include <linux/init.h>
-#include <ib_verbs.h>
-#include <ib_cache.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
#include "mthca_dev.h"
struct mthca_av {
- u32 port_pd;
- u8 reserved1;
- u8 g_slid;
- u16 dlid;
- u8 reserved2;
- u8 gid_index;
- u8 msg_sr;
- u8 hop_limit;
- u32 sl_tclass_flowlabel;
- u32 dgid[4];
+ __be32 port_pd;
+ u8 reserved1;
+ u8 g_slid;
+ __be16 dlid;
+ u8 reserved2;
+ u8 gid_index;
+ u8 msg_sr;
+ u8 hop_limit;
+ __be32 sl_tclass_flowlabel;
+ __be32 dgid[4];
};
int mthca_create_ah(struct mthca_dev *dev,
@@ -128,7 +128,7 @@ on_hca_fail:
av, (unsigned long) ah->avdma);
for (j = 0; j < 8; ++j)
printk(KERN_DEBUG " [%2x] %08x\n",
- j * 4, be32_to_cpu(((u32 *) av)[j]));
+ j * 4, be32_to_cpu(((__be32 *) av)[j]));
}
if (ah->type == MTHCA_AH_ON_HCA) {
@@ -169,7 +169,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
header->lrh.destination_lid = ah->av->dlid;
- header->lrh.source_lid = ah->av->g_slid & 0x7f;
+ header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
if (ah->av->g_slid & 0x80) {
header->grh_present = 1;
header->grh.traffic_class =
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1557a522d83..cc758a2d2bc 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -36,7 +37,7 @@
#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/io.h>
-#include <ib_mad.h>
+#include <rdma/ib_mad.h>
#include "mthca_dev.h"
#include "mthca_config_reg.h"
@@ -108,6 +109,7 @@ enum {
CMD_SW2HW_SRQ = 0x35,
CMD_HW2SW_SRQ = 0x36,
CMD_QUERY_SRQ = 0x37,
+ CMD_ARM_SRQ = 0x40,
/* QP/EE commands */
CMD_RST2INIT_QPEE = 0x19,
@@ -219,20 +221,20 @@ static int mthca_cmd_post(struct mthca_dev *dev,
* (and some architectures such as ia64 implement memcpy_toio
* in terms of writeb).
*/
- __raw_writel(cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4);
- __raw_writel(cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4);
- __raw_writel(cpu_to_be32(in_modifier), dev->hcr + 2 * 4);
- __raw_writel(cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4);
- __raw_writel(cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
- __raw_writel(cpu_to_be32(token << 16), dev->hcr + 5 * 4);
+ __raw_writel((__force u32) cpu_to_be32(in_param >> 32), dev->hcr + 0 * 4);
+ __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), dev->hcr + 1 * 4);
+ __raw_writel((__force u32) cpu_to_be32(in_modifier), dev->hcr + 2 * 4);
+ __raw_writel((__force u32) cpu_to_be32(out_param >> 32), dev->hcr + 3 * 4);
+ __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), dev->hcr + 4 * 4);
+ __raw_writel((__force u32) cpu_to_be32(token << 16), dev->hcr + 5 * 4);
/* __raw_writel may not order writes. */
wmb();
- __raw_writel(cpu_to_be32((1 << HCR_GO_BIT) |
- (event ? (1 << HCA_E_BIT) : 0) |
- (op_modifier << HCR_OPMOD_SHIFT) |
- op), dev->hcr + 6 * 4);
+ __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
+ (event ? (1 << HCA_E_BIT) : 0) |
+ (op_modifier << HCR_OPMOD_SHIFT) |
+ op), dev->hcr + 6 * 4);
out:
up(&dev->cmd.hcr_sem);
@@ -273,12 +275,14 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
goto out;
}
- if (out_is_imm) {
- memcpy_fromio(out_param, dev->hcr + HCR_OUT_PARAM_OFFSET, sizeof (u64));
- be64_to_cpus(out_param);
- }
+ if (out_is_imm)
+ *out_param =
+ (u64) be32_to_cpu((__force __be32)
+ __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
+ (u64) be32_to_cpu((__force __be32)
+ __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
- *status = be32_to_cpu(__raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+ *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
out:
up(&dev->cmd.poll_sem);
@@ -1029,6 +1033,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
+ mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+ dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
@@ -1082,6 +1088,34 @@ out:
return err;
}
+static void get_board_id(void *vsd, char *board_id)
+{
+ int i;
+
+#define VSD_OFFSET_SIG1 0x00
+#define VSD_OFFSET_SIG2 0xde
+#define VSD_OFFSET_MLX_BOARD_ID 0xd0
+#define VSD_OFFSET_TS_BOARD_ID 0x20
+
+#define VSD_SIGNATURE_TOPSPIN 0x5ad
+
+ memset(board_id, 0, MTHCA_BOARD_ID_LEN);
+
+ if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
+ be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
+ strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
+ } else {
+ /*
+ * The board ID is a string but the firmware byte
+ * swaps each 4-byte word before passing it back to
+ * us. Therefore we need to swab it before printing.
+ */
+ for (i = 0; i < 4; ++i)
+ ((u32 *) board_id)[i] =
+ swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+ }
+}
+
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
struct mthca_adapter *adapter, u8 *status)
{
@@ -1094,6 +1128,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
+#define QUERY_ADAPTER_VSD_OFFSET 0x20
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
@@ -1111,6 +1146,9 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
+ get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
+ adapter->board_id);
+
out:
mthca_free_mailbox(dev, mailbox);
return err;
@@ -1121,7 +1159,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
u8 *status)
{
struct mthca_mailbox *mailbox;
- u32 *inbox;
+ __be32 *inbox;
int err;
#define INIT_HCA_IN_SIZE 0x200
@@ -1247,10 +1285,8 @@ int mthca_INIT_IB(struct mthca_dev *dev,
#define INIT_IB_FLAG_SIG (1 << 18)
#define INIT_IB_FLAG_NG (1 << 17)
#define INIT_IB_FLAG_G0 (1 << 16)
-#define INIT_IB_FLAG_1X (1 << 8)
-#define INIT_IB_FLAG_4X (1 << 9)
-#define INIT_IB_FLAG_12X (1 << 11)
#define INIT_IB_VL_SHIFT 4
+#define INIT_IB_PORT_WIDTH_SHIFT 8
#define INIT_IB_MTU_SHIFT 12
#define INIT_IB_MAX_GID_OFFSET 0x06
#define INIT_IB_MAX_PKEY_OFFSET 0x0a
@@ -1266,12 +1302,11 @@ int mthca_INIT_IB(struct mthca_dev *dev,
memset(inbox, 0, INIT_IB_IN_SIZE);
flags = 0;
- flags |= param->enable_1x ? INIT_IB_FLAG_1X : 0;
- flags |= param->enable_4x ? INIT_IB_FLAG_4X : 0;
flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0;
flags |= param->set_node_guid ? INIT_IB_FLAG_NG : 0;
flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0;
flags |= param->vl_cap << INIT_IB_VL_SHIFT;
+ flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
@@ -1342,7 +1377,7 @@ int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *st
int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
{
struct mthca_mailbox *mailbox;
- u64 *inbox;
+ __be64 *inbox;
int err;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -1468,6 +1503,27 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
CMD_TIME_CLASS_A, status);
}
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status)
+{
+ return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
+ CMD_TIME_CLASS_A, status);
+}
+
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status)
+{
+ return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
+ CMD_HW2SW_SRQ,
+ CMD_TIME_CLASS_A, status);
+}
+
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
+{
+ return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
+ CMD_TIME_CLASS_B, status);
+}
+
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status)
@@ -1513,7 +1569,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
if (i % 8 == 0)
printk(" [%02x] ", i * 4);
printk(" %08x",
- be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
+ be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
printk("\n");
}
@@ -1533,7 +1589,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
if (i % 8 == 0)
printk("[%02x] ", i * 4);
printk(" %08x",
- be32_to_cpu(((u32 *) mailbox->buf)[i + 2]));
+ be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
printk("\n");
}
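
The recurring theme in the mthca_cmd.c hunks is sparse endianness annotation: values that go on the wire are typed __be16/__be32/__be64, and where a byte-swapped value must be handed to an interface that takes a plain integer (such as __raw_writel()), a (__force u32) cast documents that the mismatch is intentional. A minimal sketch of the convention, using a hypothetical register write:

	#include <linux/types.h>
	#include <asm/io.h>

	static void post_be32(void __iomem *reg, u32 host_val)
	{
		__be32 wire = cpu_to_be32(host_val);	/* explicitly big-endian */

		/* __raw_writel() takes a plain u32; the __force cast tells
		 * sparse the byte order here is deliberate, not a bug. */
		__raw_writel((__force u32) wire, reg);
	}
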
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index ed517f175dd..65f976a13e0 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +36,7 @@
#ifndef MTHCA_CMD_H
#define MTHCA_CMD_H
-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>
#define MTHCA_MAILBOX_SIZE 4096
@@ -183,10 +184,11 @@ struct mthca_dev_lim {
};
struct mthca_adapter {
- u32 vendor_id;
- u32 device_id;
- u32 revision_id;
- u8 inta_pin;
+ u32 vendor_id;
+ u32 device_id;
+ u32 revision_id;
+ char board_id[MTHCA_BOARD_ID_LEN];
+ u8 inta_pin;
};
struct mthca_init_hca_param {
@@ -218,8 +220,7 @@ struct mthca_init_hca_param {
};
struct mthca_init_ib_param {
- int enable_1x;
- int enable_4x;
+ int port_width;
int vl_cap;
int mtu_cap;
u16 gid_cap;
@@ -297,6 +298,11 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status);
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status);
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index b4bfbbfe2c3..afa56bfaab2 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 5687c301452..8600b6c3e0c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -2,6 +2,8 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,7 +39,7 @@
#include <linux/init.h>
#include <linux/hardirq.h>
-#include <ib_pack.h>
+#include <rdma/ib_pack.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
@@ -55,21 +57,21 @@ enum {
* Must be packed because start is 64 bits but only aligned to 32 bits.
*/
struct mthca_cq_context {
- u32 flags;
- u64 start;
- u32 logsize_usrpage;
- u32 error_eqn; /* Tavor only */
- u32 comp_eqn;
- u32 pd;
- u32 lkey;
- u32 last_notified_index;
- u32 solicit_producer_index;
- u32 consumer_index;
- u32 producer_index;
- u32 cqn;
- u32 ci_db; /* Arbel only */
- u32 state_db; /* Arbel only */
- u32 reserved;
+ __be32 flags;
+ __be64 start;
+ __be32 logsize_usrpage;
+ __be32 error_eqn; /* Tavor only */
+ __be32 comp_eqn;
+ __be32 pd;
+ __be32 lkey;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ __be32 cqn;
+ __be32 ci_db; /* Arbel only */
+ __be32 state_db; /* Arbel only */
+ u32 reserved;
} __attribute__((packed));
#define MTHCA_CQ_STATUS_OK ( 0 << 28)
@@ -108,31 +110,31 @@ enum {
};
struct mthca_cqe {
- u32 my_qpn;
- u32 my_ee;
- u32 rqpn;
- u16 sl_g_mlpath;
- u16 rlid;
- u32 imm_etype_pkey_eec;
- u32 byte_cnt;
- u32 wqe;
- u8 opcode;
- u8 is_send;
- u8 reserved;
- u8 owner;
+ __be32 my_qpn;
+ __be32 my_ee;
+ __be32 rqpn;
+ __be16 sl_g_mlpath;
+ __be16 rlid;
+ __be32 imm_etype_pkey_eec;
+ __be32 byte_cnt;
+ __be32 wqe;
+ u8 opcode;
+ u8 is_send;
+ u8 reserved;
+ u8 owner;
};
struct mthca_err_cqe {
- u32 my_qpn;
- u32 reserved1[3];
- u8 syndrome;
- u8 reserved2;
- u16 db_cnt;
- u32 reserved3;
- u32 wqe;
- u8 opcode;
- u8 reserved4[2];
- u8 owner;
+ __be32 my_qpn;
+ u32 reserved1[3];
+ u8 syndrome;
+ u8 reserved2;
+ __be16 db_cnt;
+ u32 reserved3;
+ __be32 wqe;
+ u8 opcode;
+ u8 reserved4[2];
+ u8 owner;
};
#define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7)
@@ -191,7 +193,7 @@ static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
int incr)
{
- u32 doorbell[2];
+ __be32 doorbell[2];
if (mthca_is_memfree(dev)) {
*cq->set_ci_db = cpu_to_be32(cq->cons_index);
@@ -222,7 +224,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+ struct mthca_srq *srq)
{
struct mthca_cq *cq;
struct mthca_cqe *cqe;
@@ -263,8 +266,11 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
*/
while (prod_index > cq->cons_index) {
cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
- if (cqe->my_qpn == cpu_to_be32(qpn))
+ if (cqe->my_qpn == cpu_to_be32(qpn)) {
+ if (srq)
+ mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
++nfreed;
+ }
else if (nfreed)
memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
cq->ibcq.cqe),
@@ -291,7 +297,7 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
{
int err;
int dbd;
- u32 new_wqe;
+ __be32 new_wqe;
if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
mthca_dbg(dev, "local QP operation err "
@@ -365,6 +371,13 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
break;
}
+ /*
+ * Mem-free HCAs always generate one CQE per WQE, even in the
+ * error case, so we don't have to check the doorbell count, etc.
+ */
+ if (mthca_is_memfree(dev))
+ return 0;
+
err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
if (err)
return err;
@@ -373,12 +386,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
* If we're at the end of the WQE chain, or we've used up our
* doorbell count, free the CQE. Otherwise just update it for
* the next poll operation.
- *
- * This does not apply to mem-free HCAs: they don't use the
- * doorbell count field, and so we should always free the CQE.
*/
- if (mthca_is_memfree(dev) ||
- !(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
+ if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
return 0;
cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
@@ -450,23 +459,27 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
>> wq->wqe_shift);
entry->wr_id = (*cur_qp)->wrid[wqe_index +
(*cur_qp)->rq.max];
+ } else if ((*cur_qp)->ibqp.srq) {
+ struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
+ u32 wqe = be32_to_cpu(cqe->wqe);
+ wq = NULL;
+ wqe_index = wqe >> srq->wqe_shift;
+ entry->wr_id = srq->wrid[wqe_index];
+ mthca_free_srq_wqe(srq, wqe);
} else {
wq = &(*cur_qp)->rq;
wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
entry->wr_id = (*cur_qp)->wrid[wqe_index];
}
- if (wq->last_comp < wqe_index)
- wq->tail += wqe_index - wq->last_comp;
- else
- wq->tail += wqe_index + wq->max - wq->last_comp;
-
- wq->last_comp = wqe_index;
+ if (wq) {
+ if (wq->last_comp < wqe_index)
+ wq->tail += wqe_index - wq->last_comp;
+ else
+ wq->tail += wqe_index + wq->max - wq->last_comp;
- if (0)
- mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
- is_send ? "Send" : "Receive",
- (*cur_qp)->qpn, wqe_index, wq->max);
+ wq->last_comp = wqe_index;
+ }
if (is_error) {
err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
@@ -584,13 +597,13 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
- u32 doorbell[2];
+ __be32 doorbell[2];
doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
MTHCA_TAVOR_CQ_DB_REQ_NOT) |
to_mcq(cq)->cqn);
- doorbell[1] = 0xffffffff;
+ doorbell[1] = (__force __be32) 0xffffffff;
mthca_write64(doorbell,
to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
@@ -602,9 +615,9 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
struct mthca_cq *cq = to_mcq(ibcq);
- u32 doorbell[2];
+ __be32 doorbell[2];
u32 sn;
- u32 ci;
+ __be32 ci;
sn = cq->arm_sn & 3;
ci = cpu_to_be32(cq->cons_index);
@@ -637,113 +650,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
- int i;
- int size;
-
- if (cq->is_direct)
- dma_free_coherent(&dev->pdev->dev,
- (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
- cq->queue.direct.buf,
- pci_unmap_addr(&cq->queue.direct,
- mapping));
- else {
- size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
- for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
- if (cq->queue.page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- cq->queue.page_list[i].buf,
- pci_unmap_addr(&cq->queue.page_list[i],
- mapping));
-
- kfree(cq->queue.page_list);
- }
-}
-
-static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
- struct mthca_cq *cq)
-{
- int err = -ENOMEM;
- int npages, shift;
- u64 *dma_list = NULL;
- dma_addr_t t;
- int i;
-
- if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
- cq->is_direct = 1;
- npages = 1;
- shift = get_order(size) + PAGE_SHIFT;
-
- cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
- if (!cq->queue.direct.buf)
- return -ENOMEM;
-
- pci_unmap_addr_set(&cq->queue.direct, mapping, t);
-
- memset(cq->queue.direct.buf, 0, size);
-
- while (t & ((1 << shift) - 1)) {
- --shift;
- npages *= 2;
- }
-
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
- if (!dma_list)
- goto err_free;
-
- for (i = 0; i < npages; ++i)
- dma_list[i] = t + i * (1 << shift);
- } else {
- cq->is_direct = 0;
- npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- shift = PAGE_SHIFT;
-
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
- if (!dma_list)
- return -ENOMEM;
-
- cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
- GFP_KERNEL);
- if (!cq->queue.page_list)
- goto err_out;
-
- for (i = 0; i < npages; ++i)
- cq->queue.page_list[i].buf = NULL;
-
- for (i = 0; i < npages; ++i) {
- cq->queue.page_list[i].buf =
- dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!cq->queue.page_list[i].buf)
- goto err_free;
-
- dma_list[i] = t;
- pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);
-
- memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
- }
- }
-
- err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
- dma_list, shift, npages,
- 0, size,
- MTHCA_MPT_FLAG_LOCAL_WRITE |
- MTHCA_MPT_FLAG_LOCAL_READ,
- &cq->mr);
- if (err)
- goto err_free;
-
- kfree(dma_list);
-
- return 0;
-
-err_free:
- mthca_free_cq_buf(dev, cq);
-
-err_out:
- kfree(dma_list);
-
- return err;
+ mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+ &cq->queue, cq->is_direct, &cq->mr);
}
int mthca_init_cq(struct mthca_dev *dev, int nent,
@@ -795,7 +703,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context = mailbox->buf;
if (cq->is_kernel) {
- err = mthca_alloc_cq_buf(dev, size, cq);
+ err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
+ &cq->queue, &cq->is_direct,
+ &dev->driver_pd, 1, &cq->mr);
if (err)
goto err_out_mailbox;
@@ -811,7 +721,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK |
MTHCA_CQ_STATE_DISARMED |
MTHCA_CQ_FLAG_TR);
- cq_context->start = cpu_to_be64(0);
cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
if (ctx)
cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
@@ -857,10 +766,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
return 0;
err_out_free_mr:
- if (cq->is_kernel) {
- mthca_free_mr(dev, &cq->mr);
+ if (cq->is_kernel)
mthca_free_cq_buf(dev, cq);
- }
err_out_mailbox:
mthca_free_mailbox(dev, mailbox);
@@ -904,7 +811,7 @@ void mthca_free_cq(struct mthca_dev *dev,
mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
if (0) {
- u32 *ctx = mailbox->buf;
+ __be32 *ctx = mailbox->buf;
int j;
printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
@@ -928,7 +835,6 @@ void mthca_free_cq(struct mthca_dev *dev,
wait_event(cq->wait, !atomic_read(&cq->refcount));
if (cq->is_kernel) {
- mthca_free_mr(dev, &cq->mr);
mthca_free_cq_buf(dev, cq);
if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 5ecdd2eeeb0..7bff5a8425f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -2,6 +2,8 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -67,6 +69,10 @@ enum {
};
enum {
+ MTHCA_BOARD_ID_LEN = 64
+};
+
+enum {
MTHCA_EQ_CONTEXT_SIZE = 0x40,
MTHCA_CQ_CONTEXT_SIZE = 0x40,
MTHCA_QP_CONTEXT_SIZE = 0x200,
@@ -142,6 +148,7 @@ struct mthca_limits {
int reserved_mcgs;
int num_pds;
int reserved_pds;
+ u8 port_width_cap;
};
struct mthca_alloc {
@@ -211,6 +218,13 @@ struct mthca_cq_table {
struct mthca_icm_table *table;
};
+struct mthca_srq_table {
+ struct mthca_alloc alloc;
+ spinlock_t lock;
+ struct mthca_array srq;
+ struct mthca_icm_table *table;
+};
+
struct mthca_qp_table {
struct mthca_alloc alloc;
u32 rdb_base;
@@ -246,6 +260,7 @@ struct mthca_dev {
unsigned long device_cap_flags;
u32 rev_id;
+ char board_id[MTHCA_BOARD_ID_LEN];
/* firmware info */
u64 fw_ver;
@@ -291,6 +306,7 @@ struct mthca_dev {
struct mthca_mr_table mr_table;
struct mthca_eq_table eq_table;
struct mthca_cq_table cq_table;
+ struct mthca_srq_table srq_table;
struct mthca_qp_table qp_table;
struct mthca_av_table av_table;
struct mthca_mcg_table mcg_table;
@@ -331,14 +347,13 @@ extern void __buggy_use_of_MTHCA_PUT(void);
#define MTHCA_PUT(dest, source, offset) \
do { \
- __typeof__(source) *__p = \
- (__typeof__(source) *) ((char *) (dest) + (offset)); \
+ void *__d = ((char *) (dest) + (offset)); \
switch (sizeof(source)) { \
- case 1: *__p = (source); break; \
- case 2: *__p = cpu_to_be16(source); break; \
- case 4: *__p = cpu_to_be32(source); break; \
- case 8: *__p = cpu_to_be64(source); break; \
- default: __buggy_use_of_MTHCA_PUT(); \
+ case 1: *(u8 *) __d = (source); break; \
+ case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
+ case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
+ case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
+ default: __buggy_use_of_MTHCA_PUT(); \
} \
} while (0)
@@ -354,12 +369,18 @@ int mthca_array_set(struct mthca_array *array, int index, void *value);
void mthca_array_clear(struct mthca_array *array, int index);
int mthca_array_init(struct mthca_array *array, int nent);
void mthca_array_cleanup(struct mthca_array *array, int nent);
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+ union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+ int hca_write, struct mthca_mr *mr);
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+ int is_direct, struct mthca_mr *mr);
int mthca_init_uar_table(struct mthca_dev *dev);
int mthca_init_pd_table(struct mthca_dev *dev);
int mthca_init_mr_table(struct mthca_dev *dev);
int mthca_init_eq_table(struct mthca_dev *dev);
int mthca_init_cq_table(struct mthca_dev *dev);
+int mthca_init_srq_table(struct mthca_dev *dev);
int mthca_init_qp_table(struct mthca_dev *dev);
int mthca_init_av_table(struct mthca_dev *dev);
int mthca_init_mcg_table(struct mthca_dev *dev);
@@ -369,6 +390,7 @@ void mthca_cleanup_pd_table(struct mthca_dev *dev);
void mthca_cleanup_mr_table(struct mthca_dev *dev);
void mthca_cleanup_eq_table(struct mthca_dev *dev);
void mthca_cleanup_cq_table(struct mthca_dev *dev);
+void mthca_cleanup_srq_table(struct mthca_dev *dev);
void mthca_cleanup_qp_table(struct mthca_dev *dev);
void mthca_cleanup_av_table(struct mthca_dev *dev);
void mthca_cleanup_mcg_table(struct mthca_dev *dev);
@@ -419,7 +441,19 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq);
void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn);
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+ struct mthca_srq *srq);
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+ struct ib_srq_attr *attr, struct mthca_srq *srq);
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+ enum ib_event_type event_type);
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
@@ -433,7 +467,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
- int index, int *dbd, u32 *new_wqe);
+ int index, int *dbd, __be32 *new_wqe);
int mthca_alloc_qp(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_cq *send_cq,
diff --git a/drivers/infiniband/hw/mthca/mthca_doorbell.h b/drivers/infiniband/hw/mthca/mthca_doorbell.h
index 535fad7710f..dd9a44d170c 100644
--- a/drivers/infiniband/hw/mthca/mthca_doorbell.h
+++ b/drivers/infiniband/hw/mthca/mthca_doorbell.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -57,13 +58,13 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
__raw_writeq((__force u64) val, dest);
}
-static inline void mthca_write64(u32 val[2], void __iomem *dest,
+static inline void mthca_write64(__be32 val[2], void __iomem *dest,
spinlock_t *doorbell_lock)
{
__raw_writeq(*(u64 *) val, dest);
}
-static inline void mthca_write_db_rec(u32 val[2], u32 *db)
+static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
{
*(u64 *) db = *(u64 *) val;
}
@@ -86,18 +87,18 @@ static inline void mthca_write64_raw(__be64 val, void __iomem *dest)
__raw_writel(((__force u32 *) &val)[1], dest + 4);
}
-static inline void mthca_write64(u32 val[2], void __iomem *dest,
+static inline void mthca_write64(__be32 val[2], void __iomem *dest,
spinlock_t *doorbell_lock)
{
unsigned long flags;
spin_lock_irqsave(doorbell_lock, flags);
- __raw_writel(val[0], dest);
- __raw_writel(val[1], dest + 4);
+ __raw_writel((__force u32) val[0], dest);
+ __raw_writel((__force u32) val[1], dest + 4);
spin_unlock_irqrestore(doorbell_lock, flags);
}
-static inline void mthca_write_db_rec(u32 val[2], u32 *db)
+static inline void mthca_write_db_rec(__be32 val[2], __be32 *db)
{
db[0] = val[0];
wmb();
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cbcf2b4722e..18f0981eb0c 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -51,18 +52,18 @@ enum {
* Must be packed because start is 64 bits but only aligned to 32 bits.
*/
struct mthca_eq_context {
- u32 flags;
- u64 start;
- u32 logsize_usrpage;
- u32 tavor_pd; /* reserved for Arbel */
- u8 reserved1[3];
- u8 intr;
- u32 arbel_pd; /* lost_count for Tavor */
- u32 lkey;
- u32 reserved2[2];
- u32 consumer_index;
- u32 producer_index;
- u32 reserved3[4];
+ __be32 flags;
+ __be64 start;
+ __be32 logsize_usrpage;
+ __be32 tavor_pd; /* reserved for Arbel */
+ u8 reserved1[3];
+ u8 intr;
+ __be32 arbel_pd; /* lost_count for Tavor */
+ __be32 lkey;
+ u32 reserved2[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved3[4];
} __attribute__((packed));
#define MTHCA_EQ_STATUS_OK ( 0 << 28)
@@ -127,28 +128,28 @@ struct mthca_eqe {
union {
u32 raw[6];
struct {
- u32 cqn;
+ __be32 cqn;
} __attribute__((packed)) comp;
struct {
- u16 reserved1;
- u16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- u64 out_param;
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
} __attribute__((packed)) cmd;
struct {
- u32 qpn;
+ __be32 qpn;
} __attribute__((packed)) qp;
struct {
- u32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
} __attribute__((packed)) cq_err;
struct {
- u32 reserved1[2];
- u32 port;
+ u32 reserved1[2];
+ __be32 port;
} __attribute__((packed)) port_change;
} event;
u8 reserved3[3];
@@ -167,7 +168,7 @@ static inline u64 async_mask(struct mthca_dev *dev)
static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
- u32 doorbell[2];
+ __be32 doorbell[2];
doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));
@@ -190,8 +191,8 @@ static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u
{
/* See comment in tavor_set_eq_ci() above. */
wmb();
- __raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base +
- eq->eqn * 8);
+ __raw_writel((__force u32) cpu_to_be32(ci),
+ dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
/* We still want ordering, just not swabbing, so add a barrier */
mb();
}
@@ -206,7 +207,7 @@ static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
- u32 doorbell[2];
+ __be32 doorbell[2];
doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
doorbell[1] = 0;
@@ -224,7 +225,7 @@ static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
if (!mthca_is_memfree(dev)) {
- u32 doorbell[2];
+ __be32 doorbell[2];
doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
doorbell[1] = cpu_to_be32(cqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7df22364201..9804174f7f3 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -32,9 +34,9 @@
* $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <ib_verbs.h>
-#include <ib_mad.h>
-#include <ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
@@ -192,7 +194,7 @@ int mthca_process_mad(struct ib_device *ibdev,
{
int err;
u8 status;
- u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE;
+ u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
/* Forward locally generated traps to the SM */
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 2ef916859e1..3241d6c9dc1 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +35,6 @@
*/
#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
@@ -171,6 +171,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
mdev->limits.reserved_mrws = dev_lim->reserved_mrws;
mdev->limits.reserved_uars = dev_lim->reserved_uars;
mdev->limits.reserved_pds = dev_lim->reserved_pds;
+ mdev->limits.port_width_cap = dev_lim->max_port_width;
/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
May be doable since hardware supports it for SRQ.
@@ -212,7 +213,6 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
struct mthca_dev_lim dev_lim;
struct mthca_profile profile;
struct mthca_init_hca_param init_hca;
- struct mthca_adapter adapter;
err = mthca_SYS_EN(mdev, &status);
if (err) {
@@ -253,6 +253,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.uarc_size = 0;
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ profile.num_srq = dev_lim.max_srqs;
err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if (err < 0)
@@ -270,26 +272,8 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
goto err_disable;
}
- err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
- if (err) {
- mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
- goto err_close;
- }
- if (status) {
- mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
- goto err_close;
- }
-
- mdev->eq_table.inta_pin = adapter.inta_pin;
- mdev->rev_id = adapter.revision_id;
-
return 0;
-err_close:
- mthca_CLOSE_HCA(mdev, 0, &status);
-
err_disable:
mthca_SYS_DIS(mdev, &status);
@@ -442,15 +426,29 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
}
mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
- dev_lim->cqc_entry_sz,
- mdev->limits.num_cqs,
- mdev->limits.reserved_cqs, 0);
+ dev_lim->cqc_entry_sz,
+ mdev->limits.num_cqs,
+ mdev->limits.reserved_cqs, 0);
if (!mdev->cq_table.table) {
mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
err = -ENOMEM;
goto err_unmap_rdb;
}
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
+ mdev->srq_table.table =
+ mthca_alloc_icm_table(mdev, init_hca->srqc_base,
+ dev_lim->srq_entry_sz,
+ mdev->limits.num_srqs,
+ mdev->limits.reserved_srqs, 0);
+ if (!mdev->srq_table.table) {
+ mthca_err(mdev, "Failed to map SRQ context memory, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_unmap_cq;
+ }
+ }
+
/*
* It's not strictly required, but for simplicity just map the
* whole multicast group table now. The table isn't very big
@@ -466,11 +464,15 @@ static int __devinit mthca_init_icm(struct mthca_dev *mdev,
if (!mdev->mcg_table.table) {
mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
err = -ENOMEM;
- goto err_unmap_cq;
+ goto err_unmap_srq;
}
return 0;
+err_unmap_srq:
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ mthca_free_icm_table(mdev, mdev->srq_table.table);
+
err_unmap_cq:
mthca_free_icm_table(mdev, mdev->cq_table.table);
@@ -506,7 +508,6 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
struct mthca_dev_lim dev_lim;
struct mthca_profile profile;
struct mthca_init_hca_param init_hca;
- struct mthca_adapter adapter;
u64 icm_size;
u8 status;
int err;
@@ -551,6 +552,8 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.num_udav = 0;
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ profile.num_srq = dev_lim.max_srqs;
icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if ((int) icm_size < 0) {
@@ -574,24 +577,11 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
goto err_free_icm;
}
- err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
- if (err) {
- mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
- goto err_free_icm;
- }
- if (status) {
- mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
- goto err_free_icm;
- }
-
- mdev->eq_table.inta_pin = adapter.inta_pin;
- mdev->rev_id = adapter.revision_id;
-
return 0;
err_free_icm:
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ mthca_free_icm_table(mdev, mdev->srq_table.table);
mthca_free_icm_table(mdev, mdev->cq_table.table);
mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
@@ -614,12 +604,70 @@ err_disable:
return err;
}
+static void mthca_close_hca(struct mthca_dev *mdev)
+{
+ u8 status;
+
+ mthca_CLOSE_HCA(mdev, 0, &status);
+
+ if (mthca_is_memfree(mdev)) {
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ mthca_free_icm_table(mdev, mdev->srq_table.table);
+ mthca_free_icm_table(mdev, mdev->cq_table.table);
+ mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+ mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+ mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+ mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+ mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+ mthca_unmap_eq_icm(mdev);
+
+ mthca_UNMAP_ICM_AUX(mdev, &status);
+ mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+
+ mthca_UNMAP_FA(mdev, &status);
+ mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+
+ if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
+ mthca_DISABLE_LAM(mdev, &status);
+ } else
+ mthca_SYS_DIS(mdev, &status);
+}
+
static int __devinit mthca_init_hca(struct mthca_dev *mdev)
{
+ u8 status;
+ int err;
+ struct mthca_adapter adapter;
+
if (mthca_is_memfree(mdev))
- return mthca_init_arbel(mdev);
+ err = mthca_init_arbel(mdev);
else
- return mthca_init_tavor(mdev);
+ err = mthca_init_tavor(mdev);
+
+ if (err)
+ return err;
+
+ err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
+ if (err) {
+ mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
+ goto err_close;
+ }
+ if (status) {
+ mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
+ "aborting.\n", status);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ mdev->eq_table.inta_pin = adapter.inta_pin;
+ mdev->rev_id = adapter.revision_id;
+ memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
+
+ return 0;
+
+err_close:
+ mthca_close_hca(mdev);
+ return err;
}
static int __devinit mthca_setup_hca(struct mthca_dev *dev)
@@ -709,11 +757,18 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
goto err_cmd_poll;
}
+ err = mthca_init_srq_table(dev);
+ if (err) {
+ mthca_err(dev, "Failed to initialize "
+ "shared receive queue table, aborting.\n");
+ goto err_cq_table_free;
+ }
+
err = mthca_init_qp_table(dev);
if (err) {
mthca_err(dev, "Failed to initialize "
"queue pair table, aborting.\n");
- goto err_cq_table_free;
+ goto err_srq_table_free;
}
err = mthca_init_av_table(dev);
@@ -738,6 +793,9 @@ err_av_table_free:
err_qp_table_free:
mthca_cleanup_qp_table(dev);
+err_srq_table_free:
+ mthca_cleanup_srq_table(dev);
+
err_cq_table_free:
mthca_cleanup_cq_table(dev);
@@ -844,33 +902,6 @@ static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev)
return 0;
}
-static void mthca_close_hca(struct mthca_dev *mdev)
-{
- u8 status;
-
- mthca_CLOSE_HCA(mdev, 0, &status);
-
- if (mthca_is_memfree(mdev)) {
- mthca_free_icm_table(mdev, mdev->cq_table.table);
- mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
- mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
- mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
- mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
- mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
- mthca_unmap_eq_icm(mdev);
-
- mthca_UNMAP_ICM_AUX(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
-
- mthca_UNMAP_FA(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
-
- if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
- mthca_DISABLE_LAM(mdev, &status);
- } else
- mthca_SYS_DIS(mdev, &status);
-}
-
/* Types of supported HCA */
enum {
TAVOR, /* MT23108 */
@@ -887,9 +918,9 @@ static struct {
int is_memfree;
int is_pcie;
} mthca_hca_table[] = {
- [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 2), .is_memfree = 0, .is_pcie = 0 },
- [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 6, 2), .is_memfree = 0, .is_pcie = 1 },
- [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 0, 1), .is_memfree = 1, .is_pcie = 1 },
+ [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 3), .is_memfree = 0, .is_pcie = 0 },
+ [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 0), .is_memfree = 0, .is_pcie = 1 },
+ [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .is_memfree = 1, .is_pcie = 1 },
[SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 }
};
@@ -1051,6 +1082,7 @@ err_cleanup:
mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev);
mthca_cleanup_qp_table(mdev);
+ mthca_cleanup_srq_table(mdev);
mthca_cleanup_cq_table(mdev);
mthca_cmd_use_polling(mdev);
mthca_cleanup_eq_table(mdev);
@@ -1100,6 +1132,7 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev);
mthca_cleanup_qp_table(mdev);
+ mthca_cleanup_srq_table(mdev);
mthca_cleanup_cq_table(mdev);
mthca_cmd_use_polling(mdev);
mthca_cleanup_eq_table(mdev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 5be7d949dbf..a2707605f4c 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -42,10 +42,10 @@ enum {
};
struct mthca_mgm {
- u32 next_gid_index;
- u32 reserved[3];
- u8 gid[16];
- u32 qp[MTHCA_QP_PER_MGM];
+ __be32 next_gid_index;
+ u32 reserved[3];
+ u8 gid[16];
+ __be32 qp[MTHCA_QP_PER_MGM];
};
static const u8 zero_gid[16]; /* automatically initialized to 0 */
@@ -94,10 +94,14 @@ static int find_mgm(struct mthca_dev *dev,
if (0)
mthca_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
"%04x:%04x:%04x:%04x is %04x\n",
- be16_to_cpu(((u16 *) gid)[0]), be16_to_cpu(((u16 *) gid)[1]),
- be16_to_cpu(((u16 *) gid)[2]), be16_to_cpu(((u16 *) gid)[3]),
- be16_to_cpu(((u16 *) gid)[4]), be16_to_cpu(((u16 *) gid)[5]),
- be16_to_cpu(((u16 *) gid)[6]), be16_to_cpu(((u16 *) gid)[7]),
+ be16_to_cpu(((__be16 *) gid)[0]),
+ be16_to_cpu(((__be16 *) gid)[1]),
+ be16_to_cpu(((__be16 *) gid)[2]),
+ be16_to_cpu(((__be16 *) gid)[3]),
+ be16_to_cpu(((__be16 *) gid)[4]),
+ be16_to_cpu(((__be16 *) gid)[5]),
+ be16_to_cpu(((__be16 *) gid)[6]),
+ be16_to_cpu(((__be16 *) gid)[7]),
*hash);
*index = *hash;
@@ -258,14 +262,14 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (index == -1) {
mthca_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
"not found\n",
- be16_to_cpu(((u16 *) gid->raw)[0]),
- be16_to_cpu(((u16 *) gid->raw)[1]),
- be16_to_cpu(((u16 *) gid->raw)[2]),
- be16_to_cpu(((u16 *) gid->raw)[3]),
- be16_to_cpu(((u16 *) gid->raw)[4]),
- be16_to_cpu(((u16 *) gid->raw)[5]),
- be16_to_cpu(((u16 *) gid->raw)[6]),
- be16_to_cpu(((u16 *) gid->raw)[7]));
+ be16_to_cpu(((__be16 *) gid->raw)[0]),
+ be16_to_cpu(((__be16 *) gid->raw)[1]),
+ be16_to_cpu(((__be16 *) gid->raw)[2]),
+ be16_to_cpu(((__be16 *) gid->raw)[3]),
+ be16_to_cpu(((__be16 *) gid->raw)[4]),
+ be16_to_cpu(((__be16 *) gid->raw)[5]),
+ be16_to_cpu(((__be16 *) gid->raw)[6]),
+ be16_to_cpu(((__be16 *) gid->raw)[7]));
err = -EINVAL;
goto out;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 2a864615035..1827400f189 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -285,6 +286,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
{
struct mthca_icm_table *table;
int num_icm;
+ unsigned chunk_size;
int i;
u8 status;
@@ -305,7 +307,11 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
table->icm[i] = NULL;
for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
- table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ chunk_size = MTHCA_TABLE_CHUNK_SIZE;
+ if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
+ chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;
+
+ table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN);
if (!table->icm[i])
@@ -481,7 +487,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
}
}
-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
+int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
{
int group;
int start, end, dir;
@@ -564,7 +570,7 @@ found:
page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));
- *db = (u32 *) &page->db_rec[j];
+ *db = (__be32 *) &page->db_rec[j];
out:
up(&dev->db_tab->mutex);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 4761d844cb5..bafa51544aa 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -137,7 +138,7 @@ enum {
struct mthca_db_page {
DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
- u64 *db_rec;
+ __be64 *db_rec;
dma_addr_t mapping;
};
@@ -172,7 +173,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
int mthca_init_db_tab(struct mthca_dev *dev);
void mthca_cleanup_db_tab(struct mthca_dev *dev);
-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db);
+int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db);
void mthca_free_db(struct mthca_dev *dev, int type, int db_index);
#endif /* MTHCA_MEMFREE_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index cbe50feaf68..1f97a44477f 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -50,18 +51,18 @@ struct mthca_mtt {
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
struct mthca_mpt_entry {
- u32 flags;
- u32 page_size;
- u32 key;
- u32 pd;
- u64 start;
- u64 length;
- u32 lkey;
- u32 window_count;
- u32 window_count_limit;
- u64 mtt_seg;
- u32 mtt_sz; /* Arbel only */
- u32 reserved[2];
+ __be32 flags;
+ __be32 page_size;
+ __be32 key;
+ __be32 pd;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 window_count;
+ __be32 window_count_limit;
+ __be64 mtt_seg;
+ __be32 mtt_sz; /* Arbel only */
+ u32 reserved[2];
} __attribute__((packed));
#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
@@ -247,7 +248,7 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
int start_index, u64 *buffer_list, int list_len)
{
struct mthca_mailbox *mailbox;
- u64 *mtt_entry;
+ __be64 *mtt_entry;
int err = 0;
u8 status;
int i;
@@ -389,7 +390,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
+ printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
if ((i + 1) % 4 == 0)
printk("\n");
}
@@ -458,7 +459,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
mthca_table_put(dev, dev->mr_table.mpt_table,
- arbel_key_to_hw_index(lkey));
+ key_to_hw_index(dev, lkey));
mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
@@ -562,7 +563,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
+ printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
if ((i + 1) % 4 == 0)
printk("\n");
}
@@ -669,7 +670,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
mpt_entry.start = cpu_to_be64(iova);
- writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
+ __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
offsetof(struct mthca_mpt_entry, window_count) -
offsetof(struct mthca_mpt_entry, start));
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index c2c899844e9..3dbf06a6e6f 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 4fedc32d587..0576056b34f 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -101,6 +102,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
profile[MTHCA_RES_UARC].size = request->uarc_size;
profile[MTHCA_RES_QP].num = request->num_qp;
+ profile[MTHCA_RES_SRQ].num = request->num_srq;
profile[MTHCA_RES_EQP].num = request->num_qp;
profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp;
profile[MTHCA_RES_CQ].num = request->num_cq;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.h b/drivers/infiniband/hw/mthca/mthca_profile.h
index 17aef335766..94641808f97 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.h
+++ b/drivers/infiniband/hw/mthca/mthca_profile.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +42,7 @@
struct mthca_profile {
int num_qp;
int rdb_per_qp;
+ int num_srq;
int num_cq;
int num_mcg;
int num_mpt;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 81919a7b493..1c1c2e23087 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -2,6 +2,8 @@
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,7 @@
* $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
*/
-#include <ib_smi.h>
+#include <rdma/ib_smi.h>
#include <linux/mm.h>
#include "mthca_dev.h"
@@ -79,10 +81,10 @@ static int mthca_query_device(struct ib_device *ibdev,
}
props->device_cap_flags = mdev->device_cap_flags;
- props->vendor_id = be32_to_cpup((u32 *) (out_mad->data + 36)) &
+ props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
- props->vendor_part_id = be16_to_cpup((u16 *) (out_mad->data + 30));
- props->hw_ver = be16_to_cpup((u16 *) (out_mad->data + 32));
+ props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
+ props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
memcpy(&props->node_guid, out_mad->data + 12, 8);
@@ -118,6 +120,8 @@ static int mthca_query_port(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
+ memset(props, 0, sizeof *props);
+
memset(in_mad, 0, sizeof *in_mad);
in_mad->base_version = 1;
in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
@@ -136,16 +140,17 @@ static int mthca_query_port(struct ib_device *ibdev,
goto out;
}
- props->lid = be16_to_cpup((u16 *) (out_mad->data + 16));
+ props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
- props->sm_lid = be16_to_cpup((u16 *) (out_mad->data + 18));
+ props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
props->sm_sl = out_mad->data[36] & 0xf;
props->state = out_mad->data[32] & 0xf;
props->phys_state = out_mad->data[33] >> 4;
- props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20));
+ props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
+ props->max_msg_sz = 0x80000000;
props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
- props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
props->active_speed = out_mad->data[35] >> 4;
@@ -221,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
goto out;
}
- *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]);
+ *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
out:
kfree(in_mad);
@@ -420,6 +425,77 @@ static int mthca_ah_destroy(struct ib_ah *ah)
return 0;
}
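+/*
+ * Create an SRQ through the verbs interface.  For userspace
+ * consumers we map the doorbell page described by the ucmd and take
+ * the buffer lkey and doorbell index from userspace; the SRQ number
+ * is copied back through udata so the consumer can post to it.
+ */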
+static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mthca_create_srq ucmd;
+ struct mthca_ucontext *context = NULL;
+ struct mthca_srq *srq;
+ int err;
+
+ srq = kmalloc(sizeof *srq, GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ if (pd->uobject) {
+ context = to_mucontext(pd->uobject->context);
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+ return ERR_PTR(-EFAULT);
+
+ err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ context->db_tab, ucmd.db_index,
+ ucmd.db_page);
+
+ if (err)
+ goto err_free;
+
+ srq->mr.ibmr.lkey = ucmd.lkey;
+ srq->db_index = ucmd.db_index;
+ }
+
+ err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
+ &init_attr->attr, srq);
+
+ if (err && pd->uobject)
+ mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+ context->db_tab, ucmd.db_index);
+
+ if (err)
+ goto err_free;
+
+ if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
+ mthca_free_srq(to_mdev(pd->device), srq);
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ return &srq->ibsrq;
+
+err_free:
+ kfree(srq);
+
+ return ERR_PTR(err);
+}
+
+static int mthca_destroy_srq(struct ib_srq *srq)
+{
+ struct mthca_ucontext *context;
+
+ if (srq->uobject) {
+ context = to_mucontext(srq->uobject->context);
+
+ mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
+ context->db_tab, to_msrq(srq)->db_index);
+ }
+
+ mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
+ kfree(srq);
+
+ return 0;
+}
+
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
@@ -956,14 +1032,22 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
}
}
+static ssize_t show_board(struct class_device *cdev, char *buf)
+{
+ struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
+ return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
+}
+
static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct class_device_attribute *mthca_class_attributes[] = {
&class_device_attr_hw_rev,
&class_device_attr_fw_ver,
- &class_device_attr_hca_type
+ &class_device_attr_hca_type,
+ &class_device_attr_board_id
};
int mthca_register_device(struct mthca_dev *dev)
@@ -990,6 +1074,17 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
dev->ib_dev.create_ah = mthca_ah_create;
dev->ib_dev.destroy_ah = mthca_ah_destroy;
+
+ if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
+ dev->ib_dev.create_srq = mthca_create_srq;
+ dev->ib_dev.destroy_srq = mthca_destroy_srq;
+
+ if (mthca_is_memfree(dev))
+ dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
+ else
+ dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
+ }
+
dev->ib_dev.create_qp = mthca_create_qp;
dev->ib_dev.modify_qp = mthca_modify_qp;
dev->ib_dev.destroy_qp = mthca_destroy_qp;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 1d032791cc8..bcd4b01a339 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -36,8 +37,8 @@
#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H
-#include <ib_verbs.h>
-#include <ib_pack.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
#define MTHCA_MPT_FLAG_ATOMIC (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13)
@@ -50,6 +51,11 @@ struct mthca_buf_list {
DECLARE_PCI_UNMAP_ADDR(mapping)
};
+union mthca_buf {
+ struct mthca_buf_list direct;
+ struct mthca_buf_list *page_list;
+};
+
struct mthca_uar {
unsigned long pfn;
int index;
@@ -181,19 +187,39 @@ struct mthca_cq {
/* Next fields are Arbel only */
int set_ci_db_index;
- u32 *set_ci_db;
+ __be32 *set_ci_db;
int arm_db_index;
- u32 *arm_db;
+ __be32 *arm_db;
int arm_sn;
- union {
- struct mthca_buf_list direct;
- struct mthca_buf_list *page_list;
- } queue;
+ union mthca_buf queue;
struct mthca_mr mr;
wait_queue_head_t wait;
};
+struct mthca_srq {
+ struct ib_srq ibsrq;
+ spinlock_t lock;
+ atomic_t refcount;
+ int srqn;
+ int max;
+ int max_gs;
+ int wqe_shift;
+ int first_free;
+ int last_free;
+ u16 counter; /* Arbel only */
+ int db_index; /* Arbel only */
+ __be32 *db; /* Arbel only */
+ void *last;
+
+ int is_direct;
+ u64 *wrid;
+ union mthca_buf queue;
+ struct mthca_mr mr;
+
+ wait_queue_head_t wait;
+};
+
struct mthca_wq {
spinlock_t lock;
int max;
@@ -206,7 +232,7 @@ struct mthca_wq {
int wqe_shift;
int db_index; /* Arbel only */
- u32 *db;
+ __be32 *db;
};
struct mthca_qp {
@@ -227,10 +253,7 @@ struct mthca_qp {
int send_wqe_offset;
u64 *wrid;
- union {
- struct mthca_buf_list direct;
- struct mthca_buf_list *page_list;
- } queue;
+ union mthca_buf queue;
wait_queue_head_t wait;
};
@@ -277,6 +300,11 @@ static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
return container_of(ibcq, struct mthca_cq, ibcq);
}
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct mthca_srq, ibsrq);
+}
+
static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct mthca_qp, ibqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f7126b14d5a..0164b84d4ec 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1,6 +1,8 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,13 +37,14 @@
#include <linux/init.h>
-#include <ib_verbs.h>
-#include <ib_cache.h>
-#include <ib_pack.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_pack.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
+#include "mthca_wqe.h"
enum {
MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
@@ -95,62 +98,62 @@ enum {
};
struct mthca_qp_path {
- u32 port_pkey;
- u8 rnr_retry;
- u8 g_mylmc;
- u16 rlid;
- u8 ackto;
- u8 mgid_index;
- u8 static_rate;
- u8 hop_limit;
- u32 sl_tclass_flowlabel;
- u8 rgid[16];
+ __be32 port_pkey;
+ u8 rnr_retry;
+ u8 g_mylmc;
+ __be16 rlid;
+ u8 ackto;
+ u8 mgid_index;
+ u8 static_rate;
+ u8 hop_limit;
+ __be32 sl_tclass_flowlabel;
+ u8 rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
- u32 flags;
- u32 tavor_sched_queue; /* Reserved on Arbel */
- u8 mtu_msgmax;
- u8 rq_size_stride; /* Reserved on Tavor */
- u8 sq_size_stride; /* Reserved on Tavor */
- u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */
- u32 usr_page;
- u32 local_qpn;
- u32 remote_qpn;
- u32 reserved1[2];
+ __be32 flags;
+ __be32 tavor_sched_queue; /* Reserved on Arbel */
+ u8 mtu_msgmax;
+ u8 rq_size_stride; /* Reserved on Tavor */
+ u8 sq_size_stride; /* Reserved on Tavor */
+ u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */
+ __be32 usr_page;
+ __be32 local_qpn;
+ __be32 remote_qpn;
+ u32 reserved1[2];
struct mthca_qp_path pri_path;
struct mthca_qp_path alt_path;
- u32 rdd;
- u32 pd;
- u32 wqe_base;
- u32 wqe_lkey;
- u32 params1;
- u32 reserved2;
- u32 next_send_psn;
- u32 cqn_snd;
- u32 snd_wqe_base_l; /* Next send WQE on Tavor */
- u32 snd_db_index; /* (debugging only entries) */
- u32 last_acked_psn;
- u32 ssn;
- u32 params2;
- u32 rnr_nextrecvpsn;
- u32 ra_buff_indx;
- u32 cqn_rcv;
- u32 rcv_wqe_base_l; /* Next recv WQE on Tavor */
- u32 rcv_db_index; /* (debugging only entries) */
- u32 qkey;
- u32 srqn;
- u32 rmsn;
- u16 rq_wqe_counter; /* reserved on Tavor */
- u16 sq_wqe_counter; /* reserved on Tavor */
- u32 reserved3[18];
+ __be32 rdd;
+ __be32 pd;
+ __be32 wqe_base;
+ __be32 wqe_lkey;
+ __be32 params1;
+ __be32 reserved2;
+ __be32 next_send_psn;
+ __be32 cqn_snd;
+ __be32 snd_wqe_base_l; /* Next send WQE on Tavor */
+ __be32 snd_db_index; /* (debugging only entries) */
+ __be32 last_acked_psn;
+ __be32 ssn;
+ __be32 params2;
+ __be32 rnr_nextrecvpsn;
+ __be32 ra_buff_indx;
+ __be32 cqn_rcv;
+ __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */
+ __be32 rcv_db_index; /* (debugging only entries) */
+ __be32 qkey;
+ __be32 srqn;
+ __be32 rmsn;
+ __be16 rq_wqe_counter; /* reserved on Tavor */
+ __be16 sq_wqe_counter; /* reserved on Tavor */
+ u32 reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
- u32 opt_param_mask;
- u32 reserved1;
+ __be32 opt_param_mask;
+ u32 reserved1;
struct mthca_qp_context context;
- u32 reserved2[62];
+ u32 reserved2[62];
} __attribute__((packed));
enum {
@@ -173,80 +176,6 @@ enum {
MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16
};
-enum {
- MTHCA_NEXT_DBD = 1 << 7,
- MTHCA_NEXT_FENCE = 1 << 6,
- MTHCA_NEXT_CQ_UPDATE = 1 << 3,
- MTHCA_NEXT_EVENT_GEN = 1 << 2,
- MTHCA_NEXT_SOLICIT = 1 << 1,
-
- MTHCA_MLX_VL15 = 1 << 17,
- MTHCA_MLX_SLR = 1 << 16
-};
-
-enum {
- MTHCA_INVAL_LKEY = 0x100
-};
-
-struct mthca_next_seg {
- u32 nda_op; /* [31:6] next WQE [4:0] next opcode */
- u32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
- u32 flags; /* [3] CQ [2] Event [1] Solicit */
- u32 imm; /* immediate data */
-};
-
-struct mthca_tavor_ud_seg {
- u32 reserved1;
- u32 lkey;
- u64 av_addr;
- u32 reserved2[4];
- u32 dqpn;
- u32 qkey;
- u32 reserved3[2];
-};
-
-struct mthca_arbel_ud_seg {
- u32 av[8];
- u32 dqpn;
- u32 qkey;
- u32 reserved[2];
-};
-
-struct mthca_bind_seg {
- u32 flags; /* [31] Atomic [30] rem write [29] rem read */
- u32 reserved;
- u32 new_rkey;
- u32 lkey;
- u64 addr;
- u64 length;
-};
-
-struct mthca_raddr_seg {
- u64 raddr;
- u32 rkey;
- u32 reserved;
-};
-
-struct mthca_atomic_seg {
- u64 swap_add;
- u64 compare;
-};
-
-struct mthca_data_seg {
- u32 byte_count;
- u32 lkey;
- u64 addr;
-};
-
-struct mthca_mlx_seg {
- u32 nda_op;
- u32 nds;
- u32 flags; /* [17] VL15 [16] SLR [14:12] static rate
- [11:8] SL [3] C [2] E */
- u16 rlid;
- u16 vcrc;
-};
-
static const u8 mthca_opcode[] = {
[IB_WR_SEND] = MTHCA_OPCODE_SEND,
[IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM,
@@ -573,12 +502,11 @@ static void init_port(struct mthca_dev *dev, int port)
memset(&param, 0, sizeof param);
- param.enable_1x = 1;
- param.enable_4x = 1;
- param.vl_cap = dev->limits.vl_cap;
- param.mtu_cap = dev->limits.mtu_cap;
- param.gid_cap = dev->limits.gid_table_len;
- param.pkey_cap = dev->limits.pkey_table_len;
+ param.port_width = dev->limits.port_width_cap;
+ param.vl_cap = dev->limits.vl_cap;
+ param.mtu_cap = dev->limits.mtu_cap;
+ param.gid_cap = dev->limits.gid_table_len;
+ param.pkey_cap = dev->limits.pkey_table_len;
err = mthca_INIT_IB(dev, &param, port, &status);
if (err)
@@ -684,10 +612,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
if (mthca_is_memfree(dev)) {
- qp_context->rq_size_stride =
- ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
- qp_context->sq_size_stride =
- ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4);
+ if (qp->rq.max)
+ qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
+ qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
+
+ if (qp->sq.max)
+ qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
+ qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
}
/* leave arbel_sched_queue as 0 */
@@ -856,6 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
+ if (ibqp->srq)
+ qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
+
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
@@ -878,6 +812,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
}
+ if (ibqp->srq)
+ qp_context->srqn = cpu_to_be32(1 << 24 |
+ to_msrq(ibqp->srq)->srqn);
+
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
qp->qpn, 0, mailbox, 0, &status);
if (status) {
@@ -925,10 +863,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
struct mthca_qp *qp)
{
int size;
- int i;
- int npages, shift;
- dma_addr_t t;
- u64 *dma_list = NULL;
int err = -ENOMEM;
size = sizeof (struct mthca_next_seg) +
@@ -978,116 +912,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
if (!qp->wrid)
goto err_out;
- if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
- qp->is_direct = 1;
- npages = 1;
- shift = get_order(size) + PAGE_SHIFT;
-
- if (0)
- mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
- size, shift);
-
- qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
- &t, GFP_KERNEL);
- if (!qp->queue.direct.buf)
- goto err_out;
-
- pci_unmap_addr_set(&qp->queue.direct, mapping, t);
-
- memset(qp->queue.direct.buf, 0, size);
-
- while (t & ((1 << shift) - 1)) {
- --shift;
- npages *= 2;
- }
-
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
- if (!dma_list)
- goto err_out_free;
-
- for (i = 0; i < npages; ++i)
- dma_list[i] = t + i * (1 << shift);
- } else {
- qp->is_direct = 0;
- npages = size / PAGE_SIZE;
- shift = PAGE_SHIFT;
-
- if (0)
- mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);
-
- dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
- if (!dma_list)
- goto err_out;
-
- qp->queue.page_list = kmalloc(npages *
- sizeof *qp->queue.page_list,
- GFP_KERNEL);
- if (!qp->queue.page_list)
- goto err_out;
-
- for (i = 0; i < npages; ++i) {
- qp->queue.page_list[i].buf =
- dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!qp->queue.page_list[i].buf)
- goto err_out_free;
-
- memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);
-
- pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
- dma_list[i] = t;
- }
- }
-
- err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
- npages, 0, size,
- MTHCA_MPT_FLAG_LOCAL_READ,
- &qp->mr);
+ err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
+ &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
if (err)
- goto err_out_free;
+ goto err_out;
- kfree(dma_list);
return 0;
- err_out_free:
- if (qp->is_direct) {
- dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
- pci_unmap_addr(&qp->queue.direct, mapping));
- } else
- for (i = 0; i < npages; ++i) {
- if (qp->queue.page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- qp->queue.page_list[i].buf,
- pci_unmap_addr(&qp->queue.page_list[i],
- mapping));
-
- }
-
- err_out:
+err_out:
kfree(qp->wrid);
- kfree(dma_list);
return err;
}
static void mthca_free_wqe_buf(struct mthca_dev *dev,
struct mthca_qp *qp)
{
- int i;
- int size = PAGE_ALIGN(qp->send_wqe_offset +
- (qp->sq.max << qp->sq.wqe_shift));
-
- if (qp->is_direct) {
- dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
- pci_unmap_addr(&qp->queue.direct, mapping));
- } else {
- for (i = 0; i < size / PAGE_SIZE; ++i) {
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- qp->queue.page_list[i].buf,
- pci_unmap_addr(&qp->queue.page_list[i],
- mapping));
- }
- }
-
+ mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
+ (qp->sq.max << qp->sq.wqe_shift)),
+ &qp->queue, qp->is_direct, &qp->mr);
kfree(qp->wrid);
}
@@ -1428,11 +1270,12 @@ void mthca_free_qp(struct mthca_dev *dev,
* unref the mem-free tables and free the QPN in our table.
*/
if (!qp->ibqp.uobject) {
- mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+ qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
- mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+ qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
- mthca_free_mr(dev, &qp->mr);
mthca_free_memfree(dev, qp);
mthca_free_wqe_buf(dev, qp);
}
@@ -1457,6 +1300,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
{
int header_size;
int err;
+ u16 pkey;
ib_ud_header_init(256, /* assume a MAD */
sqp->ud_header.grh_present,
@@ -1467,8 +1311,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
return err;
mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
- (sqp->ud_header.lrh.destination_lid == 0xffff ?
- MTHCA_MLX_SLR : 0) |
+ (sqp->ud_header.lrh.destination_lid ==
+ IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
(sqp->ud_header.lrh.service_level << 8));
mlx->rlid = sqp->ud_header.lrh.destination_lid;
mlx->vcrc = 0;
@@ -1488,18 +1332,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
}
sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
- if (sqp->ud_header.lrh.destination_lid == 0xffff)
- sqp->ud_header.lrh.source_lid = 0xffff;
+ if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+ sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
ib_get_cached_pkey(&dev->ib_dev, sqp->port,
- sqp->pkey_index,
- &sqp->ud_header.bth.pkey);
+ sqp->pkey_index, &pkey);
else
ib_get_cached_pkey(&dev->ib_dev, sqp->port,
- wr->wr.ud.pkey_index,
- &sqp->ud_header.bth.pkey);
- cpu_to_be16s(&sqp->ud_header.bth.pkey);
+ wr->wr.ud.pkey_index, &pkey);
+ sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
@@ -1742,7 +1584,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
out:
if (likely(nreq)) {
- u32 doorbell[2];
+ __be32 doorbell[2];
doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
qp->send_wqe_offset) | f0 | op0);
@@ -1843,7 +1685,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
out:
if (likely(nreq)) {
- u32 doorbell[2];
+ __be32 doorbell[2];
doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
@@ -2064,7 +1906,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
out:
if (likely(nreq)) {
- u32 doorbell[2];
+ __be32 doorbell[2];
doorbell[0] = cpu_to_be32((nreq << 24) |
((qp->sq.head & 0xffff) << 8) |
@@ -2174,19 +2016,25 @@ out:
}
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
- int index, int *dbd, u32 *new_wqe)
+ int index, int *dbd, __be32 *new_wqe)
{
struct mthca_next_seg *next;
+ /*
+ * For SRQs, all WQEs generate a CQE, so we're always at the
+ * end of the doorbell chain.
+ */
+ if (qp->ibqp.srq) {
+ *new_wqe = 0;
+ return 0;
+ }
+
if (is_send)
next = get_send_wqe(qp, index);
else
next = get_recv_wqe(qp, index);
- if (mthca_is_memfree(dev))
- *dbd = 1;
- else
- *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
+ *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
if (next->ee_nds & cpu_to_be32(0x3f))
*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
(next->ee_nds & cpu_to_be32(0x3f));
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
new file mode 100644
index 00000000000..75cd2d84ef1
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include "mthca_dev.h"
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+#include "mthca_wqe.h"
+
+enum {
+ MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
+};
+
+struct mthca_tavor_srq_context {
+ __be64 wqe_base_ds; /* low 6 bits is descriptor size */
+ __be32 state_pd;
+ __be32 lkey;
+ __be32 uar;
+ __be32 wqe_cnt;
+ u32 reserved[2];
+};
+
+struct mthca_arbel_srq_context {
+ __be32 state_logsize_srqn;
+ __be32 lkey;
+ __be32 db_index;
+ __be32 logstride_usrpage;
+ __be64 wqe_base;
+ __be32 eq_pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved1;
+ __be16 wqe_counter;
+ u32 reserved2[3];
+};
+
+static void *get_wqe(struct mthca_srq *srq, int n)
+{
+ if (srq->is_direct)
+ return srq->queue.direct.buf + (n << srq->wqe_shift);
+ else
+ return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
+ ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
+}
+
+/*
+ * Return a pointer to the location within a WQE that we're using as a
+ * link when the WQE is in the free list. We use an offset of 4
+ * because in the Tavor case, posting a WQE may overwrite the first
+ * four bytes of the previous WQE. The offset avoids corrupting our
+ * free list if the WQE has already completed and been put on the free
+ * list when we post the next WQE.
+ */
+static inline int *wqe_to_link(void *wqe)
+{
+ return (int *) (wqe + 4);
+}
+
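+/*
+ * The two helpers below fill in the big-endian SRQ context that is
+ * handed to firmware with the SW2HW_SRQ command: the Tavor layout
+ * carries the descriptor size, PD and UAR, while the mem-free
+ * (Arbel) layout also carries the doorbell record index and the log
+ * of the queue size.
+ */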
+static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_srq *srq,
+ struct mthca_tavor_srq_context *context)
+{
+ memset(context, 0, sizeof *context);
+
+ context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
+ context->state_pd = cpu_to_be32(pd->pd_num);
+ context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
+
+ if (pd->ibpd.uobject)
+ context->uar =
+ cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ else
+ context->uar = cpu_to_be32(dev->driver_uar.index);
+}
+
+static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_srq *srq,
+ struct mthca_arbel_srq_context *context)
+{
+ int logsize;
+
+ memset(context, 0, sizeof *context);
+
+ logsize = long_log2(srq->max) + srq->wqe_shift;
+ context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
+ context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
+ context->db_index = cpu_to_be32(srq->db_index);
+ context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
+ if (pd->ibpd.uobject)
+ context->logstride_usrpage |=
+ cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ else
+ context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
+ context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
+}
+
+static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+ mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
+ srq->is_direct, &srq->mr);
+ kfree(srq->wrid);
+}
+
+static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
+ struct mthca_srq *srq)
+{
+ struct mthca_data_seg *scatter;
+ void *wqe;
+ int err;
+ int i;
+
+ if (pd->ibpd.uobject)
+ return 0;
+
+ srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
+ if (!srq->wrid)
+ return -ENOMEM;
+
+ err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
+ MTHCA_MAX_DIRECT_SRQ_SIZE,
+ &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
+ if (err) {
+ kfree(srq->wrid);
+ return err;
+ }
+
+ /*
+ * Now initialize the SRQ buffer so that all of the WQEs are
+ * linked into the list of free WQEs. In addition, set the
+ * scatter list L_Keys to the sentry value of 0x100.
+ */
+ for (i = 0; i < srq->max; ++i) {
+ wqe = get_wqe(srq, i);
+
+ *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+
+ for (scatter = wqe + sizeof (struct mthca_next_seg);
+ (void *) scatter < wqe + (1 << srq->wqe_shift);
+ ++scatter)
+ scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ }
+
+ return 0;
+}
+
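+/*
+ * Allocating an SRQ means grabbing an SRQ number, pinning ICM and a
+ * doorbell record on mem-free HCAs, building the WQE buffer and its
+ * free list, and then passing the context to firmware with
+ * SW2HW_SRQ.  Once firmware owns it, the SRQ is inserted into
+ * srq_table.srq so event dispatch can find it.
+ */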
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+ struct ib_srq_attr *attr, struct mthca_srq *srq)
+{
+ struct mthca_mailbox *mailbox;
+ u8 status;
+ int ds;
+ int err;
+
+ /* Sanity check SRQ size before proceeding */
+ if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
+ return -EINVAL;
+
+ srq->max = attr->max_wr;
+ srq->max_gs = attr->max_sge;
+ srq->last = NULL;
+ srq->counter = 0;
+
+ if (mthca_is_memfree(dev))
+ srq->max = roundup_pow_of_two(srq->max + 1);
+
+ ds = min(64UL,
+ roundup_pow_of_two(sizeof (struct mthca_next_seg) +
+ srq->max_gs * sizeof (struct mthca_data_seg)));
+ srq->wqe_shift = long_log2(ds);
+
+ srq->srqn = mthca_alloc(&dev->srq_table.alloc);
+ if (srq->srqn == -1)
+ return -ENOMEM;
+
+ if (mthca_is_memfree(dev)) {
+ err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
+ if (err)
+ goto err_out;
+
+ if (!pd->ibpd.uobject) {
+ srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
+ srq->srqn, &srq->db);
+ if (srq->db_index < 0) {
+ err = -ENOMEM;
+ goto err_out_icm;
+ }
+ }
+ }
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_out_db;
+ }
+
+ err = mthca_alloc_srq_buf(dev, pd, srq);
+ if (err)
+ goto err_out_mailbox;
+
+ spin_lock_init(&srq->lock);
+ atomic_set(&srq->refcount, 1);
+ init_waitqueue_head(&srq->wait);
+
+ if (mthca_is_memfree(dev))
+ mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+ else
+ mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+
+ err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+
+ if (err) {
+ mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
+ goto err_out_free_buf;
+ }
+ if (status) {
+ mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
+ status);
+ err = -EINVAL;
+ goto err_out_free_buf;
+ }
+
+ spin_lock_irq(&dev->srq_table.lock);
+ if (mthca_array_set(&dev->srq_table.srq,
+ srq->srqn & (dev->limits.num_srqs - 1),
+ srq)) {
+ spin_unlock_irq(&dev->srq_table.lock);
+ goto err_out_free_srq;
+ }
+ spin_unlock_irq(&dev->srq_table.lock);
+
+ mthca_free_mailbox(dev, mailbox);
+
+ srq->first_free = 0;
+ srq->last_free = srq->max - 1;
+
+ return 0;
+
+err_out_free_srq:
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ if (err)
+ mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+ else if (status)
+ mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+err_out_free_buf:
+ if (!pd->ibpd.uobject)
+ mthca_free_srq_buf(dev, srq);
+
+err_out_mailbox:
+ mthca_free_mailbox(dev, mailbox);
+
+err_out_db:
+ if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+ mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+
+err_out_icm:
+ mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+
+err_out:
+ mthca_free(&dev->srq_table.alloc, srq->srqn);
+
+ return err;
+}
+
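+/*
+ * Destroying an SRQ reverses the steps above: HW2SW_SRQ returns
+ * ownership to software, the SRQ is removed from the lookup array,
+ * and we wait for the reference count to drop before freeing the
+ * buffer, doorbell record and ICM.
+ */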
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+ struct mthca_mailbox *mailbox;
+ int err;
+ u8 status;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
+ mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
+ return;
+ }
+
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ if (err)
+ mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+ else if (status)
+ mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+ spin_lock_irq(&dev->srq_table.lock);
+ mthca_array_clear(&dev->srq_table.srq,
+ srq->srqn & (dev->limits.num_srqs - 1));
+ spin_unlock_irq(&dev->srq_table.lock);
+
+ atomic_dec(&srq->refcount);
+ wait_event(srq->wait, !atomic_read(&srq->refcount));
+
+ if (!srq->ibsrq.uobject) {
+ mthca_free_srq_buf(dev, srq);
+ if (mthca_is_memfree(dev))
+ mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+ }
+
+ mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+ mthca_free(&dev->srq_table.alloc, srq->srqn);
+ mthca_free_mailbox(dev, mailbox);
+}
+
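+/*
+ * Async event dispatch: look the SRQ up by number, take a reference
+ * so mthca_free_srq() cannot complete while the handler runs, and
+ * forward the event to the consumer's event_handler if one is set.
+ */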
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+ enum ib_event_type event_type)
+{
+ struct mthca_srq *srq;
+ struct ib_event event;
+
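+	/*
+	 * Look up the SRQ and take a reference under the table lock so
+	 * it cannot be freed while the event is being dispatched.
+	 */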
+ spin_lock(&dev->srq_table.lock);
+ srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
+ if (srq)
+ atomic_inc(&srq->refcount);
+ spin_unlock(&dev->srq_table.lock);
+
+ if (!srq) {
+ mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
+ return;
+ }
+
+ if (!srq->ibsrq.event_handler)
+ goto out;
+
+ event.device = &dev->ib_dev;
+ event.event = event_type;
+ event.element.srq = &srq->ibsrq;
+ srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+
+out:
+ if (atomic_dec_and_test(&srq->refcount))
+ wake_up(&srq->wait);
+}
+
+/*
+ * This function must be called with IRQs disabled.
+ */
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
+{
+ int ind;
+
+ ind = wqe_addr >> srq->wqe_shift;
+
+ spin_lock(&srq->lock);
+
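+	/*
+	 * Append the WQE to the tail of the free list.  Each free WQE
+	 * stores the index of the next free WQE (via wqe_to_link());
+	 * -1 marks the end of the list.
+	 */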
+ if (likely(srq->first_free >= 0))
+ *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+ else
+ srq->first_free = ind;
+
+ *wqe_to_link(get_wqe(srq, ind)) = -1;
+ srq->last_free = ind;
+
+ spin_unlock(&srq->lock);
+}
+
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ unsigned long flags;
+ int err = 0;
+ int first_ind;
+ int ind;
+ int next_ind;
+ int nreq;
+ int i;
+ void *wqe;
+ void *prev_wqe;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ first_ind = srq->first_free;
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ ind = srq->first_free;
+
+ if (ind < 0) {
+ mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+ err = -ENOMEM;
+ *bad_wr = wr;
+			break;
+ }
+
+ wqe = get_wqe(srq, ind);
+ next_ind = *wqe_to_link(wqe);
+ prev_wqe = srq->last;
+ srq->last = wqe;
+
+ ((struct mthca_next_seg *) wqe)->nda_op = 0;
+ ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+ /* flags field will always remain 0 */
+
+ wqe += sizeof (struct mthca_next_seg);
+
+ if (unlikely(wr->num_sge > srq->max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ srq->last = prev_wqe;
+			break;
+ }
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ ((struct mthca_data_seg *) wqe)->byte_count =
+ cpu_to_be32(wr->sg_list[i].length);
+ ((struct mthca_data_seg *) wqe)->lkey =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ ((struct mthca_data_seg *) wqe)->addr =
+ cpu_to_be64(wr->sg_list[i].addr);
+ wqe += sizeof (struct mthca_data_seg);
+ }
+
+ if (i < srq->max_gs) {
+ ((struct mthca_data_seg *) wqe)->byte_count = 0;
+ ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ ((struct mthca_data_seg *) wqe)->addr = 0;
+ }
+
+ if (likely(prev_wqe)) {
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32((ind << srq->wqe_shift) | 1);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD);
+ }
+
+ srq->wrid[ind] = wr->wr_id;
+ srq->first_free = next_ind;
+ }
+
+ if (likely(nreq)) {
+ __be32 doorbell[2];
+
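+		/*
+		 * Doorbell word 0 carries the offset of the first WQE
+		 * posted in this batch; word 1 carries the SRQ number
+		 * (shifted left by 8) and the count of WQEs posted.
+		 */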
+ doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
+ doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell is rung.
+ */
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+ return err;
+}
+
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ unsigned long flags;
+ int err = 0;
+ int ind;
+ int next_ind;
+ int nreq;
+ int i;
+ void *wqe;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ ind = srq->first_free;
+
+ if (ind < 0) {
+ mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+ err = -ENOMEM;
+ *bad_wr = wr;
+			break;
+ }
+
+ wqe = get_wqe(srq, ind);
+ next_ind = *wqe_to_link(wqe);
+
+ ((struct mthca_next_seg *) wqe)->nda_op =
+ cpu_to_be32((next_ind << srq->wqe_shift) | 1);
+ ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+ /* flags field will always remain 0 */
+
+ wqe += sizeof (struct mthca_next_seg);
+
+ if (unlikely(wr->num_sge > srq->max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+			break;
+ }
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ ((struct mthca_data_seg *) wqe)->byte_count =
+ cpu_to_be32(wr->sg_list[i].length);
+ ((struct mthca_data_seg *) wqe)->lkey =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ ((struct mthca_data_seg *) wqe)->addr =
+ cpu_to_be64(wr->sg_list[i].addr);
+ wqe += sizeof (struct mthca_data_seg);
+ }
+
+ if (i < srq->max_gs) {
+ ((struct mthca_data_seg *) wqe)->byte_count = 0;
+ ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ ((struct mthca_data_seg *) wqe)->addr = 0;
+ }
+
+ srq->wrid[ind] = wr->wr_id;
+ srq->first_free = next_ind;
+ }
+
+ if (likely(nreq)) {
+ srq->counter += nreq;
+
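+		/*
+		 * Mem-free hardware is told about new receive WQEs by
+		 * updating a doorbell record in memory with the running
+		 * WQE counter, rather than by a doorbell register write.
+		 */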
+ /*
+ * Make sure that descriptors are written before
+ * we write doorbell record.
+ */
+ wmb();
+ *srq->db = cpu_to_be32(srq->counter);
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+ return err;
+}
+
+int __devinit mthca_init_srq_table(struct mthca_dev *dev)
+{
+ int err;
+
+ if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+ return 0;
+
+ spin_lock_init(&dev->srq_table.lock);
+
+ err = mthca_alloc_init(&dev->srq_table.alloc,
+ dev->limits.num_srqs,
+ dev->limits.num_srqs - 1,
+ dev->limits.reserved_srqs);
+ if (err)
+ return err;
+
+ err = mthca_array_init(&dev->srq_table.srq,
+ dev->limits.num_srqs);
+ if (err)
+ mthca_alloc_cleanup(&dev->srq_table.alloc);
+
+ return err;
+}
+
+void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
+{
+ if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+ return;
+
+ mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
+ mthca_alloc_cleanup(&dev->srq_table.alloc);
+}
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 3024c1b4547..41613ec8a04 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -69,6 +69,17 @@ struct mthca_create_cq_resp {
__u32 reserved;
};
+struct mthca_create_srq {
+ __u32 lkey;
+ __u32 db_index;
+ __u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
new file mode 100644
index 00000000000..1f4c0ff28f7
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_wqe.h 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#ifndef MTHCA_WQE_H
+#define MTHCA_WQE_H
+
+#include <linux/types.h>
+
+enum {
+ MTHCA_NEXT_DBD = 1 << 7,
+ MTHCA_NEXT_FENCE = 1 << 6,
+ MTHCA_NEXT_CQ_UPDATE = 1 << 3,
+ MTHCA_NEXT_EVENT_GEN = 1 << 2,
+ MTHCA_NEXT_SOLICIT = 1 << 1,
+
+ MTHCA_MLX_VL15 = 1 << 17,
+ MTHCA_MLX_SLR = 1 << 16
+};
+
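+/* Special lkey used to mark unused scatter/gather entries as invalid. */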
+enum {
+ MTHCA_INVAL_LKEY = 0x100
+};
+
+struct mthca_next_seg {
+ __be32 nda_op; /* [31:6] next WQE [4:0] next opcode */
+ __be32 ee_nds; /* [31:8] next EE [7] DBD [6] F [5:0] next WQE size */
+ __be32 flags; /* [3] CQ [2] Event [1] Solicit */
+ __be32 imm; /* immediate data */
+};
+
+struct mthca_tavor_ud_seg {
+ u32 reserved1;
+ __be32 lkey;
+ __be64 av_addr;
+ u32 reserved2[4];
+ __be32 dqpn;
+ __be32 qkey;
+ u32 reserved3[2];
+};
+
+struct mthca_arbel_ud_seg {
+ __be32 av[8];
+ __be32 dqpn;
+ __be32 qkey;
+ u32 reserved[2];
+};
+
+struct mthca_bind_seg {
+ __be32 flags; /* [31] Atomic [30] rem write [29] rem read */
+ u32 reserved;
+ __be32 new_rkey;
+ __be32 lkey;
+ __be64 addr;
+ __be64 length;
+};
+
+struct mthca_raddr_seg {
+ __be64 raddr;
+ __be32 rkey;
+ u32 reserved;
+};
+
+struct mthca_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+};
+
+struct mthca_data_seg {
+ __be32 byte_count;
+ __be32 lkey;
+ __be64 addr;
+};
+
+struct mthca_mlx_seg {
+ __be32 nda_op;
+ __be32 nds;
+ __be32 flags; /* [17] VL15 [16] SLR [14:12] static rate
+ [11:8] SL [3] C [2] E */
+ __be16 rlid;
+ __be16 vcrc;
+};
+
+#endif /* MTHCA_WQE_H */
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index 394bc08abc6..8935e74ae3f 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -1,5 +1,3 @@
-EXTRA_CFLAGS += -Idrivers/infiniband/include
-
obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
ib_ipoib-y := ipoib_main.o \
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 04c98f54e9c..bea960b8191 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -49,9 +51,9 @@
#include <asm/atomic.h>
#include <asm/semaphore.h>
-#include <ib_verbs.h>
-#include <ib_pack.h>
-#include <ib_sa.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_sa.h>
/* constants */
@@ -88,8 +90,8 @@ enum {
/* structs */
struct ipoib_header {
- u16 proto;
- u16 reserved;
+ __be16 proto;
+ u16 reserved;
};
struct ipoib_pseudoheader {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index a84e5fe0f19..38b150f775e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -97,7 +97,7 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
for (n = 0, i = 0; i < sizeof mgid / 2; ++i) {
n += sprintf(gid_buf + n, "%x",
- be16_to_cpu(((u16 *)mgid.raw)[i]));
+ be16_to_cpu(((__be16 *) mgid.raw)[i]));
if (i < sizeof mgid / 2 - 1)
gid_buf[n++] = ':';
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index eee82363167..ef0e3894863 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1,5 +1,8 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +38,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
#include "ipoib.h"
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index fa00816a3cf..0e8ac138e35 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -34,7 +36,6 @@
#include "ipoib.h"
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -607,8 +608,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
IPOIB_GID_FMT "\n",
skb->dst ? "neigh" : "dst",
- be16_to_cpup((u16 *) skb->data),
- be32_to_cpup((u32 *) phdr->hwaddr),
+ be16_to_cpup((__be16 *) skb->data),
+ be32_to_cpup((__be32 *) phdr->hwaddr),
IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
dev_kfree_skb_any(skb);
++priv->stats.tx_dropped;
@@ -671,7 +672,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- schedule_work(&priv->restart_task);
+ queue_work(ipoib_workqueue, &priv->restart_task);
}
static void ipoib_neigh_destructor(struct neighbour *n)
@@ -780,15 +781,11 @@ void ipoib_dev_cleanup(struct net_device *dev)
ipoib_ib_dev_cleanup(dev);
- if (priv->rx_ring) {
- kfree(priv->rx_ring);
- priv->rx_ring = NULL;
- }
+ kfree(priv->rx_ring);
+ kfree(priv->tx_ring);
- if (priv->tx_ring) {
- kfree(priv->tx_ring);
- priv->tx_ring = NULL;
- }
+ priv->rx_ring = NULL;
+ priv->tx_ring = NULL;
}
static void ipoib_setup(struct net_device *dev)
@@ -886,6 +883,12 @@ static ssize_t create_child(struct class_device *cdev,
if (pkey < 0 || pkey > 0xffff)
return -EINVAL;
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ pkey |= 0x8000;
+
ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
pkey);
@@ -938,6 +941,12 @@ static struct net_device *ipoib_add_port(const char *format,
goto alloc_mem_failed;
}
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ priv->pkey |= 0x8000;
+
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 70208c3d21e..aca7aea18a6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -357,7 +359,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
rec.mgid = mcast->mcmember.mgid;
rec.port_gid = priv->local_gid;
- rec.pkey = be16_to_cpu(priv->pkey);
+ rec.pkey = cpu_to_be16(priv->pkey);
ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
IB_SA_MCMEMBER_REC_MGID |
@@ -457,7 +459,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.mgid = mcast->mcmember.mgid;
rec.port_gid = priv->local_gid;
- rec.pkey = be16_to_cpu(priv->pkey);
+ rec.pkey = cpu_to_be16(priv->pkey);
comp_mask =
IB_SA_MCMEMBER_REC_MGID |
@@ -646,7 +648,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
rec.mgid = mcast->mcmember.mgid;
rec.port_gid = priv->local_gid;
- rec.pkey = be16_to_cpu(priv->pkey);
+ rec.pkey = cpu_to_be16(priv->pkey);
/* Remove ourselves from the multicast group */
ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 4933edf062c..79f59d0563e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -32,7 +33,7 @@
* $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
#include "ipoib.h"
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 94b8ea812fe..332d730e60c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -32,7 +32,6 @@
* $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/media/dvb/frontends/tda80xx.c b/drivers/media/dvb/frontends/tda80xx.c
index 88e125079ca..d1cabb6a0a1 100644
--- a/drivers/media/dvb/frontends/tda80xx.c
+++ b/drivers/media/dvb/frontends/tda80xx.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <asm/irq.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
new file mode 100644
index 00000000000..1588a59e376
--- /dev/null
+++ b/drivers/mfd/Kconfig
@@ -0,0 +1,16 @@
+#
+# Multifunction miscellaneous devices
+#
+
+menu "Multimedia Capabilities Port drivers"
+
+config MCP
+ tristate
+
+# Interface drivers
+config MCP_SA11X0
+ tristate "Support SA11x0 MCP interface"
+ depends on ARCH_SA1100
+ select MCP
+
+endmenu
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
new file mode 100644
index 00000000000..98bdd6a4218
--- /dev/null
+++ b/drivers/mfd/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for multifunction miscellaneous devices
+#
+
+obj-$(CONFIG_MCP) += mcp-core.o
+obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
new file mode 100644
index 00000000000..c75d713c01e
--- /dev/null
+++ b/drivers/mfd/mcp-core.c
@@ -0,0 +1,255 @@
+/*
+ * linux/drivers/mfd/mcp-core.c
+ *
+ * Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * Generic MCP (Multimedia Communications Port) layer. All MCP locking
+ * is solely held within this file.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/device.h>
+
+#include <asm/dma.h>
+#include <asm/system.h>
+
+#include "mcp.h"
+
+#define to_mcp(d) container_of(d, struct mcp, attached_device)
+#define to_mcp_driver(d) container_of(d, struct mcp_driver, drv)
+
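+/*
+ * There is only one kind of device on the MCP bus, so every driver
+ * matches every device.
+ */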
+static int mcp_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static int mcp_bus_probe(struct device *dev)
+{
+ struct mcp *mcp = to_mcp(dev);
+ struct mcp_driver *drv = to_mcp_driver(dev->driver);
+
+ return drv->probe(mcp);
+}
+
+static int mcp_bus_remove(struct device *dev)
+{
+ struct mcp *mcp = to_mcp(dev);
+ struct mcp_driver *drv = to_mcp_driver(dev->driver);
+
+ drv->remove(mcp);
+ return 0;
+}
+
+static int mcp_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct mcp *mcp = to_mcp(dev);
+ int ret = 0;
+
+ if (dev->driver) {
+ struct mcp_driver *drv = to_mcp_driver(dev->driver);
+
+ ret = drv->suspend(mcp, state);
+ }
+ return ret;
+}
+
+static int mcp_bus_resume(struct device *dev)
+{
+ struct mcp *mcp = to_mcp(dev);
+ int ret = 0;
+
+ if (dev->driver) {
+ struct mcp_driver *drv = to_mcp_driver(dev->driver);
+
+ ret = drv->resume(mcp);
+ }
+ return ret;
+}
+
+static struct bus_type mcp_bus_type = {
+ .name = "mcp",
+ .match = mcp_bus_match,
+ .suspend = mcp_bus_suspend,
+ .resume = mcp_bus_resume,
+};
+
+/**
+ * mcp_set_telecom_divisor - set the telecom divisor
+ * @mcp: MCP interface structure
+ * @div: SIB clock divisor
+ *
+ * Set the telecom divisor on the MCP interface. The resulting
+ * sample rate is SIBCLOCK/div.
+ */
+void mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div)
+{
+ spin_lock_irq(&mcp->lock);
+ mcp->ops->set_telecom_divisor(mcp, div);
+ spin_unlock_irq(&mcp->lock);
+}
+EXPORT_SYMBOL(mcp_set_telecom_divisor);
+
+/**
+ * mcp_set_audio_divisor - set the audio divisor
+ * @mcp: MCP interface structure
+ * @div: SIB clock divisor
+ *
+ * Set the audio divisor on the MCP interface.
+ */
+void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div)
+{
+ spin_lock_irq(&mcp->lock);
+ mcp->ops->set_audio_divisor(mcp, div);
+ spin_unlock_irq(&mcp->lock);
+}
+EXPORT_SYMBOL(mcp_set_audio_divisor);
+
+/**
+ * mcp_reg_write - write a device register
+ * @mcp: MCP interface structure
+ * @reg: 4-bit register index
+ * @val: 16-bit data value
+ *
+ * Write a device register. The MCP interface must be enabled
+ * to prevent this function hanging.
+ */
+void mcp_reg_write(struct mcp *mcp, unsigned int reg, unsigned int val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcp->lock, flags);
+ mcp->ops->reg_write(mcp, reg, val);
+ spin_unlock_irqrestore(&mcp->lock, flags);
+}
+EXPORT_SYMBOL(mcp_reg_write);
+
+/**
+ * mcp_reg_read - read a device register
+ * @mcp: MCP interface structure
+ * @reg: 4-bit register index
+ *
+ * Read a device register and return its value. The MCP interface
+ * must be enabled to prevent this function hanging.
+ */
+unsigned int mcp_reg_read(struct mcp *mcp, unsigned int reg)
+{
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&mcp->lock, flags);
+ val = mcp->ops->reg_read(mcp, reg);
+ spin_unlock_irqrestore(&mcp->lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(mcp_reg_read);
+
+/**
+ * mcp_enable - enable the MCP interface
+ * @mcp: MCP interface to enable
+ *
+ * Enable the MCP interface. Each call to mcp_enable will need
+ * a corresponding call to mcp_disable to disable the interface.
+ */
+void mcp_enable(struct mcp *mcp)
+{
+ spin_lock_irq(&mcp->lock);
+ if (mcp->use_count++ == 0)
+ mcp->ops->enable(mcp);
+ spin_unlock_irq(&mcp->lock);
+}
+EXPORT_SYMBOL(mcp_enable);
+
+/**
+ * mcp_disable - disable the MCP interface
+ * @mcp: MCP interface to disable
+ *
+ * Disable the MCP interface. The MCP interface will only be
+ * disabled once the number of calls to mcp_enable matches the
+ * number of calls to mcp_disable.
+ */
+void mcp_disable(struct mcp *mcp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcp->lock, flags);
+ if (--mcp->use_count == 0)
+ mcp->ops->disable(mcp);
+ spin_unlock_irqrestore(&mcp->lock, flags);
+}
+EXPORT_SYMBOL(mcp_disable);
+
+static void mcp_release(struct device *dev)
+{
+ struct mcp *mcp = container_of(dev, struct mcp, attached_device);
+
+ kfree(mcp);
+}
+
+struct mcp *mcp_host_alloc(struct device *parent, size_t size)
+{
+ struct mcp *mcp;
+
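+	/*
+	 * Allocate the mcp structure together with 'size' bytes of
+	 * interface-private data, reachable later via mcp_priv().
+	 */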
+ mcp = kmalloc(sizeof(struct mcp) + size, GFP_KERNEL);
+ if (mcp) {
+ memset(mcp, 0, sizeof(struct mcp) + size);
+ spin_lock_init(&mcp->lock);
+ mcp->attached_device.parent = parent;
+ mcp->attached_device.bus = &mcp_bus_type;
+ mcp->attached_device.dma_mask = parent->dma_mask;
+ mcp->attached_device.release = mcp_release;
+ }
+ return mcp;
+}
+EXPORT_SYMBOL(mcp_host_alloc);
+
+int mcp_host_register(struct mcp *mcp)
+{
+ strcpy(mcp->attached_device.bus_id, "mcp0");
+ return device_register(&mcp->attached_device);
+}
+EXPORT_SYMBOL(mcp_host_register);
+
+void mcp_host_unregister(struct mcp *mcp)
+{
+ device_unregister(&mcp->attached_device);
+}
+EXPORT_SYMBOL(mcp_host_unregister);
+
+int mcp_driver_register(struct mcp_driver *mcpdrv)
+{
+ mcpdrv->drv.bus = &mcp_bus_type;
+ mcpdrv->drv.probe = mcp_bus_probe;
+ mcpdrv->drv.remove = mcp_bus_remove;
+ return driver_register(&mcpdrv->drv);
+}
+EXPORT_SYMBOL(mcp_driver_register);
+
+void mcp_driver_unregister(struct mcp_driver *mcpdrv)
+{
+ driver_unregister(&mcpdrv->drv);
+}
+EXPORT_SYMBOL(mcp_driver_unregister);
+
+static int __init mcp_init(void)
+{
+ return bus_register(&mcp_bus_type);
+}
+
+static void __exit mcp_exit(void)
+{
+ bus_unregister(&mcp_bus_type);
+}
+
+module_init(mcp_init);
+module_exit(mcp_exit);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Core multimedia communications port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
new file mode 100644
index 00000000000..e9806fbbe69
--- /dev/null
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -0,0 +1,275 @@
+/*
+ * linux/drivers/mfd/mcp-sa11x0.c
+ *
+ * Copyright (C) 2001-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * SA11x0 MCP (Multimedia Communications Port) driver.
+ *
+ * MCP read/write timeouts from Jordi Colomer, rehacked by rmk.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include <asm/dma.h>
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/system.h>
+#include <asm/arch/mcp.h>
+
+#include <asm/arch/assabet.h>
+
+#include "mcp.h"
+
+struct mcp_sa11x0 {
+ u32 mccr0;
+ u32 mccr1;
+};
+
+#define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp))
+
+static void
+mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor)
+{
+ unsigned int mccr0;
+
+ divisor /= 32;
+
+ mccr0 = Ser4MCCR0 & ~0x00007f00;
+ mccr0 |= divisor << 8;
+ Ser4MCCR0 = mccr0;
+}
+
+static void
+mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
+{
+ unsigned int mccr0;
+
+ divisor /= 32;
+
+ mccr0 = Ser4MCCR0 & ~0x0000007f;
+ mccr0 |= divisor;
+ Ser4MCCR0 = mccr0;
+}
+
+/*
+ * Write data to the device. The bit should be set after 3 subframe
+ * times (each frame is 64 clocks). We wait a maximum of 6 subframes.
+ * We really should try doing something more productive while we
+ * wait.
+ */
+static void
+mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
+{
+ int ret = -ETIME;
+ int i;
+
+ Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff);
+
+ for (i = 0; i < 2; i++) {
+ udelay(mcp->rw_timeout);
+ if (Ser4MCSR & MCSR_CWC) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ printk(KERN_WARNING "mcp: write timed out\n");
+}
+
+/*
+ * Read data from the device. The bit should be set after 3 subframe
+ * times (each frame is 64 clocks). We wait a maximum of 6 subframes.
+ * We really should try doing something more productive while we
+ * wait.
+ */
+static unsigned int
+mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
+{
+ int ret = -ETIME;
+ int i;
+
+ Ser4MCDR2 = reg << 17 | MCDR2_Rd;
+
+ for (i = 0; i < 2; i++) {
+ udelay(mcp->rw_timeout);
+ if (Ser4MCSR & MCSR_CRC) {
+ ret = Ser4MCDR2 & 0xffff;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ printk(KERN_WARNING "mcp: read timed out\n");
+
+ return ret;
+}
+
+static void mcp_sa11x0_enable(struct mcp *mcp)
+{
+ Ser4MCSR = -1;
+ Ser4MCCR0 |= MCCR0_MCE;
+}
+
+static void mcp_sa11x0_disable(struct mcp *mcp)
+{
+ Ser4MCCR0 &= ~MCCR0_MCE;
+}
+
+/*
+ * Our methods.
+ */
+static struct mcp_ops mcp_sa11x0 = {
+ .set_telecom_divisor = mcp_sa11x0_set_telecom_divisor,
+ .set_audio_divisor = mcp_sa11x0_set_audio_divisor,
+ .reg_write = mcp_sa11x0_write,
+ .reg_read = mcp_sa11x0_read,
+ .enable = mcp_sa11x0_enable,
+ .disable = mcp_sa11x0_disable,
+};
+
+static int mcp_sa11x0_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mcp_plat_data *data = pdev->dev.platform_data;
+ struct mcp *mcp;
+ int ret;
+
+ if (!data)
+ return -ENODEV;
+
+ if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp"))
+ return -EBUSY;
+
+ mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0));
+ if (!mcp) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ mcp->owner = THIS_MODULE;
+ mcp->ops = &mcp_sa11x0;
+ mcp->sclk_rate = data->sclk_rate;
+ mcp->dma_audio_rd = DMA_Ser4MCP0Rd;
+ mcp->dma_audio_wr = DMA_Ser4MCP0Wr;
+ mcp->dma_telco_rd = DMA_Ser4MCP1Rd;
+ mcp->dma_telco_wr = DMA_Ser4MCP1Wr;
+
+ dev_set_drvdata(dev, mcp);
+
+ if (machine_is_assabet()) {
+ ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
+ }
+
+ /*
+ * Setup the PPC unit correctly.
+ */
+ PPDR &= ~PPC_RXD4;
+ PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
+ PSDR |= PPC_RXD4;
+ PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
+ PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
+
+ /*
+ * Initialise device. Note that we initially
+ * set the sampling rate to minimum.
+ */
+ Ser4MCSR = -1;
+ Ser4MCCR1 = data->mccr1;
+ Ser4MCCR0 = data->mccr0 | 0x7f7f;
+
+ /*
+ * Calculate the read/write timeout (us) from the bit clock
+ * rate. This is the period for 3 64-bit frames. Always
+ * round this time up.
+ */
+ mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
+ mcp->sclk_rate;
+
+ ret = mcp_host_register(mcp);
+ if (ret == 0)
+ goto out;
+
+ release:
+ release_mem_region(0x80060000, 0x60);
+ dev_set_drvdata(dev, NULL);
+
+ out:
+ return ret;
+}
+
+static int mcp_sa11x0_remove(struct device *dev)
+{
+ struct mcp *mcp = dev_get_drvdata(dev);
+
+ dev_set_drvdata(dev, NULL);
+ mcp_host_unregister(mcp);
+ release_mem_region(0x80060000, 0x60);
+
+ return 0;
+}
+
+static int mcp_sa11x0_suspend(struct device *dev, pm_message_t state, u32 level)
+{
+ struct mcp *mcp = dev_get_drvdata(dev);
+
+ if (level == SUSPEND_DISABLE) {
+ priv(mcp)->mccr0 = Ser4MCCR0;
+ priv(mcp)->mccr1 = Ser4MCCR1;
+ Ser4MCCR0 &= ~MCCR0_MCE;
+ }
+ return 0;
+}
+
+static int mcp_sa11x0_resume(struct device *dev, u32 level)
+{
+ struct mcp *mcp = dev_get_drvdata(dev);
+
+ if (level == RESUME_RESTORE_STATE) {
+ Ser4MCCR1 = priv(mcp)->mccr1;
+ Ser4MCCR0 = priv(mcp)->mccr0;
+ }
+ return 0;
+}
+
+/*
+ * The driver for the SA11x0 MCP port.
+ */
+static struct device_driver mcp_sa11x0_driver = {
+ .name = "sa11x0-mcp",
+ .bus = &platform_bus_type,
+ .probe = mcp_sa11x0_probe,
+ .remove = mcp_sa11x0_remove,
+ .suspend = mcp_sa11x0_suspend,
+ .resume = mcp_sa11x0_resume,
+};
+
+/*
+ * This needs re-working
+ */
+static int __init mcp_sa11x0_init(void)
+{
+ return driver_register(&mcp_sa11x0_driver);
+}
+
+static void __exit mcp_sa11x0_exit(void)
+{
+ driver_unregister(&mcp_sa11x0_driver);
+}
+
+module_init(mcp_sa11x0_init);
+module_exit(mcp_sa11x0_exit);
+
+MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("SA11x0 multimedia communications port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mcp.h b/drivers/mfd/mcp.h
new file mode 100644
index 00000000000..c093a93b880
--- /dev/null
+++ b/drivers/mfd/mcp.h
@@ -0,0 +1,66 @@
+/*
+ * linux/drivers/mfd/mcp.h
+ *
+ * Copyright (C) 2001 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#ifndef MCP_H
+#define MCP_H
+
+struct mcp_ops;
+
+struct mcp {
+ struct module *owner;
+ struct mcp_ops *ops;
+ spinlock_t lock;
+ int use_count;
+ unsigned int sclk_rate;
+ unsigned int rw_timeout;
+ dma_device_t dma_audio_rd;
+ dma_device_t dma_audio_wr;
+ dma_device_t dma_telco_rd;
+ dma_device_t dma_telco_wr;
+ struct device attached_device;
+};
+
+struct mcp_ops {
+ void (*set_telecom_divisor)(struct mcp *, unsigned int);
+ void (*set_audio_divisor)(struct mcp *, unsigned int);
+ void (*reg_write)(struct mcp *, unsigned int, unsigned int);
+ unsigned int (*reg_read)(struct mcp *, unsigned int);
+ void (*enable)(struct mcp *);
+ void (*disable)(struct mcp *);
+};
+
+void mcp_set_telecom_divisor(struct mcp *, unsigned int);
+void mcp_set_audio_divisor(struct mcp *, unsigned int);
+void mcp_reg_write(struct mcp *, unsigned int, unsigned int);
+unsigned int mcp_reg_read(struct mcp *, unsigned int);
+void mcp_enable(struct mcp *);
+void mcp_disable(struct mcp *);
+#define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate)
+
+struct mcp *mcp_host_alloc(struct device *, size_t);
+int mcp_host_register(struct mcp *);
+void mcp_host_unregister(struct mcp *);
+
+struct mcp_driver {
+ struct device_driver drv;
+ int (*probe)(struct mcp *);
+ void (*remove)(struct mcp *);
+ int (*suspend)(struct mcp *, pm_message_t);
+ int (*resume)(struct mcp *);
+};
+
+int mcp_driver_register(struct mcp_driver *);
+void mcp_driver_unregister(struct mcp_driver *);
+
+#define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device)
+#define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d)
+
+#define mcp_priv(mcp) ((void *)((mcp)+1))
+
+#endif
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index eeb9f6668e6..3c5904834fe 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -361,7 +361,7 @@ static void mmc_decode_cid(struct mmc_card *card)
default:
printk("%s: card has unknown MMCA version %d\n",
- card->host->host_name, card->csd.mmca_vsn);
+ mmc_hostname(card->host), card->csd.mmca_vsn);
mmc_card_set_bad(card);
break;
}
@@ -383,7 +383,7 @@ static void mmc_decode_csd(struct mmc_card *card)
csd_struct = UNSTUFF_BITS(resp, 126, 2);
if (csd_struct != 1 && csd_struct != 2) {
printk("%s: unrecognised CSD structure version %d\n",
- card->host->host_name, csd_struct);
+ mmc_hostname(card->host), csd_struct);
mmc_card_set_bad(card);
return;
}
@@ -551,7 +551,7 @@ static void mmc_discover_cards(struct mmc_host *host)
}
if (err != MMC_ERR_NONE) {
printk(KERN_ERR "%s: error requesting CID: %d\n",
- host->host_name, err);
+ mmc_hostname(host), err);
break;
}
@@ -796,17 +796,13 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
struct mmc_host *host;
- host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
+ host = mmc_alloc_host_sysfs(extra, dev);
if (host) {
- memset(host, 0, sizeof(struct mmc_host) + extra);
-
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_LIST_HEAD(&host->cards);
INIT_WORK(&host->detect, mmc_rescan, host);
- host->dev = dev;
-
/*
* By default, hosts do not support SGIO or large requests.
* They have to set these according to their abilities.
@@ -828,15 +824,15 @@ EXPORT_SYMBOL(mmc_alloc_host);
*/
int mmc_add_host(struct mmc_host *host)
{
- static unsigned int host_num;
+ int ret;
- snprintf(host->host_name, sizeof(host->host_name),
- "mmc%d", host_num++);
-
- mmc_power_off(host);
- mmc_detect_change(host);
+ ret = mmc_add_host_sysfs(host);
+ if (ret == 0) {
+ mmc_power_off(host);
+ mmc_detect_change(host);
+ }
- return 0;
+ return ret;
}
EXPORT_SYMBOL(mmc_add_host);
@@ -859,6 +855,7 @@ void mmc_remove_host(struct mmc_host *host)
}
mmc_power_off(host);
+ mmc_remove_host_sysfs(host);
}
EXPORT_SYMBOL(mmc_remove_host);
@@ -872,7 +869,7 @@ EXPORT_SYMBOL(mmc_remove_host);
void mmc_free_host(struct mmc_host *host)
{
flush_scheduled_work();
- kfree(host);
+ mmc_free_host_sysfs(host);
}
EXPORT_SYMBOL(mmc_free_host);
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index b498dffe0b1..97bae00292f 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -13,4 +13,9 @@
void mmc_init_card(struct mmc_card *card, struct mmc_host *host);
int mmc_register_card(struct mmc_card *card);
void mmc_remove_card(struct mmc_card *card);
+
+struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev);
+int mmc_add_host_sysfs(struct mmc_host *host);
+void mmc_remove_host_sysfs(struct mmc_host *host);
+void mmc_free_host_sysfs(struct mmc_host *host);
#endif
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index 5556cd3b555..ad8949810fc 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/idr.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -20,6 +21,7 @@
#define dev_to_mmc_card(d) container_of(d, struct mmc_card, dev)
#define to_mmc_driver(d) container_of(d, struct mmc_driver, drv)
+#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
#define MMC_ATTR(name, fmt, args...) \
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
@@ -206,7 +208,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
int mmc_register_card(struct mmc_card *card)
{
snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
- "%s:%04x", card->host->host_name, card->rca);
+ "%s:%04x", mmc_hostname(card->host), card->rca);
return device_add(&card->dev);
}
@@ -224,13 +226,97 @@ void mmc_remove_card(struct mmc_card *card)
}
+static void mmc_host_classdev_release(struct class_device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ kfree(host);
+}
+
+static struct class mmc_host_class = {
+ .name = "mmc_host",
+ .release = mmc_host_classdev_release,
+};
+
+static DEFINE_IDR(mmc_host_idr);
+static DEFINE_SPINLOCK(mmc_host_lock);
+
+/*
+ * Internal function. Allocate a new MMC host.
+ */
+struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev)
+{
+ struct mmc_host *host;
+
+ host = kmalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
+ if (host) {
+ memset(host, 0, sizeof(struct mmc_host) + extra);
+
+ host->dev = dev;
+ host->class_dev.dev = host->dev;
+ host->class_dev.class = &mmc_host_class;
+ class_device_initialize(&host->class_dev);
+ }
+
+ return host;
+}
+
+/*
+ * Internal function. Register a new MMC host with the MMC class.
+ */
+int mmc_add_host_sysfs(struct mmc_host *host)
+{
+ int err;
+
+ if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock(&mmc_host_lock);
+ err = idr_get_new(&mmc_host_idr, host, &host->index);
+ spin_unlock(&mmc_host_lock);
+ if (err)
+ return err;
+
+ snprintf(host->class_dev.class_id, BUS_ID_SIZE,
+ "mmc%d", host->index);
+
+ return class_device_add(&host->class_dev);
+}
+
+/*
+ * Internal function. Unregister a MMC host with the MMC class.
+ */
+void mmc_remove_host_sysfs(struct mmc_host *host)
+{
+ class_device_del(&host->class_dev);
+
+ spin_lock(&mmc_host_lock);
+ idr_remove(&mmc_host_idr, host->index);
+ spin_unlock(&mmc_host_lock);
+}
+
+/*
+ * Internal function. Free a MMC host.
+ */
+void mmc_free_host_sysfs(struct mmc_host *host)
+{
+ class_device_put(&host->class_dev);
+}
+
+
static int __init mmc_init(void)
{
- return bus_register(&mmc_bus_type);
+ int ret = bus_register(&mmc_bus_type);
+ if (ret == 0) {
+ ret = class_register(&mmc_host_class);
+ if (ret)
+ bus_unregister(&mmc_bus_type);
+ }
+ return ret;
}
static void __exit mmc_exit(void)
{
+ class_unregister(&mmc_host_class);
bus_unregister(&mmc_bus_type);
}
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 7a42966d755..716c4ef4faf 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -34,7 +34,7 @@
#ifdef CONFIG_MMC_DEBUG
#define DBG(host,fmt,args...) \
- pr_debug("%s: %s: " fmt, host->mmc->host_name, __func__ , args)
+ pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
#else
#define DBG(host,fmt,args...) do { } while (0)
#endif
@@ -541,7 +541,7 @@ static int mmci_probe(struct amba_device *dev, void *id)
mmc_add_host(mmc);
printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%08lx irq %d,%d\n",
- mmc->host_name, amba_rev(dev), amba_config(dev),
+ mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
dev->res.start, dev->irq[0], dev->irq[1]);
init_timer(&host->timer);
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 974f2f36bdb..402c2d661fb 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1796,7 +1796,7 @@ static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
mmc_add_host(mmc);
- printk(KERN_INFO "%s: W83L51xD", mmc->host_name);
+ printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
if (host->chip_id != 0)
printk(" id %x", (int)host->chip_id);
printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8edb6936fb9..79e8aa6f2b9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -131,6 +131,8 @@ config NET_SB1000
source "drivers/net/arcnet/Kconfig"
+source "drivers/net/phy/Kconfig"
+
#
# Ethernet
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 63c6d1e6d4d..a369ae284a9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
#
obj-$(CONFIG_MII) += mii.o
+obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_SUNDANCE) += sundance.o
obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 3707df6b0cf..60304f7e7e5 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -87,7 +87,6 @@ extern struct net_device *mvme147lance_probe(int unit);
extern struct net_device *tc515_probe(int unit);
extern struct net_device *lance_probe(int unit);
extern struct net_device *mace_probe(int unit);
-extern struct net_device *macsonic_probe(int unit);
extern struct net_device *mac8390_probe(int unit);
extern struct net_device *mac89x0_probe(int unit);
extern struct net_device *mc32_probe(int unit);
@@ -284,9 +283,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
#ifdef CONFIG_MACMACE /* Mac 68k Quadra AV builtin Ethernet */
{mace_probe, 0},
#endif
-#ifdef CONFIG_MACSONIC /* Mac SONIC-based Ethernet of all sorts */
- {macsonic_probe, 0},
-#endif
#ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */
{mac8390_probe, 0},
#endif
@@ -318,17 +314,9 @@ static void __init ethif_probe2(int unit)
#ifdef CONFIG_TR
/* Token-ring device probe */
extern int ibmtr_probe_card(struct net_device *);
-extern struct net_device *sk_isa_probe(int unit);
-extern struct net_device *proteon_probe(int unit);
extern struct net_device *smctr_probe(int unit);
static struct devprobe2 tr_probes2[] __initdata = {
-#ifdef CONFIG_SKISA
- {sk_isa_probe, 0},
-#endif
-#ifdef CONFIG_PROTEON
- {proteon_probe, 0},
-#endif
#ifdef CONFIG_SMCTR
{smctr_probe, 0},
#endif
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5ce606d9dc0..19e829b567d 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1106,18 +1106,13 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
}
}
- if (found) {
- /* a slave was found that is using the mac address
- * of the new slave
- */
- printk(KERN_ERR DRV_NAME
- ": Error: the hw address of slave %s is not "
- "unique - cannot enslave it!",
- slave->dev->name);
- return -EINVAL;
- }
+ if (!found)
+ return 0;
- return 0;
+ /* Try setting slave mac to bond address and fall-through
+ to code handling that situation below... */
+ alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+ bond->alb_info.rlb_enabled);
}
/* The slave's address is equal to the address of the bond.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c930da90a8..94c9f68dd16 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1604,6 +1604,44 @@ static int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_
return 0;
}
+#define BOND_INTERSECT_FEATURES \
+ (NETIF_F_SG|NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)
+
+/*
+ * Compute the features available to the bonding device by
+ * intersection of all of the slave devices' BOND_INTERSECT_FEATURES.
+ * Call this after attaching or detaching a slave to update the
+ * bond's features.
+ */
+static int bond_compute_features(struct bonding *bond)
+{
+ int i;
+ struct slave *slave;
+ struct net_device *bond_dev = bond->dev;
+ int features = bond->bond_features;
+
+ bond_for_each_slave(bond, slave, i) {
+ struct net_device * slave_dev = slave->dev;
+ if (i == 0) {
+ features |= BOND_INTERSECT_FEATURES;
+ }
+ features &=
+ ~(~slave_dev->features & BOND_INTERSECT_FEATURES);
+ }
+
+ /* turn off NETIF_F_SG if we need a csum and h/w can't do it */
+ if ((features & NETIF_F_SG) &&
+ !(features & (NETIF_F_IP_CSUM |
+ NETIF_F_NO_CSUM |
+ NETIF_F_HW_CSUM))) {
+ features &= ~NETIF_F_SG;
+ }
+
+ bond_dev->features = features;
+
+ return 0;
+}
+
/* enslave device <slave> to bond device <master> */
static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@@ -1811,6 +1849,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
new_slave->delay = 0;
new_slave->link_failure_count = 0;
+ bond_compute_features(bond);
+
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -2015,7 +2055,7 @@ err_free:
err_undo_flags:
bond_dev->features = old_features;
-
+
return res;
}
@@ -2100,6 +2140,8 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
/* release the slave from its bond */
bond_detach_slave(bond, slave);
+ bond_compute_features(bond);
+
if (bond->primary_slave == slave) {
bond->primary_slave = NULL;
}
@@ -2243,6 +2285,8 @@ static int bond_release_all(struct net_device *bond_dev)
bond_alb_deinit_slave(bond, slave);
}
+ bond_compute_features(bond);
+
/* now that the slave is detached, unlock and perform
* all the undo steps that should not be called from
* within a lock.
@@ -3588,6 +3632,7 @@ static int bond_master_netdev_event(unsigned long event, struct net_device *bond
static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev)
{
struct net_device *bond_dev = slave_dev->master;
+ struct bonding *bond = bond_dev->priv;
switch (event) {
case NETDEV_UNREGISTER:
@@ -3626,6 +3671,9 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
* TODO: handle changing the primary's name
*/
break;
+ case NETDEV_FEAT_CHANGE:
+ bond_compute_features(bond);
+ break;
default:
break;
}
@@ -4526,6 +4574,11 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
}
}
+static struct ethtool_ops bond_ethtool_ops = {
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+};
+
/*
* Does not allocate but creates a /proc entry.
* Allowed to fail.
@@ -4555,6 +4608,7 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
bond_dev->stop = bond_close;
bond_dev->get_stats = bond_get_stats;
bond_dev->do_ioctl = bond_do_ioctl;
+ bond_dev->ethtool_ops = &bond_ethtool_ops;
bond_dev->set_multicast_list = bond_set_multicast_list;
bond_dev->change_mtu = bond_change_mtu;
bond_dev->set_mac_address = bond_set_mac_address;
@@ -4591,6 +4645,8 @@ static int __init bond_init(struct net_device *bond_dev, struct bond_params *par
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER);
+ bond->bond_features = bond_dev->features;
+
#ifdef CONFIG_PROC_FS
bond_create_proc_entry(bond);
#endif
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index d27f377b3ee..38819698086 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -211,6 +211,9 @@ struct bonding {
struct bond_params params;
struct list_head vlan_list;
struct vlan_group *vlgrp;
+ /* the features the bonding device supports, independently
+ * of any slaves */
+ int bond_features;
};
/**
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b82fd15d089..9b596e0bbf9 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2767,7 +2767,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
" next_to_use <%x>\n"
" next_to_clean <%x>\n"
"buffer_info[next_to_clean]\n"
- " dma <%zx>\n"
+ " dma <%llx>\n"
" time_stamp <%lx>\n"
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
@@ -2776,7 +2776,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
E1000_READ_REG(&adapter->hw, TDT),
tx_ring->next_to_use,
i,
- tx_ring->buffer_info[i].dma,
+ (unsigned long long)tx_ring->buffer_info[i].dma,
tx_ring->buffer_info[i].time_stamp,
eop,
jiffies,
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 1795425f512..8c62ced2c9b 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1263,8 +1263,8 @@ speedo_init_rx_ring(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
- /* XXX: do we really want to call this before the NULL check? --hch */
- rx_align(skb); /* Align IP on 16 byte boundary */
+ if (skb)
+ rx_align(skb); /* Align IP on 16 byte boundary */
sp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* OK. Just initially short of Rx bufs. */
@@ -1654,8 +1654,8 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
struct sk_buff *skb;
/* Get a fresh skbuff to replace the consumed one. */
skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
- /* XXX: do we really want to call this before the NULL check? --hch */
- rx_align(skb); /* Align IP on 16 byte boundary */
+ if (skb)
+ rx_align(skb); /* Align IP on 16 byte boundary */
sp->rx_skbuff[entry] = skb;
if (skb == NULL) {
sp->rx_ringp[entry] = NULL;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 64f0f697c95..7d93948aec8 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,16 @@
* 0.33: 16 May 2005: Support for MCP51 added.
* 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
* 0.35: 26 Jun 2005: Support for MCP55 added.
+ * 0.36: 28 Jun 2005: Add jumbo frame support.
+ * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
+ * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
+ * per-packet flags.
+ * 0.39: 18 Jul 2005: Add 64bit descriptor support.
+ * 0.40: 19 Jul 2005: Add support for mac address change.
+ * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
+ * of nv_remove
+ * 0.42: 06 Aug 2005: Fix lack of link speed initialization
+ * in the second (and later) nv_open call
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +106,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.35"
+#define FORCEDETH_VERSION "0.41"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -131,11 +141,10 @@
* Hardware access:
*/
-#define DEV_NEED_LASTPACKET1 0x0001 /* set LASTPACKET1 in tx flags */
-#define DEV_IRQMASK_1 0x0002 /* use NVREG_IRQMASK_WANTED_1 for irq mask */
-#define DEV_IRQMASK_2 0x0004 /* use NVREG_IRQMASK_WANTED_2 for irq mask */
-#define DEV_NEED_TIMERIRQ 0x0008 /* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER 0x0010 /* poll link settings. Relies on the timer irq */
+#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
enum {
NvRegIrqStatus = 0x000,
@@ -146,13 +155,16 @@ enum {
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
-#define NVREG_IRQ_TX2 0x0010
+#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
+#define NVREG_IRQ_TX_ERROR 0x0080
#define NVREG_IRQ_TX1 0x0100
-#define NVREG_IRQMASK_WANTED_1 0x005f
-#define NVREG_IRQMASK_WANTED_2 0x0147
-#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))
+#define NVREG_IRQMASK_WANTED 0x00df
+
+#define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
+ NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
+ NVREG_IRQ_TX1))
NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3
@@ -286,6 +298,18 @@ struct ring_desc {
u32 FlagLen;
};
+struct ring_desc_ex {
+ u32 PacketBufferHigh;
+ u32 PacketBufferLow;
+ u32 Reserved;
+ u32 FlagLen;
+};
+
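+/*
+ * The ring can be made up of either the original 32-bit descriptors
+ * or the extended descriptors used for 64-bit DMA, selected at
+ * runtime by desc_ver.
+ */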
+typedef union _ring_type {
+ struct ring_desc* orig;
+ struct ring_desc_ex* ex;
+} ring_type;
+
#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
@@ -293,7 +317,7 @@ struct ring_desc {
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
-#define NV_TX_LASTPACKET1 (1<<24)
+#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
@@ -303,7 +327,7 @@ struct ring_desc {
#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
-#define NV_TX2_LASTPACKET1 (1<<23)
+#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
@@ -379,9 +403,13 @@ struct ring_desc {
#define TX_LIMIT_START 62
/* rx/tx mac addr + type + vlan + align + slack*/
-#define RX_NIC_BUFSIZE (ETH_DATA_LEN + 64)
-/* even more slack */
-#define RX_ALLOC_BUFSIZE (ETH_DATA_LEN + 128)
+#define NV_RX_HEADERS (64)
+/* even more slack. */
+#define NV_RX_ALLOC_PAD (64)
+
+/* maximum mtu size */
+#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
+#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */
#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
@@ -396,6 +424,7 @@ struct ring_desc {
*/
#define DESC_VER_1 0x0
#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK)
+#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK)
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
@@ -468,11 +497,12 @@ struct fe_priv {
/* rx specific fields.
* Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
*/
- struct ring_desc *rx_ring;
+ ring_type rx_ring;
unsigned int cur_rx, refill_rx;
struct sk_buff *rx_skbuff[RX_RING];
dma_addr_t rx_dma[RX_RING];
unsigned int rx_buf_sz;
+ unsigned int pkt_limit;
struct timer_list oom_kick;
struct timer_list nic_poll;
@@ -484,7 +514,7 @@ struct fe_priv {
/*
* tx specific fields.
*/
- struct ring_desc *tx_ring;
+ ring_type tx_ring;
unsigned int next_tx, nic_tx;
struct sk_buff *tx_skbuff[TX_RING];
dma_addr_t tx_dma[TX_RING];
@@ -519,6 +549,11 @@ static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}
+static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
+{
+ return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+}
+
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
int delay, int delaymax, const char *msg)
{
@@ -792,7 +827,7 @@ static int nv_alloc_rx(struct net_device *dev)
nr = refill_rx % RX_RING;
if (np->rx_skbuff[nr] == NULL) {
- skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
+ skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (!skb)
break;
@@ -803,9 +838,16 @@ static int nv_alloc_rx(struct net_device *dev)
}
np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
PCI_DMA_FROMDEVICE);
- np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
- wmb();
- np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+ wmb();
+ np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+ } else {
+ np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+ np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+ wmb();
+ np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+ }
dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
dev->name, refill_rx);
refill_rx++;
@@ -831,19 +873,37 @@ static void nv_do_rx_refill(unsigned long data)
enable_irq(dev->irq);
}
-static int nv_init_ring(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
int i;
- np->next_tx = np->nic_tx = 0;
- for (i = 0; i < TX_RING; i++)
- np->tx_ring[i].FlagLen = 0;
-
np->cur_rx = RX_RING;
np->refill_rx = 0;
for (i = 0; i < RX_RING; i++)
- np->rx_ring[i].FlagLen = 0;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->rx_ring.orig[i].FlagLen = 0;
+ else
+ np->rx_ring.ex[i].FlagLen = 0;
+}
+
+static void nv_init_tx(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+
+ np->next_tx = np->nic_tx = 0;
+ for (i = 0; i < TX_RING; i++)
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->tx_ring.orig[i].FlagLen = 0;
+ else
+ np->tx_ring.ex[i].FlagLen = 0;
+}
+
+static int nv_init_ring(struct net_device *dev)
+{
+ nv_init_tx(dev);
+ nv_init_rx(dev);
return nv_alloc_rx(dev);
}
@@ -852,7 +912,10 @@ static void nv_drain_tx(struct net_device *dev)
struct fe_priv *np = get_nvpriv(dev);
int i;
for (i = 0; i < TX_RING; i++) {
- np->tx_ring[i].FlagLen = 0;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->tx_ring.orig[i].FlagLen = 0;
+ else
+ np->tx_ring.ex[i].FlagLen = 0;
if (np->tx_skbuff[i]) {
pci_unmap_single(np->pci_dev, np->tx_dma[i],
np->tx_skbuff[i]->len,
@@ -869,7 +932,10 @@ static void nv_drain_rx(struct net_device *dev)
struct fe_priv *np = get_nvpriv(dev);
int i;
for (i = 0; i < RX_RING; i++) {
- np->rx_ring[i].FlagLen = 0;
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->rx_ring.orig[i].FlagLen = 0;
+ else
+ np->rx_ring.ex[i].FlagLen = 0;
wmb();
if (np->rx_skbuff[i]) {
pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -900,11 +966,19 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
PCI_DMA_TODEVICE);
- np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+ else {
+ np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+ np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+ }
spin_lock_irq(&np->lock);
wmb();
- np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+ else
+ np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
dev->name, np->next_tx);
{
@@ -942,7 +1016,10 @@ static void nv_tx_done(struct net_device *dev)
while (np->nic_tx != np->next_tx) {
i = np->nic_tx % TX_RING;
- Flags = le32_to_cpu(np->tx_ring[i].FlagLen);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+ else
+ Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
dev->name, np->nic_tx, Flags);
@@ -993,9 +1070,56 @@ static void nv_tx_timeout(struct net_device *dev)
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name,
+ printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+ {
+ int i;
+
+ printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
+ dev->name, (unsigned long)np->ring_addr,
+ np->next_tx, np->nic_tx);
+ printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
+ for (i=0;i<0x400;i+= 32) {
+ printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ i,
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
+ for (i=0;i<TX_RING;i+= 4) {
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i].FlagLen),
+ le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
+ le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
+ le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
+ le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+ } else {
+ printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i].FlagLen),
+ le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
+ le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
+ le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
+ le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+ }
+ }
+ }
+
spin_lock_irq(&np->lock);
/* 1) stop tx engine */
@@ -1009,7 +1133,10 @@ static void nv_tx_timeout(struct net_device *dev)
printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
nv_drain_tx(dev);
np->next_tx = np->nic_tx = 0;
- writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ else
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
netif_wake_queue(dev);
}
@@ -1084,8 +1211,13 @@ static void nv_rx_process(struct net_device *dev)
break; /* we scanned the whole ring - do not continue */
i = np->cur_rx % RX_RING;
- Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
- len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+ len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
+ } else {
+ Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+ len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
+ }
dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
dev->name, np->cur_rx, Flags);
@@ -1207,15 +1339,133 @@ next_pkt:
}
}
+static void set_bufsize(struct net_device *dev)
+{
+ struct fe_priv *np = netdev_priv(dev);
+
+ if (dev->mtu <= ETH_DATA_LEN)
+ np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
+ else
+ np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
+}
+
/*
* nv_change_mtu: dev->change_mtu function
* Called with dev_base_lock held for read.
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
- if (new_mtu > ETH_DATA_LEN)
+ struct fe_priv *np = get_nvpriv(dev);
+ int old_mtu;
+
+ if (new_mtu < 64 || new_mtu > np->pkt_limit)
return -EINVAL;
+
+ old_mtu = dev->mtu;
dev->mtu = new_mtu;
+
+ /* return early if the buffer sizes will not change */
+ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+ return 0;
+ if (old_mtu == new_mtu)
+ return 0;
+
+ /* synchronized against open : rtnl_lock() held by caller */
+ if (netif_running(dev)) {
+ u8 __iomem *base = get_hwbase(dev);
+ /*
+ * It seems that the nic preloads valid ring entries into an
+ * internal buffer. The procedure for flushing everything is
+ * guessed, there is probably a simpler approach.
+ * Changing the MTU is a rare event, it shouldn't matter.
+ */
+ disable_irq(dev->irq);
+ spin_lock_bh(&dev->xmit_lock);
+ spin_lock(&np->lock);
+ /* stop engines */
+ nv_stop_rx(dev);
+ nv_stop_tx(dev);
+ nv_txrx_reset(dev);
+ /* drain rx queue */
+ nv_drain_rx(dev);
+ nv_drain_tx(dev);
+ /* reinit driver view of the rx queue */
+ nv_init_rx(dev);
+ nv_init_tx(dev);
+ /* alloc new rx buffers */
+ set_bufsize(dev);
+ if (nv_alloc_rx(dev)) {
+ if (!np->in_shutdown)
+ mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+ }
+ /* reinit nic view of the rx queue */
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+ writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ else
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+ writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+ base + NvRegRingSizes);
+ pci_push(base);
+ writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+ pci_push(base);
+
+ /* restart rx engine */
+ nv_start_rx(dev);
+ nv_start_tx(dev);
+ spin_unlock(&np->lock);
+ spin_unlock_bh(&dev->xmit_lock);
+ enable_irq(dev->irq);
+ }
+ return 0;
+}
+
+static void nv_copy_mac_to_hw(struct net_device *dev)
+{
+ u8 __iomem *base = get_hwbase(dev);
+ u32 mac[2];
+
+ mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+ (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+ mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+
+ writel(mac[0], base + NvRegMacAddrA);
+ writel(mac[1], base + NvRegMacAddrB);
+}
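
For reference, nv_copy_mac_to_hw() above packs dev_addr[0] into the least significant byte of NvRegMacAddrA and dev_addr[5] into bits 15:8 of NvRegMacAddrB. A minimal userspace sketch of the same packing and its inverse, useful when checking the "misordered MAC address" write-back later in the patch (helper names here are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 6-byte Ethernet address into the two 32-bit register values,
 * byte 0 ending up in the least significant byte of mac[0]. */
static void pack_mac(const uint8_t *addr, uint32_t mac[2])
{
	mac[0] = (uint32_t)addr[0] | (uint32_t)addr[1] << 8 |
		 (uint32_t)addr[2] << 16 | (uint32_t)addr[3] << 24;
	mac[1] = (uint32_t)addr[4] | (uint32_t)addr[5] << 8;
}

/* Inverse: recover the six bytes from the register values. */
static void unpack_mac(const uint32_t mac[2], uint8_t *addr)
{
	int i;

	for (i = 0; i < 4; i++)
		addr[i] = (mac[0] >> (8 * i)) & 0xff;
	addr[4] = mac[1] & 0xff;
	addr[5] = (mac[1] >> 8) & 0xff;
}

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x04, 0x4b, 0x12, 0x34, 0x56 };
	uint8_t back[6];
	uint32_t mac[2];

	pack_mac(addr, mac);
	unpack_mac(mac, back);
	printf("MacAddrA=%08x MacAddrB=%08x round-trip %s\n",
	       mac[0], mac[1], memcmp(addr, back, 6) ? "broken" : "ok");
	return 0;
}
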
+
+/*
+ * nv_set_mac_address: dev->set_mac_address function
+ * Called with rtnl_lock() held.
+ */
+static int nv_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ struct sockaddr *macaddr = (struct sockaddr*)addr;
+
+ if(!is_valid_ether_addr(macaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* synchronized against open : rtnl_lock() held by caller */
+ memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+
+ if (netif_running(dev)) {
+ spin_lock_bh(&dev->xmit_lock);
+ spin_lock_irq(&np->lock);
+
+ /* stop rx engine */
+ nv_stop_rx(dev);
+
+ /* set mac address */
+ nv_copy_mac_to_hw(dev);
+
+ /* restart rx engine */
+ nv_start_rx(dev);
+ spin_unlock_irq(&np->lock);
+ spin_unlock_bh(&dev->xmit_lock);
+ } else {
+ nv_copy_mac_to_hw(dev);
+ }
return 0;
}
@@ -1470,7 +1720,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
if (!(events & np->irqmask))
break;
- if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) {
+ if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
spin_lock(&np->lock);
nv_tx_done(dev);
spin_unlock(&np->lock);
@@ -1761,6 +2011,50 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return 0;
}
+#define FORCEDETH_REGS_VER 1
+#define FORCEDETH_REGS_SIZE 0x400 /* 256 32-bit registers */
+
+static int nv_get_regs_len(struct net_device *dev)
+{
+ return FORCEDETH_REGS_SIZE;
+}
+
+static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 *rbuf = buf;
+ int i;
+
+ regs->version = FORCEDETH_REGS_VER;
+ spin_lock_irq(&np->lock);
+ for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+ rbuf[i] = readl(base + i*sizeof(u32));
+ spin_unlock_irq(&np->lock);
+}
+
+static int nv_nway_reset(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int ret;
+
+ spin_lock_irq(&np->lock);
+ if (np->autoneg) {
+ int bmcr;
+
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irq(&np->lock);
+
+ return ret;
+}
+
static struct ethtool_ops ops = {
.get_drvinfo = nv_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1768,6 +2062,9 @@ static struct ethtool_ops ops = {
.set_wol = nv_set_wol,
.get_settings = nv_get_settings,
.set_settings = nv_set_settings,
+ .get_regs_len = nv_get_regs_len,
+ .get_regs = nv_get_regs,
+ .nway_reset = nv_nway_reset,
};
static int nv_open(struct net_device *dev)
@@ -1792,6 +2089,7 @@ static int nv_open(struct net_device *dev)
writel(0, base + NvRegAdapterControl);
/* 2) initialize descriptor rings */
+ set_bufsize(dev);
oom = nv_init_ring(dev);
writel(0, base + NvRegLinkSpeed);
@@ -1802,20 +2100,14 @@ static int nv_open(struct net_device *dev)
np->in_shutdown = 0;
/* 3) set mac address */
- {
- u32 mac[2];
-
- mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
- (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
- mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
-
- writel(mac[0], base + NvRegMacAddrA);
- writel(mac[1], base + NvRegMacAddrB);
- }
+ nv_copy_mac_to_hw(dev);
/* 4) give hw rings */
writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
- writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+ else
+ writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
base + NvRegRingSizes);
@@ -1837,7 +2129,7 @@ static int nv_open(struct net_device *dev)
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
- writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
+ writel(np->rx_buf_sz, base + NvRegOffloadConfig);
writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
get_random_bytes(&i, sizeof(i));
@@ -1888,6 +2180,9 @@ static int nv_open(struct net_device *dev)
writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
}
+ /* set linkspeed to invalid value, thus force nv_update_linkspeed
+ * to init hw */
+ np->linkspeed = 0;
ret = nv_update_linkspeed(dev);
nv_start_rx(dev);
nv_start_tx(dev);
@@ -1942,6 +2237,12 @@ static int nv_close(struct net_device *dev)
if (np->wolenabled)
nv_start_rx(dev);
+ /* special op: write back the misordered MAC address - otherwise
+ * the next nv_probe would see a wrong address.
+ */
+ writel(np->orig_mac[0], base + NvRegMacAddrA);
+ writel(np->orig_mac[1], base + NvRegMacAddrB);
+
/* FIXME: power down nic */
return 0;
@@ -2006,32 +2307,55 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
}
/* handle different descriptor versions */
- if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
- pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
- pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 ||
- pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
- pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13)
- np->desc_ver = DESC_VER_1;
- else
+ if (id->driver_data & DEV_HAS_HIGH_DMA) {
+ /* packet format 3: supports 40-bit addressing */
+ np->desc_ver = DESC_VER_3;
+ if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+ pci_name(pci_dev));
+ }
+ } else if (id->driver_data & DEV_HAS_LARGEDESC) {
+ /* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
+ } else {
+ /* original packet format */
+ np->desc_ver = DESC_VER_1;
+ }
+
+ np->pkt_limit = NV_PKTLIMIT_1;
+ if (id->driver_data & DEV_HAS_LARGEDESC)
+ np->pkt_limit = NV_PKTLIMIT_2;
err = -ENOMEM;
np->base = ioremap(addr, NV_PCI_REGSZ);
if (!np->base)
goto out_relreg;
dev->base_addr = (unsigned long)np->base;
+
dev->irq = pci_dev->irq;
- np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
- &np->ring_addr);
- if (!np->rx_ring)
- goto out_unmap;
- np->tx_ring = &np->rx_ring[RX_RING];
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->rx_ring.orig = pci_alloc_consistent(pci_dev,
+ sizeof(struct ring_desc) * (RX_RING + TX_RING),
+ &np->ring_addr);
+ if (!np->rx_ring.orig)
+ goto out_unmap;
+ np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+ } else {
+ np->rx_ring.ex = pci_alloc_consistent(pci_dev,
+ sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+ &np->ring_addr);
+ if (!np->rx_ring.ex)
+ goto out_unmap;
+ np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+ }
dev->open = nv_open;
dev->stop = nv_close;
dev->hard_start_xmit = nv_start_xmit;
dev->get_stats = nv_get_stats;
dev->change_mtu = nv_change_mtu;
+ dev->set_mac_address = nv_set_mac_address;
dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = nv_poll_controller;
@@ -2080,17 +2404,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (np->desc_ver == DESC_VER_1) {
np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
- if (id->driver_data & DEV_NEED_LASTPACKET1)
- np->tx_flags |= NV_TX_LASTPACKET1;
} else {
np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
- if (id->driver_data & DEV_NEED_LASTPACKET1)
- np->tx_flags |= NV_TX2_LASTPACKET1;
}
- if (id->driver_data & DEV_IRQMASK_1)
- np->irqmask = NVREG_IRQMASK_WANTED_1;
- if (id->driver_data & DEV_IRQMASK_2)
- np->irqmask = NVREG_IRQMASK_WANTED_2;
+ np->irqmask = NVREG_IRQMASK_WANTED;
if (id->driver_data & DEV_NEED_TIMERIRQ)
np->irqmask |= NVREG_IRQ_TIMER;
if (id->driver_data & DEV_NEED_LINKTIMER) {
@@ -2155,8 +2472,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
return 0;
out_freering:
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
- np->rx_ring, np->ring_addr);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+ np->rx_ring.orig, np->ring_addr);
+ else
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+ np->rx_ring.ex, np->ring_addr);
pci_set_drvdata(pci_dev, NULL);
out_unmap:
iounmap(get_hwbase(dev));
@@ -2174,18 +2495,14 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
struct fe_priv *np = get_nvpriv(dev);
- u8 __iomem *base = get_hwbase(dev);
unregister_netdev(dev);
- /* special op: write back the misordered MAC address - otherwise
- * the next nv_probe would see a wrong address.
- */
- writel(np->orig_mac[0], base + NvRegMacAddrA);
- writel(np->orig_mac[1], base + NvRegMacAddrB);
-
/* free all structures */
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
+ else
+ pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
iounmap(get_hwbase(dev));
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
@@ -2195,109 +2512,64 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
static struct pci_device_id pci_tbl[] = {
{ /* nForce Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce2 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_2,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce3 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_3,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
},
{ /* nForce3 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
},
{ /* nForce3 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_5,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
},
{ /* nForce3 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_6,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
},
{ /* nForce3 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_7,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
},
{ /* CK804 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_8,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
},
{ /* CK804 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_9,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
},
{ /* MCP04 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_10,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
},
{ /* MCP04 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_11,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
},
{ /* MCP51 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_12,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
},
{ /* MCP51 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_13,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
},
{ /* MCP55 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_14,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
},
{ /* MCP55 Ethernet Controller */
- .vendor = PCI_VENDOR_ID_NVIDIA,
- .device = PCI_DEVICE_ID_NVIDIA_NVENET_15,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
},
{0,},
};
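
The DESC_VER_3 paths above program a 40-bit DMA mask and then split each bus address into the PacketBufferHigh/PacketBufferLow descriptor words. A hedged, driver-independent sketch of that split and its reassembly (plain C; the low mask is the same 32-bit mask the patch writes as 0x0FFFFFFFF):

#include <stdint.h>
#include <stdio.h>

/* Split a bus address (up to 40 bits here) into high/low 32-bit halves. */
static void split_dma_addr(uint64_t dma, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(dma >> 32);
	*lo = (uint32_t)(dma & 0xffffffffULL);
}

/* Reassemble the halves, e.g. when dumping descriptors for debugging. */
static uint64_t join_dma_addr(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t dma = 0x7f12345678ULL;		/* fits in 40 bits */
	uint32_t hi, lo;

	split_dma_addr(dma, &hi, &lo);
	printf("high=%08x low=%08x rejoined=%llx\n", hi, lo,
	       (unsigned long long)join_dma_addr(hi, lo));
	return 0;
}
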
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 0cd54306e63..de087cd609d 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,6 @@
config MKISS
tristate "Serial port KISS driver"
- depends on AX25 && BROKEN_ON_SMP
+ depends on AX25
---help---
KISS is a protocol used for the exchange of data between a computer
and a Terminal Node Controller (a small embedded system commonly
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a7f15d9f13e..5298096afbd 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -54,6 +54,7 @@
#include <linux/kmod.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
+#include <linux/jiffies.h>
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
/* prototypes for ax25_encapsulate and ax25_rebuild_header */
#include <net/ax25.h>
@@ -287,7 +288,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
- if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
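
All four baycom drivers switch from open-coded jiffies arithmetic to the standard time_after_eq() helper, which phrases the comparison as a signed difference and therefore behaves sensibly even when the counter wraps. A standalone illustration of the idiom with a 32-bit counter so the wrap is easy to trigger (macro and variable names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's time_after_eq(a, b): true once a has reached b,
 * computed via a signed difference so counter wraparound does not matter. */
#define ticks_after_eq(a, b)	((int32_t)((b) - (a)) <= 0)

int main(void)
{
	uint32_t hz = 100;
	uint32_t last = 0xfffffff0u;	/* shortly before the counter wraps */
	uint32_t now = last + 250;	/* 250 ticks later, value has wrapped */

	printf("now=0x%08x last=0x%08x elapsed>=hz: %d\n",
	       now, last, ticks_after_eq(now, last + hz));
	return 0;
}
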
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 612ad452bee..3b1bef1ee21 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -84,6 +84,7 @@
#include <linux/baycom.h>
#include <linux/parport.h>
#include <linux/bitops.h>
+#include <linux/jiffies.h>
#include <asm/bug.h>
#include <asm/system.h>
@@ -165,7 +166,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc)
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
- if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 25f270b0537..232793d2ce6 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -79,6 +79,7 @@
#include <asm/io.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
+#include <linux/jiffies.h>
/* --------------------------------------------------------------------- */
@@ -159,7 +160,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
- if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index eead85d0096..be596a3eb3f 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -69,6 +69,7 @@
#include <asm/io.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
+#include <linux/jiffies.h>
/* --------------------------------------------------------------------- */
@@ -150,7 +151,7 @@ static inline void baycom_int_freq(struct baycom_state *bc)
* measure the interrupt frequency
*/
bc->debug_vals.cur_intcnt++;
- if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+ if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
bc->debug_vals.last_jiffies = cur_jiffies;
bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3035422f5ad..63b1a2b86ac 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -1,30 +1,19 @@
/*
- * MKISS Driver
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
*
- * This module:
- * This module is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
*
- * This module implements the AX.25 protocol for kernel-based
- * devices like TTYs. It interfaces between a raw TTY, and the
- * kernel's AX.25 protocol layers, just like slip.c.
- * AX.25 needs to be separated from slip.c while slip.c is no
- * longer a static kernel device since it is a module.
- * This method clears the way to implement other kiss protocols
- * like mkiss smack g8bpq ..... so far only mkiss is implemented.
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
- * Hans Alblas <hans@esrac.ele.tue.nl>
- *
- * History
- * Jonathan (G4KLX) Fixed to match Linux networking changes - 2.1.15.
- * Matthias (DG2FEF) Added support for FlexNet CRC (on special request)
- * Fixed bug in ax25_close(): dev_lock_wait() was
- * called twice, causing a deadlock.
- * Jeroen (PE1RXQ) Removed old MKISS_MAGIC stuff and calls to
- * MOD_*_USE_COUNT
- * Remove cli() and fix rtnl lock usage.
+ * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
+ * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
*/
#include <linux/config.h>
@@ -46,177 +35,300 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
+#include <linux/jiffies.h>
#include <net/ax25.h>
-#include "mkiss.h"
-
#ifdef CONFIG_INET
#include <linux/ip.h>
#include <linux/tcp.h>
#endif
-static char banner[] __initdata = KERN_INFO "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n";
-
-typedef struct ax25_ctrl {
- struct ax_disp ctrl; /* */
- struct net_device dev; /* the device */
-} ax25_ctrl_t;
-
-static ax25_ctrl_t **ax25_ctrls;
-
-int ax25_maxdev = AX25_MAXDEV; /* Can be overridden with insmod! */
-
-static struct tty_ldisc ax_ldisc;
-
-static int ax25_init(struct net_device *);
-static int kiss_esc(unsigned char *, unsigned char *, int);
-static int kiss_esc_crc(unsigned char *, unsigned char *, unsigned short, int);
-static void kiss_unesc(struct ax_disp *, unsigned char);
+#define AX_MTU 236
+
+/* SLIP/KISS protocol characters. */
+#define END 0300 /* indicates end of frame */
+#define ESC 0333 /* indicates byte stuffing */
+#define ESC_END 0334 /* ESC ESC_END means END 'data' */
+#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
+
+struct mkiss {
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ spinlock_t buflock; /* lock for rbuf and xbuf */
+ unsigned char *rbuff; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char *xbuff; /* transmitter buffer */
+ unsigned char *xhead; /* pointer to next byte to XMIT */
+ int xleft; /* bytes left in XMIT queue */
+
+ struct net_device_stats stats;
+
+ /* Detailed SLIP statistics. */
+ int mtu; /* Our mtu (to spot changes!) */
+ int buffsize; /* Max buffers sizes */
+
+ unsigned long flags; /* Flag values/ mode etc */
+ /* long req'd: used by set_bit --RR */
+#define AXF_INUSE 0 /* Channel in use */
+#define AXF_ESCAPE 1 /* ESC received */
+#define AXF_ERROR 2 /* Parity, etc. error */
+#define AXF_KEEPTEST 3 /* Keepalive test flag */
+#define AXF_OUTWAIT 4 /* output packet is waiting flag */
+
+ int mode;
+ int crcmode; /* MW: for FlexNet, SMACK etc. */
+#define CRC_MODE_NONE 0
+#define CRC_MODE_FLEX 1
+#define CRC_MODE_SMACK 2
+
+ atomic_t refcnt;
+ struct semaphore dead_sem;
+};
/*---------------------------------------------------------------------------*/
-static const unsigned short Crc_flex_table[] = {
- 0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38,
- 0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770,
- 0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9,
- 0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1,
- 0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a,
- 0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672,
- 0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb,
- 0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3,
- 0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c,
- 0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574,
- 0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd,
- 0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5,
- 0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e,
- 0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476,
- 0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf,
- 0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7,
- 0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30,
- 0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378,
- 0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1,
- 0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9,
- 0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32,
- 0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a,
- 0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3,
- 0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb,
- 0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34,
- 0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c,
- 0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5,
- 0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd,
- 0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36,
- 0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e,
- 0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7,
- 0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff
+static const unsigned short crc_flex_table[] = {
+ 0x0f87, 0x1e0e, 0x2c95, 0x3d1c, 0x49a3, 0x582a, 0x6ab1, 0x7b38,
+ 0x83cf, 0x9246, 0xa0dd, 0xb154, 0xc5eb, 0xd462, 0xe6f9, 0xf770,
+ 0x1f06, 0x0e8f, 0x3c14, 0x2d9d, 0x5922, 0x48ab, 0x7a30, 0x6bb9,
+ 0x934e, 0x82c7, 0xb05c, 0xa1d5, 0xd56a, 0xc4e3, 0xf678, 0xe7f1,
+ 0x2e85, 0x3f0c, 0x0d97, 0x1c1e, 0x68a1, 0x7928, 0x4bb3, 0x5a3a,
+ 0xa2cd, 0xb344, 0x81df, 0x9056, 0xe4e9, 0xf560, 0xc7fb, 0xd672,
+ 0x3e04, 0x2f8d, 0x1d16, 0x0c9f, 0x7820, 0x69a9, 0x5b32, 0x4abb,
+ 0xb24c, 0xa3c5, 0x915e, 0x80d7, 0xf468, 0xe5e1, 0xd77a, 0xc6f3,
+ 0x4d83, 0x5c0a, 0x6e91, 0x7f18, 0x0ba7, 0x1a2e, 0x28b5, 0x393c,
+ 0xc1cb, 0xd042, 0xe2d9, 0xf350, 0x87ef, 0x9666, 0xa4fd, 0xb574,
+ 0x5d02, 0x4c8b, 0x7e10, 0x6f99, 0x1b26, 0x0aaf, 0x3834, 0x29bd,
+ 0xd14a, 0xc0c3, 0xf258, 0xe3d1, 0x976e, 0x86e7, 0xb47c, 0xa5f5,
+ 0x6c81, 0x7d08, 0x4f93, 0x5e1a, 0x2aa5, 0x3b2c, 0x09b7, 0x183e,
+ 0xe0c9, 0xf140, 0xc3db, 0xd252, 0xa6ed, 0xb764, 0x85ff, 0x9476,
+ 0x7c00, 0x6d89, 0x5f12, 0x4e9b, 0x3a24, 0x2bad, 0x1936, 0x08bf,
+ 0xf048, 0xe1c1, 0xd35a, 0xc2d3, 0xb66c, 0xa7e5, 0x957e, 0x84f7,
+ 0x8b8f, 0x9a06, 0xa89d, 0xb914, 0xcdab, 0xdc22, 0xeeb9, 0xff30,
+ 0x07c7, 0x164e, 0x24d5, 0x355c, 0x41e3, 0x506a, 0x62f1, 0x7378,
+ 0x9b0e, 0x8a87, 0xb81c, 0xa995, 0xdd2a, 0xcca3, 0xfe38, 0xefb1,
+ 0x1746, 0x06cf, 0x3454, 0x25dd, 0x5162, 0x40eb, 0x7270, 0x63f9,
+ 0xaa8d, 0xbb04, 0x899f, 0x9816, 0xeca9, 0xfd20, 0xcfbb, 0xde32,
+ 0x26c5, 0x374c, 0x05d7, 0x145e, 0x60e1, 0x7168, 0x43f3, 0x527a,
+ 0xba0c, 0xab85, 0x991e, 0x8897, 0xfc28, 0xeda1, 0xdf3a, 0xceb3,
+ 0x3644, 0x27cd, 0x1556, 0x04df, 0x7060, 0x61e9, 0x5372, 0x42fb,
+ 0xc98b, 0xd802, 0xea99, 0xfb10, 0x8faf, 0x9e26, 0xacbd, 0xbd34,
+ 0x45c3, 0x544a, 0x66d1, 0x7758, 0x03e7, 0x126e, 0x20f5, 0x317c,
+ 0xd90a, 0xc883, 0xfa18, 0xeb91, 0x9f2e, 0x8ea7, 0xbc3c, 0xadb5,
+ 0x5542, 0x44cb, 0x7650, 0x67d9, 0x1366, 0x02ef, 0x3074, 0x21fd,
+ 0xe889, 0xf900, 0xcb9b, 0xda12, 0xaead, 0xbf24, 0x8dbf, 0x9c36,
+ 0x64c1, 0x7548, 0x47d3, 0x565a, 0x22e5, 0x336c, 0x01f7, 0x107e,
+ 0xf808, 0xe981, 0xdb1a, 0xca93, 0xbe2c, 0xafa5, 0x9d3e, 0x8cb7,
+ 0x7440, 0x65c9, 0x5752, 0x46db, 0x3264, 0x23ed, 0x1176, 0x00ff
};
-/*---------------------------------------------------------------------------*/
-
static unsigned short calc_crc_flex(unsigned char *cp, int size)
{
- unsigned short crc = 0xffff;
-
- while (size--)
- crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
+ unsigned short crc = 0xffff;
- return crc;
-}
+ while (size--)
+ crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
-/*---------------------------------------------------------------------------*/
+ return crc;
+}
static int check_crc_flex(unsigned char *cp, int size)
{
- unsigned short crc = 0xffff;
+ unsigned short crc = 0xffff;
- if (size < 3)
- return -1;
+ if (size < 3)
+ return -1;
- while (size--)
- crc = (crc << 8) ^ Crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
+ while (size--)
+ crc = (crc << 8) ^ crc_flex_table[((crc >> 8) ^ *cp++) & 0xff];
- if ((crc & 0xffff) != 0x7070)
- return -1;
+ if ((crc & 0xffff) != 0x7070)
+ return -1;
- return 0;
+ return 0;
}
-/*---------------------------------------------------------------------------*/
+/*
+ * Standard encapsulation
+ */
-/* Find a free channel, and link in this `tty' line. */
-static inline struct ax_disp *ax_alloc(void)
+static int kiss_esc(unsigned char *s, unsigned char *d, int len)
{
- ax25_ctrl_t *axp=NULL;
- int i;
+ unsigned char *ptr = d;
+ unsigned char c;
- for (i = 0; i < ax25_maxdev; i++) {
- axp = ax25_ctrls[i];
+ /*
+ * Send an initial END character to flush out any data that may have
+ * accumulated in the receiver due to line noise.
+ */
- /* Not allocated ? */
- if (axp == NULL)
- break;
+ *ptr++ = END;
- /* Not in use ? */
- if (!test_and_set_bit(AXF_INUSE, &axp->ctrl.flags))
+ while (len-- > 0) {
+ switch (c = *s++) {
+ case END:
+ *ptr++ = ESC;
+ *ptr++ = ESC_END;
break;
+ case ESC:
+ *ptr++ = ESC;
+ *ptr++ = ESC_ESC;
+ break;
+ default:
+ *ptr++ = c;
+ break;
+ }
}
- /* Sorry, too many, all slots in use */
- if (i >= ax25_maxdev)
- return NULL;
+ *ptr++ = END;
+
+ return ptr - d;
+}
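
kiss_esc() above is standard SLIP/KISS byte stuffing: the frame is bracketed by END markers, and any END or ESC byte inside the payload becomes a two-byte escape sequence, so the output can grow to at most twice the input plus the two delimiters. A small userspace sketch exercising the same transformation (the protocol constants match the patch, everything else is illustrative):

#include <stdio.h>

#define END	0300		/* indicates end of frame */
#define ESC	0333		/* indicates byte stuffing */
#define ESC_END	0334		/* ESC ESC_END means END 'data' */
#define ESC_ESC	0335		/* ESC ESC_ESC means ESC 'data' */

/* Same stuffing rule as kiss_esc(); d must hold 2 * len + 2 bytes. */
static int kiss_stuff(const unsigned char *s, unsigned char *d, int len)
{
	unsigned char *ptr = d;

	*ptr++ = END;
	while (len-- > 0) {
		unsigned char c = *s++;

		if (c == END) {
			*ptr++ = ESC;
			*ptr++ = ESC_END;
		} else if (c == ESC) {
			*ptr++ = ESC;
			*ptr++ = ESC_ESC;
		} else {
			*ptr++ = c;
		}
	}
	*ptr++ = END;
	return ptr - d;
}

int main(void)
{
	const unsigned char frame[] = { 0x00, END, 0x41, ESC, 0x42 };
	unsigned char out[2 * sizeof(frame) + 2];
	int i, n = kiss_stuff(frame, out, sizeof(frame));

	for (i = 0; i < n; i++)
		printf("%02x ", out[i]);
	printf("\n");		/* expected: c0 00 db dc 41 db dd 42 c0 */
	return 0;
}
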
+
+/*
+ * MW:
+ * OK, it's ugly, but tell me a better solution without copying the
+ * packet to a temporary buffer :-)
+ */
+static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc,
+ int len)
+{
+ unsigned char *ptr = d;
+ unsigned char c=0;
+
+ *ptr++ = END;
+ while (len > 0) {
+ if (len > 2)
+ c = *s++;
+ else if (len > 1)
+ c = crc >> 8;
+ else if (len > 0)
+ c = crc & 0xff;
+
+ len--;
- /* If no channels are available, allocate one */
- if (axp == NULL && (ax25_ctrls[i] = kmalloc(sizeof(ax25_ctrl_t), GFP_KERNEL)) != NULL) {
- axp = ax25_ctrls[i];
+ switch (c) {
+ case END:
+ *ptr++ = ESC;
+ *ptr++ = ESC_END;
+ break;
+ case ESC:
+ *ptr++ = ESC;
+ *ptr++ = ESC_ESC;
+ break;
+ default:
+ *ptr++ = c;
+ break;
+ }
}
- memset(axp, 0, sizeof(ax25_ctrl_t));
-
- /* Initialize channel control data */
- set_bit(AXF_INUSE, &axp->ctrl.flags);
- sprintf(axp->dev.name, "ax%d", i++);
- axp->ctrl.tty = NULL;
- axp->dev.base_addr = i;
- axp->dev.priv = (void *)&axp->ctrl;
- axp->dev.next = NULL;
- axp->dev.init = ax25_init;
-
- if (axp != NULL) {
- /*
- * register device so that it can be ifconfig'ed
- * ax25_init() will be called as a side-effect
- * SIDE-EFFECT WARNING: ax25_init() CLEARS axp->ctrl !
- */
- if (register_netdev(&axp->dev) == 0) {
- /* (Re-)Set the INUSE bit. Very Important! */
- set_bit(AXF_INUSE, &axp->ctrl.flags);
- axp->ctrl.dev = &axp->dev;
- axp->dev.priv = (void *) &axp->ctrl;
-
- return &axp->ctrl;
- } else {
- clear_bit(AXF_INUSE,&axp->ctrl.flags);
- printk(KERN_ERR "mkiss: ax_alloc() - register_netdev() failure.\n");
+ *ptr++ = END;
+
+ return ptr - d;
+}
+
+/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
+static void ax_bump(struct mkiss *ax)
+{
+ struct sk_buff *skb;
+ int count;
+
+ spin_lock_bh(&ax->buflock);
+ if (ax->rbuff[0] > 0x0f) {
+ if (ax->rbuff[0] & 0x20) {
+ ax->crcmode = CRC_MODE_FLEX;
+ if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
+ ax->stats.rx_errors++;
+ spin_unlock_bh(&ax->buflock);
+ return;
+ }
+ ax->rcount -= 2;
+ /* dl9sau bugfix: the trailing two FlexNet CRC bytes
+ * will not be passed to the kernel. Thus we have
+ * to correct the kissparm signature, because it
+ * indicates a CRC but there is none.
+ */
+ *ax->rbuff &= ~0x20;
}
+ }
+ spin_unlock_bh(&ax->buflock);
+
+ count = ax->rcount;
+
+ if ((skb = dev_alloc_skb(count)) == NULL) {
+ printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
+ ax->dev->name);
+ ax->stats.rx_dropped++;
+ return;
}
- return NULL;
+ spin_lock_bh(&ax->buflock);
+ memcpy(skb_put(skb,count), ax->rbuff, count);
+ spin_unlock_bh(&ax->buflock);
+ skb->protocol = ax25_type_trans(skb, ax->dev);
+ netif_rx(skb);
+ ax->dev->last_rx = jiffies;
+ ax->stats.rx_packets++;
+ ax->stats.rx_bytes += count;
}
-/* Free an AX25 channel. */
-static inline void ax_free(struct ax_disp *ax)
+static void kiss_unesc(struct mkiss *ax, unsigned char s)
{
- /* Free all AX25 frame buffers. */
- if (ax->rbuff)
- kfree(ax->rbuff);
- ax->rbuff = NULL;
- if (ax->xbuff)
- kfree(ax->xbuff);
- ax->xbuff = NULL;
- if (!test_and_clear_bit(AXF_INUSE, &ax->flags))
- printk(KERN_ERR "mkiss: %s: ax_free for already free unit.\n", ax->dev->name);
+ switch (s) {
+ case END:
+ /* drop keeptest bit = VSV */
+ if (test_bit(AXF_KEEPTEST, &ax->flags))
+ clear_bit(AXF_KEEPTEST, &ax->flags);
+
+ if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2))
+ ax_bump(ax);
+
+ clear_bit(AXF_ESCAPE, &ax->flags);
+ ax->rcount = 0;
+ return;
+
+ case ESC:
+ set_bit(AXF_ESCAPE, &ax->flags);
+ return;
+ case ESC_ESC:
+ if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
+ s = ESC;
+ break;
+ case ESC_END:
+ if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
+ s = END;
+ break;
+ }
+
+ spin_lock_bh(&ax->buflock);
+ if (!test_bit(AXF_ERROR, &ax->flags)) {
+ if (ax->rcount < ax->buffsize) {
+ ax->rbuff[ax->rcount++] = s;
+ spin_unlock_bh(&ax->buflock);
+ return;
+ }
+
+ ax->stats.rx_over_errors++;
+ set_bit(AXF_ERROR, &ax->flags);
+ }
+ spin_unlock_bh(&ax->buflock);
+}
+
+static int ax_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr_ax25 *sa = addr;
+
+ spin_lock_irq(&dev->xmit_lock);
+ memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ spin_unlock_irq(&dev->xmit_lock);
+
+ return 0;
}
-static void ax_changedmtu(struct ax_disp *ax)
+/*---------------------------------------------------------------------------*/
+
+static void ax_changedmtu(struct mkiss *ax)
{
struct net_device *dev = ax->dev;
unsigned char *xbuff, *rbuff, *oxbuff, *orbuff;
@@ -236,7 +348,8 @@ static void ax_changedmtu(struct ax_disp *ax)
rbuff = kmalloc(len + 4, GFP_ATOMIC);
if (xbuff == NULL || rbuff == NULL) {
- printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, MTU change cancelled.\n",
+ printk(KERN_ERR "mkiss: %s: unable to grow ax25 buffers, "
+ "MTU change cancelled.\n",
ax->dev->name);
dev->mtu = ax->mtu;
if (xbuff != NULL)
@@ -258,7 +371,7 @@ static void ax_changedmtu(struct ax_disp *ax)
memcpy(ax->xbuff, ax->xhead, ax->xleft);
} else {
ax->xleft = 0;
- ax->tx_dropped++;
+ ax->stats.tx_dropped++;
}
}
@@ -269,7 +382,7 @@ static void ax_changedmtu(struct ax_disp *ax)
memcpy(ax->rbuff, orbuff, ax->rcount);
} else {
ax->rcount = 0;
- ax->rx_over_errors++;
+ ax->stats.rx_over_errors++;
set_bit(AXF_ERROR, &ax->flags);
}
}
@@ -279,72 +392,14 @@ static void ax_changedmtu(struct ax_disp *ax)
spin_unlock_bh(&ax->buflock);
- if (oxbuff != NULL)
- kfree(oxbuff);
- if (orbuff != NULL)
- kfree(orbuff);
-}
-
-
-/* Set the "sending" flag. This must be atomic. */
-static inline void ax_lock(struct ax_disp *ax)
-{
- netif_stop_queue(ax->dev);
-}
-
-
-/* Clear the "sending" flag. This must be atomic. */
-static inline void ax_unlock(struct ax_disp *ax)
-{
- netif_start_queue(ax->dev);
-}
-
-/* Send one completely decapsulated AX.25 packet to the AX.25 layer. */
-static void ax_bump(struct ax_disp *ax)
-{
- struct sk_buff *skb;
- int count;
-
- spin_lock_bh(&ax->buflock);
- if (ax->rbuff[0] > 0x0f) {
- if (ax->rbuff[0] & 0x20) {
- ax->crcmode = CRC_MODE_FLEX;
- if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
- ax->rx_errors++;
- return;
- }
- ax->rcount -= 2;
- /* dl9sau bugfix: the trailling two bytes flexnet crc
- * will not be passed to the kernel. thus we have
- * to correct the kissparm signature, because it
- * indicates a crc but there's none
- */
- *ax->rbuff &= ~0x20;
- }
- }
- spin_unlock_bh(&ax->buflock);
-
- count = ax->rcount;
-
- if ((skb = dev_alloc_skb(count)) == NULL) {
- printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", ax->dev->name);
- ax->rx_dropped++;
- return;
- }
-
- spin_lock_bh(&ax->buflock);
- memcpy(skb_put(skb,count), ax->rbuff, count);
- spin_unlock_bh(&ax->buflock);
- skb->protocol = ax25_type_trans(skb, ax->dev);
- netif_rx(skb);
- ax->dev->last_rx = jiffies;
- ax->rx_packets++;
- ax->rx_bytes+=count;
+ kfree(oxbuff);
+ kfree(orbuff);
}
/* Encapsulate one AX.25 packet and stuff into a TTY queue. */
-static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
+static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
{
+ struct mkiss *ax = netdev_priv(dev);
unsigned char *p;
int actual, count;
@@ -354,8 +409,8 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
len = ax->mtu;
printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
- ax->tx_dropped++;
- ax_unlock(ax);
+ ax->stats.tx_dropped++;
+ netif_start_queue(dev);
return;
}
@@ -376,10 +431,11 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
break;
}
- ax->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
- ax->tx_packets++;
- ax->tx_bytes+=actual;
+ ax->stats.tx_packets++;
+ ax->stats.tx_bytes += actual;
+
ax->dev->trans_start = jiffies;
ax->xleft = count - actual;
ax->xhead = ax->xbuff + actual;
@@ -387,37 +443,10 @@ static void ax_encaps(struct ax_disp *ax, unsigned char *icp, int len)
spin_unlock_bh(&ax->buflock);
}
-/*
- * Called by the driver when there's room for more data. If we have
- * more packets to send, we send them here.
- */
-static void ax25_write_wakeup(struct tty_struct *tty)
-{
- int actual;
- struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
-
- /* First make sure we're connected. */
- if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev))
- return;
- if (ax->xleft <= 0) {
- /* Now serial buffer is almost free & we can start
- * transmission of another packet
- */
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
-
- netif_wake_queue(ax->dev);
- return;
- }
-
- actual = tty->driver->write(tty, ax->xhead, ax->xleft);
- ax->xleft -= actual;
- ax->xhead += actual;
-}
-
/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ax_disp *ax = netdev_priv(dev);
+ struct mkiss *ax = netdev_priv(dev);
if (!netif_running(dev)) {
printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name);
@@ -429,7 +458,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
 * Maybe we must check the transmitter timeout here?
* 14 Oct 1994 Dmitry Gorodchanin.
*/
- if (jiffies - dev->trans_start < 20 * HZ) {
+ if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
/* 20 sec timeout not reached */
return 1;
}
@@ -439,20 +468,30 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
"bad line quality" : "driver error");
ax->xleft = 0;
- ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
- ax_unlock(ax);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
+ netif_start_queue(dev);
}
/* We were not busy, so we are now... :-) */
if (skb != NULL) {
- ax_lock(ax);
- ax_encaps(ax, skb->data, skb->len);
+ netif_stop_queue(dev);
+ ax_encaps(dev, skb->data, skb->len);
kfree_skb(skb);
}
return 0;
}
+static int ax_open_dev(struct net_device *dev)
+{
+ struct mkiss *ax = netdev_priv(dev);
+
+ if (ax->tty == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
/* Return the frame type ID */
@@ -481,7 +520,7 @@ static int ax_rebuild_header(struct sk_buff *skb)
/* Open the low-level part of the AX25 channel. Easy! */
static int ax_open(struct net_device *dev)
{
- struct ax_disp *ax = netdev_priv(dev);
+ struct mkiss *ax = netdev_priv(dev);
unsigned long len;
if (ax->tty == NULL)
@@ -518,7 +557,6 @@ static int ax_open(struct net_device *dev)
spin_lock_init(&ax->buflock);
- netif_start_queue(dev);
return 0;
noxbuff:
@@ -532,68 +570,100 @@ norbuff:
/* Close the low-level part of the AX25 channel. Easy! */
static int ax_close(struct net_device *dev)
{
- struct ax_disp *ax = netdev_priv(dev);
+ struct mkiss *ax = netdev_priv(dev);
- if (ax->tty == NULL)
- return -EBUSY;
-
- ax->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ if (ax->tty)
+ clear_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
netif_stop_queue(dev);
return 0;
}
-static int ax25_receive_room(struct tty_struct *tty)
+static struct net_device_stats *ax_get_stats(struct net_device *dev)
{
- return 65536; /* We can handle an infinite amount of data. :-) */
+ struct mkiss *ax = netdev_priv(dev);
+
+ return &ax->stats;
+}
+
+static void ax_setup(struct net_device *dev)
+{
+ static char ax25_bcast[AX25_ADDR_LEN] =
+ {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
+ static char ax25_test[AX25_ADDR_LEN] =
+ {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
+
+ /* Finish setting up the DEVICE info. */
+ dev->mtu = AX_MTU;
+ dev->hard_start_xmit = ax_xmit;
+ dev->open = ax_open_dev;
+ dev->stop = ax_close;
+ dev->get_stats = ax_get_stats;
+ dev->set_mac_address = ax_set_mac_address;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->type = ARPHRD_AX25;
+ dev->tx_queue_len = 10;
+ dev->hard_header = ax_header;
+ dev->rebuild_header = ax_rebuild_header;
+
+ memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
+ memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
+
+ dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
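
The ax25_bcast and ax25_test defaults in ax_setup() store every callsign character shifted left by one bit; AX.25 reserves the low bit of each address byte as the HDLC address-extension flag, so addresses are kept pre-shifted. A hedged sketch of that encoding (the helper name is made up; the last byte mirrors the '0'<<1 / '1'<<1 SSID style used above):

#include <stdio.h>
#include <string.h>

#define AX25_ADDR_LEN 7

/* Encode a callsign of up to six characters (space padded) plus a one-digit
 * SSID the way the mkiss defaults do: each byte shifted left by one. */
static void ax25_encode_call(const char *call, int ssid,
			     unsigned char out[AX25_ADDR_LEN])
{
	size_t len = strlen(call);
	int i;

	for (i = 0; i < 6; i++)
		out[i] = (unsigned char)((i < (int)len ? call[i] : ' ') << 1);
	out[6] = (unsigned char)(('0' + ssid) << 1);
}

int main(void)
{
	unsigned char addr[AX25_ADDR_LEN];
	int i;

	ax25_encode_call("LINUX", 1, addr);	/* same bytes as ax25_test above */
	for (i = 0; i < AX25_ADDR_LEN; i++)
		printf("%02x ", addr[i]);
	printf("\n");
	return 0;
}
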
/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of data has been received, which can now be decapsulated
- * and sent on to the AX.25 layer for further processing.
+ * We have a potential race on dereferencing tty->disc_data, because the tty
+ * layer provides no locking at all - thus one cpu could be running
+ * mkiss_receive_buf while another calls mkiss_close, which zeroes
+ * tty->disc_data and frees the memory that mkiss_receive_buf is using. The
+ * best way to fix this is to use a rwlock in the tty struct, but for now we
+ * use a single global rwlock for all ttys in the mkiss line discipline.
*/
-static void ax25_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
+
+static struct mkiss *mkiss_get(struct tty_struct *tty)
{
- struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+ struct mkiss *ax;
- if (ax == NULL || ax->magic != AX25_MAGIC || !netif_running(ax->dev))
- return;
+ read_lock(&disc_data_lock);
+ ax = tty->disc_data;
+ if (ax)
+ atomic_inc(&ax->refcnt);
+ read_unlock(&disc_data_lock);
- /*
- * Argh! mtu change time! - costs us the packet part received
- * at the change
- */
- if (ax->mtu != ax->dev->mtu + 73)
- ax_changedmtu(ax);
-
- /* Read the characters out of the buffer */
- while (count--) {
- if (fp != NULL && *fp++) {
- if (!test_and_set_bit(AXF_ERROR, &ax->flags))
- ax->rx_errors++;
- cp++;
- continue;
- }
+ return ax;
+}
- kiss_unesc(ax, *cp++);
- }
+static void mkiss_put(struct mkiss *ax)
+{
+ if (atomic_dec_and_test(&ax->refcnt))
+ up(&ax->dead_sem);
}
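
mkiss_get() and mkiss_put() above implement the teardown handshake described in the comment: readers grab the pointer and bump refcnt under disc_data_lock, while mkiss_close() clears the pointer, drops the initial reference and, if users are still in flight, sleeps on dead_sem until the last mkiss_put() wakes it. A hedged userspace sketch of the same pattern using C11 atomics and POSIX semaphores (build with -pthread; every name here is illustrative):

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct obj {
	atomic_int refcnt;
	sem_t dead_sem;
};

static void obj_put(struct obj *o)
{
	/* The reference count hitting zero wakes up the closer. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		sem_post(&o->dead_sem);
}

static void *user_thread(void *arg)
{
	struct obj *o = arg;

	sleep(1);	/* pretend to use the object for a while */
	obj_put(o);	/* release the reference handed to us below */
	return NULL;
}

int main(void)
{
	struct obj o;
	pthread_t t;

	atomic_init(&o.refcnt, 1);	/* creator's reference */
	sem_init(&o.dead_sem, 0, 0);

	atomic_fetch_add(&o.refcnt, 1);	/* like mkiss_get() in a reader */
	pthread_create(&t, NULL, user_thread, &o);

	/* Teardown, like mkiss_close(): drop our own reference and wait for
	 * the last in-flight user if the count did not reach zero here. */
	if (atomic_fetch_sub(&o.refcnt, 1) != 1)
		sem_wait(&o.dead_sem);

	pthread_join(t, NULL);
	printf("safe to free the object now\n");
	sem_destroy(&o.dead_sem);
	return 0;
}
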
-static int ax25_open(struct tty_struct *tty)
+static int mkiss_open(struct tty_struct *tty)
{
- struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+ struct net_device *dev;
+ struct mkiss *ax;
int err;
- /* First make sure we're not already connected. */
- if (ax && ax->magic == AX25_MAGIC)
- return -EEXIST;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
- /* OK. Find a free AX25 channel to use. */
- if ((ax = ax_alloc()) == NULL)
- return -ENFILE;
+ dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
+ if (!dev) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ax = netdev_priv(dev);
+ ax->dev = dev;
+
+ spin_lock_init(&ax->buflock);
+ atomic_set(&ax->refcnt, 1);
+ init_MUTEX_LOCKED(&ax->dead_sem);
ax->tty = tty;
tty->disc_data = ax;
@@ -602,283 +672,212 @@ static int ax25_open(struct tty_struct *tty)
tty->driver->flush_buffer(tty);
/* Restore default settings */
- ax->dev->type = ARPHRD_AX25;
+ dev->type = ARPHRD_AX25;
/* Perform the low-level AX25 initialization. */
- if ((err = ax_open(ax->dev)))
- return err;
+ if ((err = ax_open(ax->dev))) {
+ goto out_free_netdev;
+ }
- /* Done. We have linked the TTY line to a channel. */
- return ax->dev->base_addr;
-}
+ err = register_netdev(dev);
+ if (err)
+ goto out_free_buffers;
-static void ax25_close(struct tty_struct *tty)
-{
- struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
+ netif_start_queue(dev);
- /* First make sure we're connected. */
- if (ax == NULL || ax->magic != AX25_MAGIC)
- return;
+ /* Done. We have linked the TTY line to a channel. */
+ return 0;
- unregister_netdev(ax->dev);
+out_free_buffers:
+ kfree(ax->rbuff);
+ kfree(ax->xbuff);
- tty->disc_data = NULL;
- ax->tty = NULL;
+out_free_netdev:
+ free_netdev(dev);
- ax_free(ax);
+out:
+ return err;
}
-
-static struct net_device_stats *ax_get_stats(struct net_device *dev)
+static void mkiss_close(struct tty_struct *tty)
{
- static struct net_device_stats stats;
- struct ax_disp *ax = netdev_priv(dev);
-
- memset(&stats, 0, sizeof(struct net_device_stats));
-
- stats.rx_packets = ax->rx_packets;
- stats.tx_packets = ax->tx_packets;
- stats.rx_bytes = ax->rx_bytes;
- stats.tx_bytes = ax->tx_bytes;
- stats.rx_dropped = ax->rx_dropped;
- stats.tx_dropped = ax->tx_dropped;
- stats.tx_errors = ax->tx_errors;
- stats.rx_errors = ax->rx_errors;
- stats.rx_over_errors = ax->rx_over_errors;
-
- return &stats;
-}
+ struct mkiss *ax;
+ write_lock(&disc_data_lock);
+ ax = tty->disc_data;
+ tty->disc_data = NULL;
+ write_unlock(&disc_data_lock);
-/************************************************************************
- * STANDARD ENCAPSULATION *
- ************************************************************************/
-
-static int kiss_esc(unsigned char *s, unsigned char *d, int len)
-{
- unsigned char *ptr = d;
- unsigned char c;
+ if (ax == NULL)
+ return;
/*
- * Send an initial END character to flush out any
- * data that may have accumulated in the receiver
- * due to line noise.
+ * We have now ensured that nobody can start using ax from now on, but
+ * we have to wait for all existing users to finish.
*/
+ if (!atomic_dec_and_test(&ax->refcnt))
+ down(&ax->dead_sem);
- *ptr++ = END;
-
- while (len-- > 0) {
- switch (c = *s++) {
- case END:
- *ptr++ = ESC;
- *ptr++ = ESC_END;
- break;
- case ESC:
- *ptr++ = ESC;
- *ptr++ = ESC_ESC;
- break;
- default:
- *ptr++ = c;
- break;
- }
- }
+ unregister_netdev(ax->dev);
- *ptr++ = END;
+ /* Free all AX25 frame buffers. */
+ kfree(ax->rbuff);
+ kfree(ax->xbuff);
- return ptr - d;
+ ax->tty = NULL;
}
-/*
- * MW:
- * OK its ugly, but tell me a better solution without copying the
- * packet to a temporary buffer :-)
- */
-static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc, int len)
+/* Perform I/O control on an active ax25 channel. */
+static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
{
- unsigned char *ptr = d;
- unsigned char c=0;
-
- *ptr++ = END;
- while (len > 0) {
- if (len > 2)
- c = *s++;
- else if (len > 1)
- c = crc >> 8;
- else if (len > 0)
- c = crc & 0xff;
+ struct mkiss *ax = mkiss_get(tty);
+ struct net_device *dev;
+ unsigned int tmp, err;
- len--;
+ /* First make sure we're connected. */
+ if (ax == NULL)
+ return -ENXIO;
+
+ dev = ax->dev;
- switch (c) {
- case END:
- *ptr++ = ESC;
- *ptr++ = ESC_END;
- break;
- case ESC:
- *ptr++ = ESC;
- *ptr++ = ESC_ESC;
- break;
- default:
- *ptr++ = c;
- break;
+ switch (cmd) {
+ case SIOCGIFNAME:
+ err = copy_to_user((void __user *) arg, ax->dev->name,
+ strlen(ax->dev->name) + 1) ? -EFAULT : 0;
+ break;
+
+ case SIOCGIFENCAP:
+ err = put_user(4, (int __user *) arg);
+ break;
+
+ case SIOCSIFENCAP:
+ if (get_user(tmp, (int __user *) arg)) {
+ err = -EFAULT;
+ break;
}
- }
- *ptr++ = END;
- return ptr - d;
-}
-static void kiss_unesc(struct ax_disp *ax, unsigned char s)
-{
- switch (s) {
- case END:
- /* drop keeptest bit = VSV */
- if (test_bit(AXF_KEEPTEST, &ax->flags))
- clear_bit(AXF_KEEPTEST, &ax->flags);
+ ax->mode = tmp;
+ dev->addr_len = AX25_ADDR_LEN;
+ dev->hard_header_len = AX25_KISS_HEADER_LEN +
+ AX25_MAX_HEADER_LEN + 3;
+ dev->type = ARPHRD_AX25;
- if (!test_and_clear_bit(AXF_ERROR, &ax->flags) && (ax->rcount > 2))
- ax_bump(ax);
+ err = 0;
+ break;
- clear_bit(AXF_ESCAPE, &ax->flags);
- ax->rcount = 0;
- return;
+ case SIOCSIFHWADDR: {
+ char addr[AX25_ADDR_LEN];
+		printk(KERN_INFO "In SIOCSIFHWADDR\n");
- case ESC:
- set_bit(AXF_ESCAPE, &ax->flags);
- return;
- case ESC_ESC:
- if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
- s = ESC;
+ if (copy_from_user(&addr,
+ (void __user *) arg, AX25_ADDR_LEN)) {
+ err = -EFAULT;
break;
- case ESC_END:
- if (test_and_clear_bit(AXF_ESCAPE, &ax->flags))
- s = END;
- break;
- }
-
- spin_lock_bh(&ax->buflock);
- if (!test_bit(AXF_ERROR, &ax->flags)) {
- if (ax->rcount < ax->buffsize) {
- ax->rbuff[ax->rcount++] = s;
- spin_unlock_bh(&ax->buflock);
- return;
}
- ax->rx_over_errors++;
- set_bit(AXF_ERROR, &ax->flags);
+ spin_lock_irq(&dev->xmit_lock);
+ memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
+ spin_unlock_irq(&dev->xmit_lock);
+
+ err = 0;
+ break;
+ }
+ default:
+ err = -ENOIOCTLCMD;
}
- spin_unlock_bh(&ax->buflock);
-}
+ mkiss_put(ax);
-static int ax_set_mac_address(struct net_device *dev, void __user *addr)
-{
- if (copy_from_user(dev->dev_addr, addr, AX25_ADDR_LEN))
- return -EFAULT;
- return 0;
+ return err;
}
-static int ax_set_dev_mac_address(struct net_device *dev, void *addr)
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of data has been received, which can now be decapsulated
+ * and sent on to the AX.25 layer for further processing.
+ */
+static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
- struct sockaddr *sa = addr;
-
- memcpy(dev->dev_addr, sa->sa_data, AX25_ADDR_LEN);
+ struct mkiss *ax = mkiss_get(tty);
- return 0;
-}
-
-
-/* Perform I/O control on an active ax25 channel. */
-static int ax25_disp_ioctl(struct tty_struct *tty, void *file, int cmd, void __user *arg)
-{
- struct ax_disp *ax = (struct ax_disp *) tty->disc_data;
- unsigned int tmp;
+ if (!ax)
+ return;
- /* First make sure we're connected. */
- if (ax == NULL || ax->magic != AX25_MAGIC)
- return -EINVAL;
+ /*
+ * Argh! mtu change time! - costs us the packet part received
+ * at the change
+ */
+ if (ax->mtu != ax->dev->mtu + 73)
+ ax_changedmtu(ax);
- switch (cmd) {
- case SIOCGIFNAME:
- if (copy_to_user(arg, ax->dev->name, strlen(ax->dev->name) + 1))
- return -EFAULT;
- return 0;
-
- case SIOCGIFENCAP:
- return put_user(4, (int __user *)arg);
-
- case SIOCSIFENCAP:
- if (get_user(tmp, (int __user *)arg))
- return -EFAULT;
- ax->mode = tmp;
- ax->dev->addr_len = AX25_ADDR_LEN; /* sizeof an AX.25 addr */
- ax->dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3;
- ax->dev->type = ARPHRD_AX25;
- return 0;
-
- case SIOCSIFHWADDR:
- return ax_set_mac_address(ax->dev, arg);
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp != NULL && *fp++) {
+ if (!test_and_set_bit(AXF_ERROR, &ax->flags))
+ ax->stats.rx_errors++;
+ cp++;
+ continue;
+ }
- default:
- return -ENOIOCTLCMD;
+ kiss_unesc(ax, *cp++);
}
+
+ mkiss_put(ax);
+ if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
+ && tty->driver->unthrottle)
+ tty->driver->unthrottle(tty);
}
-static int ax_open_dev(struct net_device *dev)
+static int mkiss_receive_room(struct tty_struct *tty)
{
- struct ax_disp *ax = netdev_priv(dev);
-
- if (ax->tty == NULL)
- return -ENODEV;
-
- return 0;
+ return 65536; /* We can handle an infinite amount of data. :-) */
}
-
-/* Initialize the driver. Called by network startup. */
-static int ax25_init(struct net_device *dev)
+/*
+ * Called by the driver when there's room for more data. If we have
+ * more packets to send, we send them here.
+ */
+static void mkiss_write_wakeup(struct tty_struct *tty)
{
- struct ax_disp *ax = netdev_priv(dev);
-
- static char ax25_bcast[AX25_ADDR_LEN] =
- {'Q'<<1,'S'<<1,'T'<<1,' '<<1,' '<<1,' '<<1,'0'<<1};
- static char ax25_test[AX25_ADDR_LEN] =
- {'L'<<1,'I'<<1,'N'<<1,'U'<<1,'X'<<1,' '<<1,'1'<<1};
-
- if (ax == NULL) /* Allocation failed ?? */
- return -ENODEV;
+ struct mkiss *ax = mkiss_get(tty);
+ int actual;
- /* Set up the "AX25 Control Block". (And clear statistics) */
- memset(ax, 0, sizeof (struct ax_disp));
- ax->magic = AX25_MAGIC;
- ax->dev = dev;
+ if (!ax)
+ return;
- /* Finish setting up the DEVICE info. */
- dev->mtu = AX_MTU;
- dev->hard_start_xmit = ax_xmit;
- dev->open = ax_open_dev;
- dev->stop = ax_close;
- dev->get_stats = ax_get_stats;
- dev->set_mac_address = ax_set_dev_mac_address;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->type = ARPHRD_AX25;
- dev->tx_queue_len = 10;
- dev->hard_header = ax_header;
- dev->rebuild_header = ax_rebuild_header;
+ if (ax->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet
+ */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
+ netif_wake_queue(ax->dev);
+ goto out;
+ }
- /* New-style flags. */
- dev->flags = IFF_BROADCAST | IFF_MULTICAST;
+ actual = tty->driver->write(tty, ax->xhead, ax->xleft);
+ ax->xleft -= actual;
+ ax->xhead += actual;
- return 0;
+out:
+ mkiss_put(ax);
}
+static struct tty_ldisc ax_ldisc = {
+ .magic = TTY_LDISC_MAGIC,
+ .name = "mkiss",
+ .open = mkiss_open,
+ .close = mkiss_close,
+ .ioctl = mkiss_ioctl,
+ .receive_buf = mkiss_receive_buf,
+ .receive_room = mkiss_receive_room,
+ .write_wakeup = mkiss_write_wakeup
+};
-/* ******************************************************************** */
-/* * Init MKISS driver * */
-/* ******************************************************************** */
+static char banner[] __initdata = KERN_INFO \
+ "mkiss: AX.25 Multikiss, Hans Albas PE1AYX\n";
+static char msg_regfail[] __initdata = KERN_ERR \
+ "mkiss: can't register line discipline (err = %d)\n";
static int __init mkiss_init_driver(void)
{
@@ -886,64 +885,27 @@ static int __init mkiss_init_driver(void)
printk(banner);
- if (ax25_maxdev < 4)
- ax25_maxdev = 4; /* Sanity */
+	if ((status = tty_register_ldisc(N_AX25, &ax_ldisc)) != 0)
+		printk(msg_regfail, status);
- if ((ax25_ctrls = kmalloc(sizeof(void *) * ax25_maxdev, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "mkiss: Can't allocate ax25_ctrls[] array!\n");
- return -ENOMEM;
- }
-
- /* Clear the pointer array, we allocate devices when we need them */
- memset(ax25_ctrls, 0, sizeof(void*) * ax25_maxdev); /* Pointers */
-
- /* Fill in our line protocol discipline, and register it */
- ax_ldisc.magic = TTY_LDISC_MAGIC;
- ax_ldisc.name = "mkiss";
- ax_ldisc.open = ax25_open;
- ax_ldisc.close = ax25_close;
- ax_ldisc.ioctl = (int (*)(struct tty_struct *, struct file *,
- unsigned int, unsigned long))ax25_disp_ioctl;
- ax_ldisc.receive_buf = ax25_receive_buf;
- ax_ldisc.receive_room = ax25_receive_room;
- ax_ldisc.write_wakeup = ax25_write_wakeup;
-
- if ((status = tty_register_ldisc(N_AX25, &ax_ldisc)) != 0) {
- printk(KERN_ERR "mkiss: can't register line discipline (err = %d)\n", status);
- kfree(ax25_ctrls);
- }
return status;
}
+static const char msg_unregfail[] __exitdata = KERN_ERR \
+ "mkiss: can't unregister line discipline (err = %d)\n";
+
static void __exit mkiss_exit_driver(void)
{
- int i;
-
- for (i = 0; i < ax25_maxdev; i++) {
- if (ax25_ctrls[i]) {
- /*
- * VSV = if dev->start==0, then device
- * unregistered while close proc.
- */
- if (netif_running(&ax25_ctrls[i]->dev))
- unregister_netdev(&ax25_ctrls[i]->dev);
- kfree(ax25_ctrls[i]);
- }
- }
+ int ret;
- kfree(ax25_ctrls);
- ax25_ctrls = NULL;
-
- if ((i = tty_unregister_ldisc(N_AX25)))
- printk(KERN_ERR "mkiss: can't unregister line discipline (err = %d)\n", i);
+ if ((ret = tty_unregister_ldisc(N_AX25)))
+ printk(msg_unregfail, ret);
}
-MODULE_AUTHOR("Hans Albas PE1AYX <hans@esrac.ele.tue.nl>");
+MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
-MODULE_PARM(ax25_maxdev, "i");
-MODULE_PARM_DESC(ax25_maxdev, "number of MKISS devices");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_AX25);
+
module_init(mkiss_init_driver);
module_exit(mkiss_exit_driver);
-
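
For reference, the byte stuffing that the removed kiss_esc()/kiss_unesc() helpers implement (and which the rewritten mkiss.c keeps in equivalent form) is the standard KISS/SLIP framing: frames are delimited by END (0xC0), and END/ESC bytes inside the payload are transposed to ESC ESC_END and ESC ESC_ESC. A standalone sketch, assuming the usual KISS constant values; the helper and buffer names are illustrative:

#include <stdio.h>
#include <stddef.h>

#define END     0xC0  /* frame delimiter            */
#define ESC     0xDB  /* escape character           */
#define ESC_END 0xDC  /* transposed frame delimiter */
#define ESC_ESC 0xDD  /* transposed escape          */

/* Escape 'len' bytes from s into d; d must hold at least 2 * len + 2 bytes.
 * Returns the number of bytes written, as kiss_esc() does. */
static size_t kiss_escape(const unsigned char *s, unsigned char *d, size_t len)
{
	unsigned char *ptr = d;

	*ptr++ = END;		/* flush any line noise in the receiver */
	while (len--) {
		unsigned char c = *s++;

		switch (c) {
		case END:
			*ptr++ = ESC;
			*ptr++ = ESC_END;
			break;
		case ESC:
			*ptr++ = ESC;
			*ptr++ = ESC_ESC;
			break;
		default:
			*ptr++ = c;
		}
	}
	*ptr++ = END;		/* close the frame */

	return ptr - d;
}

int main(void)
{
	unsigned char in[] = { 0x00, END, 0x42, ESC };
	unsigned char out[2 * sizeof(in) + 2];
	size_t i, n = kiss_escape(in, out, sizeof(in));

	for (i = 0; i < n; i++)
		printf("%02x ", out[i]);
	printf("\n");	/* c0 00 db dc 42 db dd c0 */
	return 0;
}

The receive side (kiss_unesc() above) simply reverses the transposition and treats END as end-of-frame, dropping frames shorter than the two-byte CRC.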
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f8d3385c784..c83271b3862 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -119,7 +119,7 @@ struct ixgb_adapter;
* so a DMA handle can be stored along with the buffer */
struct ixgb_buffer {
struct sk_buff *skb;
- uint64_t dma;
+ dma_addr_t dma;
unsigned long time_stamp;
uint16_t length;
uint16_t next_to_watch;
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 3aae110c556..661a46b95a6 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -565,24 +565,6 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
}
}
-/******************************************************************************
- * return the compatibility flags from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * compatibility flags if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_compatibility(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->compatibility));
-
- return(0);
-}
/******************************************************************************
* return the Printed Board Assembly number from EEPROM
@@ -602,81 +584,6 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
return(0);
}
-/******************************************************************************
- * return the Initialization Control Word 1 from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->init_ctrl_reg_1));
-
- return(0);
-}
-
-/******************************************************************************
- * return the Initialization Control Word 2 from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->init_ctrl_reg_2));
-
- return(0);
-}
-
-/******************************************************************************
- * return the Subsystem Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * Subsystem Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->subsystem_id));
-
- return(0);
-}
-
-/******************************************************************************
- * return the Sub Vendor Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * Sub Vendor Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->subvendor_id));
-
- return(0);
-}
/******************************************************************************
* return the Device Id from EEPROM
@@ -694,81 +601,6 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (le16_to_cpu(ee_map->device_id));
- return(0);
-}
-
-/******************************************************************************
- * return the Vendor Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * Device Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->vendor_id));
-
- return(0);
-}
-
-/******************************************************************************
- * return the Software Defined Pins Register from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * SDP Register if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->swdpins_reg));
-
- return(0);
+ return (0);
}
-/******************************************************************************
- * return the D3 Power Management Bits from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint8_t
-ixgb_get_ee_d3_power(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->d3_power));
-
- return(0);
-}
-
-/******************************************************************************
- * return the D0 Power Management Bits from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint8_t
-ixgb_get_ee_d0_power(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
- return (le16_to_cpu(ee_map->d0_power));
-
- return(0);
-}
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 3fa113854ee..9d026ed77dd 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -98,10 +98,10 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
static int
ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
@@ -120,7 +120,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
static int
ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
if(ecmd->autoneg == AUTONEG_ENABLE ||
ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
@@ -130,6 +130,12 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ixgb_down(adapter, TRUE);
ixgb_reset(adapter);
ixgb_up(adapter);
+ /* be optimistic about our link, since we were up before */
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+
} else
ixgb_reset(adapter);
@@ -140,7 +146,7 @@ static void
ixgb_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
pause->autoneg = AUTONEG_DISABLE;
@@ -159,7 +165,7 @@ static int
ixgb_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
if(pause->autoneg == AUTONEG_ENABLE)
@@ -177,6 +183,11 @@ ixgb_set_pauseparam(struct net_device *netdev,
if(netif_running(adapter->netdev)) {
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
+ /* be optimistic about our link, since we were up before */
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
} else
ixgb_reset(adapter);
@@ -186,19 +197,26 @@ ixgb_set_pauseparam(struct net_device *netdev,
static uint32_t
ixgb_get_rx_csum(struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
return adapter->rx_csum;
}
static int
ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
+
adapter->rx_csum = data;
if(netif_running(netdev)) {
ixgb_down(adapter,TRUE);
ixgb_up(adapter);
+ /* be optimistic about our link, since we were up before */
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
} else
ixgb_reset(adapter);
return 0;
@@ -246,14 +264,15 @@ static void
ixgb_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint32_t *reg = p;
uint32_t *reg_start = reg;
uint8_t i;
/* the 1 (one) below indicates an attempt at versioning, if the
- * interface in ethtool or the driver this 1 should be incremented */
+ * interface in ethtool or the driver changes, this 1 should be
+ * incremented */
regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
/* General Registers */
@@ -283,7 +302,8 @@ ixgb_get_regs(struct net_device *netdev,
*reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
- for (i = 0; i < IXGB_RAR_ENTRIES; i++) {
+	/* there are 16 RAR entries in hardware; we only use 3 */
+ for(i = 0; i < 16; i++) {
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
}
@@ -391,7 +411,7 @@ static int
ixgb_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, uint8_t *bytes)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint16_t *eeprom_buff;
int i, max_len, first_word, last_word;
@@ -439,7 +459,7 @@ static int
ixgb_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, uint8_t *bytes)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint16_t *eeprom_buff;
void *ptr;
@@ -497,7 +517,7 @@ static void
ixgb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
strncpy(drvinfo->driver, ixgb_driver_name, 32);
strncpy(drvinfo->version, ixgb_driver_version, 32);
@@ -512,7 +532,7 @@ static void
ixgb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
@@ -530,7 +550,7 @@ static int
ixgb_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
@@ -573,6 +593,11 @@ ixgb_set_ringparam(struct net_device *netdev,
adapter->tx_ring = tx_new;
if((err = ixgb_up(adapter)))
return err;
+ /* be optimistic about our link, since we were up before */
+ adapter->link_speed = 10000;
+ adapter->link_duplex = FULL_DUPLEX;
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
}
return 0;
@@ -607,7 +632,7 @@ ixgb_led_blink_callback(unsigned long data)
static int
ixgb_phys_id(struct net_device *netdev, uint32_t data)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
@@ -643,7 +668,7 @@ static void
ixgb_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
int i;
ixgb_update_stats(adapter);
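
The recurring netdev->priv to netdev_priv(netdev) conversion in this file depends on the private area being allocated together with the net_device (alloc_etherdev(sizeof(struct ixgb_adapter))), so the accessor is simple pointer arithmetic past the device structure rather than a stored pointer. A userspace illustration of that layout, ignoring the alignment the real allocator applies; every name below is made up for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_net_device {
	char name[16];
	/* the real struct net_device has many more fields */
};

struct fake_adapter {
	int rx_csum;
};

/* mimic netdev_priv(): private data starts right after the device struct
 * (the real kernel helper also rounds up to a 32-byte boundary) */
static void *fake_netdev_priv(struct fake_net_device *dev)
{
	return (char *)dev + sizeof(*dev);
}

/* mimic alloc_etherdev(priv_size): one allocation covers both structures */
static struct fake_net_device *fake_alloc_etherdev(size_t priv_size)
{
	return calloc(1, sizeof(struct fake_net_device) + priv_size);
}

int main(void)
{
	struct fake_net_device *dev =
		fake_alloc_etherdev(sizeof(struct fake_adapter));
	struct fake_adapter *adapter;

	if (!dev)
		return 1;

	adapter = fake_netdev_priv(dev);
	strcpy(dev->name, "eth0");
	adapter->rx_csum = 1;
	printf("%s: rx_csum=%d\n", dev->name, adapter->rx_csum);
	free(dev);
	return 0;
}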
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 97898efe7cc..8bcf31ed10c 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -822,17 +822,8 @@ extern void ixgb_clear_vfta(struct ixgb_hw *hw);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
-uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw *hw);
uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw);
-uint8_t ixgb_get_ee_d3_power(struct ixgb_hw *hw);
-uint8_t ixgb_get_ee_d0_power(struct ixgb_hw *hw);
boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 097b90ccf57..5c555373adb 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -29,6 +29,11 @@
#include "ixgb.h"
/* Change Log
+ * 1.0.96 04/19/05
+ * - Make needlessly global code static -- bunk@stusta.de
+ * - ethtool cleanup -- shemminger@osdl.org
+ * - Support for MODULE_VERSION -- linville@tuxdriver.com
+ * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
* 1.0.88 01/05/05
* - include fix to the condition that determines when to quit NAPI - Robert Olsson
* - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
@@ -47,10 +52,9 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-
-#define DRV_VERSION "1.0.95-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.100-k2"DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION;
-char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
*
@@ -145,10 +149,12 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* some defines for controlling descriptor fetches in h/w */
-#define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */
-#define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is
- pushed this many descriptors from head */
#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */
+#define RXDCTL_PTHRESH_DEFAULT 0		  /* chip considers prefetch below
+ * this */
+#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
+ * is pushed this many descriptors
+ * from head */
/**
* ixgb_init_module - Driver Registration Routine
@@ -376,7 +382,7 @@ ixgb_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
- adapter = netdev->priv;
+ adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.back = adapter;
@@ -512,7 +518,7 @@ static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
@@ -583,7 +589,7 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
static int
ixgb_open(struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
int err;
/* allocate transmit descriptors */
@@ -626,7 +632,7 @@ err_setup_tx:
static int
ixgb_close(struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
ixgb_down(adapter, TRUE);
@@ -1017,7 +1023,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
if(!is_valid_ether_addr(addr->sa_data))
@@ -1043,7 +1049,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
static void
ixgb_set_multi(struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
struct dev_mc_list *mc_ptr;
uint32_t rctl;
@@ -1371,7 +1377,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
unsigned int first;
unsigned int tx_flags = 0;
unsigned long flags;
@@ -1425,7 +1431,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
static void
ixgb_tx_timeout(struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->tx_timeout_task);
@@ -1434,7 +1440,7 @@ ixgb_tx_timeout(struct net_device *netdev)
static void
ixgb_tx_timeout_task(struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
ixgb_down(adapter, TRUE);
ixgb_up(adapter);
@@ -1451,7 +1457,7 @@ ixgb_tx_timeout_task(struct net_device *netdev)
static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
return &adapter->net_stats;
}
@@ -1467,7 +1473,7 @@ ixgb_get_stats(struct net_device *netdev)
static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
@@ -1522,7 +1528,8 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
/* fix up multicast stats by removing broadcasts */
- multi -= bcast;
+ if(multi >= bcast)
+ multi -= bcast;
adapter->stats.mprcl += (multi & 0xFFFFFFFF);
adapter->stats.mprch += (multi >> 32);
@@ -1641,7 +1648,7 @@ static irqreturn_t
ixgb_intr(int irq, void *data, struct pt_regs *regs)
{
struct net_device *netdev = data;
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint32_t icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
@@ -1688,7 +1695,7 @@ ixgb_intr(int irq, void *data, struct pt_regs *regs)
static int
ixgb_clean(struct net_device *netdev, int *budget)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
int work_to_do = min(*budget, netdev->quota);
int tx_cleaned;
int work_done = 0;
@@ -2017,7 +2024,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, rctl;
ixgb_irq_disable(adapter);
@@ -2055,7 +2062,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
uint32_t vfta, index;
/* add VID to filter table */
@@ -2069,7 +2076,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
- struct ixgb_adapter *adapter = netdev->priv;
+ struct ixgb_adapter *adapter = netdev_priv(netdev);
uint32_t vfta, index;
ixgb_irq_disable(adapter);
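
The multicast-statistics hunk above guards the subtraction because the 64-bit multicast total assembled from MPRCL/MPRCH can, presumably because the counters are not read atomically, come out lower than the broadcast count, and an unguarded u64 subtraction wraps around instead of going negative. A small standalone demonstration of the wraparound and the guard:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t multi = 5, bcast = 7;

	/* unguarded subtraction wraps: prints 18446744073709551614 */
	printf("unguarded: %llu\n", (unsigned long long)(multi - bcast));

	/* the guarded version from the patch leaves the count alone */
	if (multi >= bcast)
		multi -= bcast;
	printf("guarded:   %llu\n", (unsigned long long)multi);	/* 5 */
	return 0;
}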
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 7fec613e167..8423cb6875f 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -1,5 +1,10 @@
/*
- * sonic.c
+ * jazzsonic.c
+ *
+ * (C) 2005 Finn Thain
+ *
+ * Converted to DMA API, and (from the mac68k project) introduced
+ * dhd's support for 16-bit cards.
*
* (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*
@@ -28,8 +33,8 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <asm/bootinfo.h>
#include <asm/system.h>
@@ -44,22 +49,20 @@ static struct platform_device *jazz_sonic_device;
#define SONIC_MEM_SIZE 0x100
-#define SREGS_PAD(n) u16 n;
-
#include "sonic.h"
/*
* Macros to access SONIC registers
*/
-#define SONIC_READ(reg) (*((volatile unsigned int *)base_addr+reg))
+#define SONIC_READ(reg) (*((volatile unsigned int *)dev->base_addr+reg))
#define SONIC_WRITE(reg,val) \
do { \
- *((volatile unsigned int *)base_addr+(reg)) = (val); \
+ *((volatile unsigned int *)dev->base_addr+(reg)) = (val); \
} while (0)
-/* use 0 for production, 1 for verification, >2 for debug */
+/* use 0 for production, 1 for verification, >1 for debug */
#ifdef SONIC_DEBUG
static unsigned int sonic_debug = SONIC_DEBUG;
#else
@@ -85,18 +88,18 @@ static unsigned short known_revisions[] =
0xffff /* end of list */
};
-static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
- unsigned int irq)
+static int __init sonic_probe1(struct net_device *dev)
{
static unsigned version_printed;
unsigned int silicon_revision;
unsigned int val;
- struct sonic_local *lp;
+ struct sonic_local *lp = netdev_priv(dev);
int err = -ENODEV;
int i;
- if (!request_mem_region(base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
+ if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
return -EBUSY;
+
/*
* get the Silicon Revision ID. If this is one of the known
* one assume that we found a SONIC ethernet controller at
@@ -120,11 +123,7 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
if (sonic_debug && version_printed++ == 0)
printk(version);
- printk("%s: Sonic ethernet found at 0x%08lx, ", dev->name, base_addr);
-
- /* Fill in the 'dev' fields. */
- dev->base_addr = base_addr;
- dev->irq = irq;
+ printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", lp->device->bus_id, dev->base_addr);
/*
* Put the sonic into software reset, then
@@ -138,84 +137,44 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
dev->dev_addr[i*2+1] = val >> 8;
}
- printk("HW Address ");
- for (i = 0; i < 6; i++) {
- printk("%2.2x", dev->dev_addr[i]);
- if (i<5)
- printk(":");
- }
-
- printk(" IRQ %d\n", irq);
-
err = -ENOMEM;
/* Initialize the device structure. */
- if (dev->priv == NULL) {
- /*
- * the memory be located in the same 64kb segment
- */
- lp = NULL;
- i = 0;
- do {
- lp = kmalloc(sizeof(*lp), GFP_KERNEL);
- if ((unsigned long) lp >> 16
- != ((unsigned long)lp + sizeof(*lp) ) >> 16) {
- /* FIXME, free the memory later */
- kfree(lp);
- lp = NULL;
- }
- } while (lp == NULL && i++ < 20);
-
- if (lp == NULL) {
- printk("%s: couldn't allocate memory for descriptors\n",
- dev->name);
- goto out;
- }
- memset(lp, 0, sizeof(struct sonic_local));
-
- /* get the virtual dma address */
- lp->cda_laddr = vdma_alloc(CPHYSADDR(lp),sizeof(*lp));
- if (lp->cda_laddr == ~0UL) {
- printk("%s: couldn't get DMA page entry for "
- "descriptors\n", dev->name);
- goto out1;
- }
-
- lp->tda_laddr = lp->cda_laddr + sizeof (lp->cda);
- lp->rra_laddr = lp->tda_laddr + sizeof (lp->tda);
- lp->rda_laddr = lp->rra_laddr + sizeof (lp->rra);
-
- /* allocate receive buffer area */
- /* FIXME, maybe we should use skbs */
- lp->rba = kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL);
- if (!lp->rba) {
- printk("%s: couldn't allocate receive buffers\n",
- dev->name);
- goto out2;
- }
+ lp->dma_bitmode = SONIC_BITMODE32;
- /* get virtual dma address */
- lp->rba_laddr = vdma_alloc(CPHYSADDR(lp->rba),
- SONIC_NUM_RRS * SONIC_RBSIZE);
- if (lp->rba_laddr == ~0UL) {
- printk("%s: couldn't get DMA page entry for receive "
- "buffers\n",dev->name);
- goto out3;
- }
-
- /* now convert pointer to KSEG1 pointer */
- lp->rba = (char *)KSEG1ADDR(lp->rba);
- flush_cache_all();
- dev->priv = (struct sonic_local *)KSEG1ADDR(lp);
+ /* Allocate the entire chunk of memory for the descriptors.
+ Note that this cannot cross a 64K boundary. */
+ if ((lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id);
+ goto out;
}
- lp = (struct sonic_local *)dev->priv;
+ /* Now set up the pointers to point to the appropriate places */
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
dev->open = sonic_open;
dev->stop = sonic_close;
dev->hard_start_xmit = sonic_send_packet;
- dev->get_stats = sonic_get_stats;
+ dev->get_stats = sonic_get_stats;
dev->set_multicast_list = &sonic_multicast_list;
+ dev->tx_timeout = sonic_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
/*
@@ -226,14 +185,8 @@ static int __init sonic_probe1(struct net_device *dev, unsigned long base_addr,
SONIC_WRITE(SONIC_MPT,0xffff);
return 0;
-out3:
- kfree(lp->rba);
-out2:
- vdma_free(lp->cda_laddr);
-out1:
- kfree(lp);
out:
- release_region(base_addr, SONIC_MEM_SIZE);
+ release_region(dev->base_addr, SONIC_MEM_SIZE);
return err;
}
@@ -245,7 +198,6 @@ static int __init jazz_sonic_probe(struct device *device)
{
struct net_device *dev;
struct sonic_local *lp;
- unsigned long base_addr;
int err = 0;
int i;
@@ -255,21 +207,26 @@ static int __init jazz_sonic_probe(struct device *device)
if (mips_machgroup != MACH_GROUP_JAZZ)
return -ENODEV;
- dev = alloc_etherdev(0);
+ dev = alloc_etherdev(sizeof(struct sonic_local));
if (!dev)
return -ENOMEM;
+ lp = netdev_priv(dev);
+ lp->device = device;
+ SET_NETDEV_DEV(dev, device);
+ SET_MODULE_OWNER(dev);
+
netdev_boot_setup_check(dev);
- base_addr = dev->base_addr;
- if (base_addr >= KSEG0) { /* Check a single specified location. */
- err = sonic_probe1(dev, base_addr, dev->irq);
- } else if (base_addr != 0) { /* Don't probe at all. */
+ if (dev->base_addr >= KSEG0) { /* Check a single specified location. */
+ err = sonic_probe1(dev);
+ } else if (dev->base_addr != 0) { /* Don't probe at all. */
err = -ENXIO;
} else {
for (i = 0; sonic_portlist[i].port; i++) {
- int io = sonic_portlist[i].port;
- if (sonic_probe1(dev, io, sonic_portlist[i].irq) == 0)
+ dev->base_addr = sonic_portlist[i].port;
+ dev->irq = sonic_portlist[i].irq;
+ if (sonic_probe1(dev) == 0)
break;
}
if (!sonic_portlist[i].port)
@@ -281,14 +238,17 @@ static int __init jazz_sonic_probe(struct device *device)
if (err)
goto out1;
+ printk("%s: MAC ", dev->name);
+ for (i = 0; i < 6; i++) {
+ printk("%2.2x", dev->dev_addr[i]);
+ if (i < 5)
+ printk(":");
+ }
+ printk(" IRQ %d\n", dev->irq);
+
return 0;
out1:
- lp = dev->priv;
- vdma_free(lp->rba_laddr);
- kfree(lp->rba);
- vdma_free(lp->cda_laddr);
- kfree(lp);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
@@ -296,21 +256,22 @@ out:
return err;
}
-/*
- * SONIC uses a normal IRQ
- */
-#define sonic_request_irq request_irq
-#define sonic_free_irq free_irq
+MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
-#define sonic_chiptomem(x) KSEG1ADDR(vdma_log2phys(x))
+#define SONIC_IRQ_FLAG SA_INTERRUPT
#include "sonic.c"
static int __devexit jazz_sonic_device_remove (struct device *device)
{
struct net_device *dev = device->driver_data;
+ struct sonic_local* lp = netdev_priv(dev);
unregister_netdev (dev);
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region (dev->base_addr, SONIC_MEM_SIZE);
free_netdev (dev);
@@ -323,7 +284,7 @@ static struct device_driver jazz_sonic_driver = {
.probe = jazz_sonic_probe,
.remove = __devexit_p(jazz_sonic_device_remove),
};
-
+
static void jazz_sonic_platform_release (struct device *device)
{
struct platform_device *pldev;
@@ -336,10 +297,11 @@ static void jazz_sonic_platform_release (struct device *device)
static int __init jazz_sonic_init_module(void)
{
struct platform_device *pldev;
+ int err;
- if (driver_register(&jazz_sonic_driver)) {
+ if ((err = driver_register(&jazz_sonic_driver))) {
printk(KERN_ERR "Driver registration failed\n");
- return -ENOMEM;
+ return err;
}
jazz_sonic_device = NULL;
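
Both SONIC drivers in this patch switch to allocating one dma_alloc_coherent() block and carving it into the CAM, transmit-descriptor, receive-descriptor and receive-resource areas, advancing the CPU pointer and the bus (laddr) address in lock-step so the chunk never straddles the 64K boundary the chip cares about. A plain-malloc userspace sketch of that carving; the sizes and the 0x02000000 bus address are invented stand-ins for the SONIC_* constants:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-ins for SIZEOF_SONIC_CDA, SIZEOF_SONIC_TD, ... */
#define CDA_SIZE 64
#define TD_SIZE  32
#define RD_SIZE  32
#define RR_SIZE  16
#define NUM_TDS  16
#define NUM_RDS  16
#define NUM_RRS  16

int main(void)
{
	size_t total = CDA_SIZE + TD_SIZE * NUM_TDS + RD_SIZE * NUM_RDS
		       + RR_SIZE * NUM_RRS;
	uint8_t *descriptors = malloc(total);	   /* dma_alloc_coherent() analogue */
	uintptr_t descriptors_laddr = 0x02000000;  /* pretend bus address            */

	if (!descriptors)
		return 1;

	/* CPU-visible pointers, carved out exactly as the drivers do */
	uint8_t *cda = descriptors;
	uint8_t *tda = cda + CDA_SIZE;
	uint8_t *rda = tda + TD_SIZE * NUM_TDS;
	uint8_t *rra = rda + RD_SIZE * NUM_RDS;

	/* Matching bus addresses, advanced by the same offsets */
	uintptr_t cda_laddr = descriptors_laddr;
	uintptr_t tda_laddr = cda_laddr + CDA_SIZE;
	uintptr_t rda_laddr = tda_laddr + TD_SIZE * NUM_TDS;
	uintptr_t rra_laddr = rda_laddr + RD_SIZE * NUM_RDS;

	printf("tda: cpu +%td, bus 0x%lx\n", tda - descriptors,
	       (unsigned long)tda_laddr);
	printf("rda: cpu +%td, bus 0x%lx\n", rda - descriptors,
	       (unsigned long)rda_laddr);
	printf("rra: cpu +%td, bus 0x%lx\n", rra - descriptors,
	       (unsigned long)rra_laddr);
	free(descriptors);
	return 0;
}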
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1f61f0cc95d..690a1aae0b3 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,6 +68,7 @@ static DEFINE_PER_CPU(struct net_device_stats, loopback_stats);
* of largesending device modulo TCP checksum, which is ignored for loopback.
*/
+#ifdef LOOPBACK_TSO
static void emulate_large_send_offload(struct sk_buff *skb)
{
struct iphdr *iph = skb->nh.iph;
@@ -119,6 +120,7 @@ static void emulate_large_send_offload(struct sk_buff *skb)
dev_kfree_skb(skb);
}
+#endif /* LOOPBACK_TSO */
/*
* The higher levels take care of making this non-reentrant (it's
@@ -130,12 +132,13 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
- skb->protocol=eth_type_trans(skb,dev);
- skb->dev=dev;
+ skb->protocol = eth_type_trans(skb,dev);
+ skb->dev = dev;
#ifndef LOOPBACK_MUST_CHECKSUM
skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif
+#ifdef LOOPBACK_TSO
if (skb_shinfo(skb)->tso_size) {
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
@@ -143,14 +146,14 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
emulate_large_send_offload(skb);
return 0;
}
-
+#endif
dev->last_rx = jiffies;
lb_stats = &per_cpu(loopback_stats, get_cpu());
lb_stats->rx_bytes += skb->len;
- lb_stats->tx_bytes += skb->len;
+ lb_stats->tx_bytes = lb_stats->rx_bytes;
lb_stats->rx_packets++;
- lb_stats->tx_packets++;
+ lb_stats->tx_packets = lb_stats->rx_packets;
put_cpu();
netif_rx(skb);
@@ -208,9 +211,12 @@ struct net_device loopback_dev = {
.type = ARPHRD_LOOPBACK, /* 0x0001*/
.rebuild_header = eth_rebuild_header,
.flags = IFF_LOOPBACK,
- .features = NETIF_F_SG|NETIF_F_FRAGLIST
- |NETIF_F_NO_CSUM|NETIF_F_HIGHDMA
- |NETIF_F_LLTX,
+ .features = NETIF_F_SG | NETIF_F_FRAGLIST
+#ifdef LOOPBACK_TSO
+ | NETIF_F_TSO
+#endif
+ | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA
+ | NETIF_F_LLTX,
.ethtool_ops = &loopback_ethtool_ops,
};
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index be28c65de72..405e18365ed 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -1,6 +1,12 @@
/*
* macsonic.c
*
+ * (C) 2005 Finn Thain
+ *
+ * Converted to DMA API, converted to unified driver model, made it work as
+ * a module again, and from the mac68k project, introduced more 32-bit cards
+ * and dhd's support for 16-bit cards.
+ *
* (C) 1998 Alan Cox
*
* Debugging Andreas Ehliar, Michael Schmitz
@@ -26,8 +32,8 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/types.h>
-#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -41,8 +47,8 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <asm/bootinfo.h>
#include <asm/system.h>
@@ -54,25 +60,28 @@
#include <asm/macints.h>
#include <asm/mac_via.h>
-#define SREGS_PAD(n) u16 n;
+static char mac_sonic_string[] = "macsonic";
+static struct platform_device *mac_sonic_device;
#include "sonic.h"
-#define SONIC_READ(reg) \
- nubus_readl(base_addr+(reg))
-#define SONIC_WRITE(reg,val) \
- nubus_writel((val), base_addr+(reg))
-#define sonic_read(dev, reg) \
- nubus_readl((dev)->base_addr+(reg))
-#define sonic_write(dev, reg, val) \
- nubus_writel((val), (dev)->base_addr+(reg))
-
+/* These should basically be bus-size and endian independent (since
+ the SONIC is at least smart enough that it uses the same endianness
+ as the host, unlike certain less enlightened Macintosh NICs) */
+#define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \
+ + lp->reg_offset))
+#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \
+ + lp->reg_offset))
+
+/* use 0 for production, 1 for verification, >1 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
-static int sonic_debug;
static int sonic_version_printed;
-static int reg_offset;
-
extern int mac_onboard_sonic_probe(struct net_device* dev);
extern int mac_nubus_sonic_probe(struct net_device* dev);
@@ -108,40 +117,6 @@ enum macsonic_type {
#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr)
-struct net_device * __init macsonic_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(0);
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0)
- sprintf(dev->name, "eth%d", unit);
-
- SET_MODULE_OWNER(dev);
-
- /* This will catch fatal stuff like -ENOMEM as well as success */
- err = mac_onboard_sonic_probe(dev);
- if (err == 0)
- goto found;
- if (err != -ENODEV)
- goto out;
- err = mac_nubus_sonic_probe(dev);
- if (err)
- goto out;
-found:
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- kfree(dev->priv);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
/*
* For reversing the PROM address
*/
@@ -160,103 +135,55 @@ static inline void bit_reverse_addr(unsigned char addr[6])
int __init macsonic_init(struct net_device* dev)
{
- struct sonic_local* lp = NULL;
- int i;
+ struct sonic_local* lp = netdev_priv(dev);
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
- for (i = 0; i < 20; i++) {
- unsigned long desc_base, desc_top;
- if((lp = kmalloc(sizeof(struct sonic_local), GFP_KERNEL | GFP_DMA)) == NULL) {
- printk(KERN_ERR "%s: couldn't allocate descriptor buffers\n", dev->name);
- return -ENOMEM;
- }
-
- desc_base = (unsigned long) lp;
- desc_top = desc_base + sizeof(struct sonic_local);
- if ((desc_top & 0xffff) >= (desc_base & 0xffff))
- break;
- /* Hmm. try again (FIXME: does this actually work?) */
- kfree(lp);
- printk(KERN_DEBUG
- "%s: didn't get continguous chunk [%08lx - %08lx], trying again\n",
- dev->name, desc_base, desc_top);
- }
-
- if (lp == NULL) {
- printk(KERN_ERR "%s: tried 20 times to allocate descriptor buffers, giving up.\n",
- dev->name);
+ if ((lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", lp->device->bus_id);
return -ENOMEM;
- }
-
- dev->priv = lp;
-
-#if 0
- /* this code is only here as a curiousity... mainly, where the
- fuck did SONIC_BUS_SCALE come from, and what was it supposed
- to do? the normal allocation works great for 32 bit stuffs.. */
+ }
/* Now set up the pointers to point to the appropriate places */
- lp->cda = lp->sonic_desc;
- lp->tda = lp->cda + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
- * SONIC_BUS_SCALE(lp->dma_bitmode));
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
-#endif
-
- memset(lp, 0, sizeof(struct sonic_local));
-
- lp->cda_laddr = (unsigned int)&(lp->cda);
- lp->tda_laddr = (unsigned int)lp->tda;
- lp->rra_laddr = (unsigned int)lp->rra;
- lp->rda_laddr = (unsigned int)lp->rda;
-
- /* FIXME, maybe we should use skbs */
- if ((lp->rba = (char *)
- kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL | GFP_DMA)) == NULL) {
- printk(KERN_ERR "%s: couldn't allocate receive buffers\n", dev->name);
- dev->priv = NULL;
- kfree(lp);
- return -ENOMEM;
- }
-
- lp->rba_laddr = (unsigned int)lp->rba;
-
- {
- int rs, ds;
-
- /* almost always 12*4096, but let's not take chances */
- rs = ((SONIC_NUM_RRS * SONIC_RBSIZE + 4095) / 4096) * 4096;
- /* almost always under a page, but let's not take chances */
- ds = ((sizeof(struct sonic_local) + 4095) / 4096) * 4096;
- kernel_set_cachemode(lp->rba, rs, IOMAP_NOCACHE_SER);
- kernel_set_cachemode(lp, ds, IOMAP_NOCACHE_SER);
- }
-
-#if 0
- flush_cache_all();
-#endif
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
dev->open = sonic_open;
dev->stop = sonic_close;
dev->hard_start_xmit = sonic_send_packet;
dev->get_stats = sonic_get_stats;
dev->set_multicast_list = &sonic_multicast_list;
+ dev->tx_timeout = sonic_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
/*
* clear tally counter
*/
- sonic_write(dev, SONIC_CRCT, 0xffff);
- sonic_write(dev, SONIC_FAET, 0xffff);
- sonic_write(dev, SONIC_MPT, 0xffff);
+ SONIC_WRITE(SONIC_CRCT, 0xffff);
+ SONIC_WRITE(SONIC_FAET, 0xffff);
+ SONIC_WRITE(SONIC_MPT, 0xffff);
return 0;
}
int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
{
+ struct sonic_local *lp = netdev_priv(dev);
const int prom_addr = ONBOARD_SONIC_PROM_BASE;
int i;
@@ -270,6 +197,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
why this is so. */
if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
+ memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
memcmp(dev->dev_addr, "\x00\x05\x02", 3))
bit_reverse_addr(dev->dev_addr);
else
@@ -281,22 +209,23 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
the card... */
if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
+ memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
memcmp(dev->dev_addr, "\x00\x05\x02", 3))
{
unsigned short val;
printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n");
- sonic_write(dev, SONIC_CMD, SONIC_CR_RST);
- sonic_write(dev, SONIC_CEP, 15);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ SONIC_WRITE(SONIC_CEP, 15);
- val = sonic_read(dev, SONIC_CAP2);
+ val = SONIC_READ(SONIC_CAP2);
dev->dev_addr[5] = val >> 8;
dev->dev_addr[4] = val & 0xff;
- val = sonic_read(dev, SONIC_CAP1);
+ val = SONIC_READ(SONIC_CAP1);
dev->dev_addr[3] = val >> 8;
dev->dev_addr[2] = val & 0xff;
- val = sonic_read(dev, SONIC_CAP0);
+ val = SONIC_READ(SONIC_CAP0);
dev->dev_addr[1] = val >> 8;
dev->dev_addr[0] = val & 0xff;
@@ -311,6 +240,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev)
if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
+ memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
memcmp(dev->dev_addr, "\x00\x05\x02", 3))
{
/*
@@ -325,8 +255,9 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
{
/* Bwahahaha */
static int once_is_more_than_enough;
- int i;
- int dma_bitmode;
+ struct sonic_local* lp = netdev_priv(dev);
+ int sr;
+ int commslot = 0;
if (once_is_more_than_enough)
return -ENODEV;
@@ -335,20 +266,18 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
if (!MACH_IS_MAC)
return -ENODEV;
- printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
-
if (macintosh_config->ether_type != MAC_ETHER_SONIC)
- {
- printk("none.\n");
return -ENODEV;
- }
-
+
+ printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
+
/* Bogus probing, on the models which may or may not have
Ethernet (BTW, the Ethernet *is* always at the same
address, and nothing else lives there, at least if Apple's
documentation is to be believed) */
if (macintosh_config->ident == MAC_MODEL_Q630 ||
macintosh_config->ident == MAC_MODEL_P588 ||
+ macintosh_config->ident == MAC_MODEL_P575 ||
macintosh_config->ident == MAC_MODEL_C610) {
unsigned long flags;
int card_present;
@@ -361,13 +290,13 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
printk("none.\n");
return -ENODEV;
}
+ commslot = 1;
}
printk("yes\n");
- /* Danger! My arms are flailing wildly! You *must* set this
- before using sonic_read() */
-
+ /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
+ * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
dev->base_addr = ONBOARD_SONIC_REGISTERS;
if (via_alt_mapping)
dev->irq = IRQ_AUTO_3;
@@ -379,84 +308,66 @@ int __init mac_onboard_sonic_probe(struct net_device* dev)
sonic_version_printed = 1;
}
printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
- dev->name, dev->base_addr);
-
- /* Now do a song and dance routine in an attempt to determine
- the bus width */
+ lp->device->bus_id, dev->base_addr);
/* The PowerBook's SONIC is 16 bit always. */
if (macintosh_config->ident == MAC_MODEL_PB520) {
- reg_offset = 0;
- dma_bitmode = 0;
- } else if (macintosh_config->ident == MAC_MODEL_C610) {
- reg_offset = 0;
- dma_bitmode = 1;
- } else {
+ lp->reg_offset = 0;
+ lp->dma_bitmode = SONIC_BITMODE16;
+ sr = SONIC_READ(SONIC_SR);
+ } else if (commslot) {
/* Some of the comm-slot cards are 16 bit. But some
- of them are not. The 32-bit cards use offset 2 and
- pad with zeroes or sometimes ones (I think...)
- Therefore, if we try offset 0 and get a silicon
- revision of 0, we assume 16 bit. */
- int sr;
-
- /* Technically this is not necessary since we zeroed
- it above */
- reg_offset = 0;
- dma_bitmode = 0;
- sr = sonic_read(dev, SONIC_SR);
- if (sr == 0 || sr == 0xffff) {
- reg_offset = 2;
- /* 83932 is 0x0004, 83934 is 0x0100 or 0x0101 */
- sr = sonic_read(dev, SONIC_SR);
- dma_bitmode = 1;
-
+	   of them are not.  The 32-bit cards use offset 2 and
+	   have known revisions; we try reading the revision
+	   register at offset 2, and if we don't get a known
+	   revision we assume 16 bit at offset 0. */
+ lp->reg_offset = 2;
+ lp->dma_bitmode = SONIC_BITMODE16;
+
+ sr = SONIC_READ(SONIC_SR);
+ if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101)
+ /* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */
+ lp->dma_bitmode = SONIC_BITMODE32;
+ else {
+ lp->dma_bitmode = SONIC_BITMODE16;
+ lp->reg_offset = 0;
+ sr = SONIC_READ(SONIC_SR);
}
- printk(KERN_INFO
- "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
- dev->name, sr, dma_bitmode?32:16, reg_offset);
+ } else {
+ /* All onboard cards are at offset 2 with 32 bit DMA. */
+ lp->reg_offset = 2;
+ lp->dma_bitmode = SONIC_BITMODE32;
+ sr = SONIC_READ(SONIC_SR);
}
-
+ printk(KERN_INFO
+ "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
+ lp->device->bus_id, sr, lp->dma_bitmode?32:16, lp->reg_offset);
- /* this carries my sincere apologies -- by the time I got to updating
- the driver, support for "reg_offsets" appeares nowhere in the sonic
- code, going back for over a year. Fortunately, my Mac does't seem
- to use whatever this was.
+#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
+ printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id,
+ SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
+#endif
- If you know how this is supposed to be implemented, either fix it,
- or contact me (sammy@oh.verio.com) to explain what it is. --Sam */
-
- if(reg_offset) {
- printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name);
- return -ENODEV;
- }
-
/* Software reset, then initialize control registers. */
- sonic_write(dev, SONIC_CMD, SONIC_CR_RST);
- sonic_write(dev, SONIC_DCR, SONIC_DCR_BMS |
- SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_EXBUS |
- (dma_bitmode ? SONIC_DCR_DW : 0));
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS |
+ SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
+ (lp->dma_bitmode ? SONIC_DCR_DW : 0));
/* This *must* be written back to in order to restore the
- extended programmable output bits */
- sonic_write(dev, SONIC_DCR2, 0);
+ * extended programmable output bits, as it may not have been
+ * initialised since the hardware reset. */
+ SONIC_WRITE(SONIC_DCR2, 0);
/* Clear *and* disable interrupts to be on the safe side */
- sonic_write(dev, SONIC_ISR,0x7fff);
- sonic_write(dev, SONIC_IMR,0);
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
/* Now look for the MAC address. */
if (mac_onboard_sonic_ethernet_addr(dev) != 0)
return -ENODEV;
- printk(KERN_INFO "MAC ");
- for (i = 0; i < 6; i++) {
- printk("%2.2x", dev->dev_addr[i]);
- if (i < 5)
- printk(":");
- }
-
- printk(" IRQ %d\n", dev->irq);
-
/* Shared init code */
return macsonic_init(dev);
}
@@ -468,8 +379,10 @@ int __init mac_nubus_sonic_ethernet_addr(struct net_device* dev,
int i;
for(i = 0; i < 6; i++)
dev->dev_addr[i] = SONIC_READ_PROM(i);
- /* For now we are going to assume that they're all bit-reversed */
- bit_reverse_addr(dev->dev_addr);
+
+ /* Some of the addresses are bit-reversed */
+ if (id != MACSONIC_DAYNA)
+ bit_reverse_addr(dev->dev_addr);
return 0;
}
@@ -487,6 +400,15 @@ int __init macsonic_ident(struct nubus_dev* ndev)
else
return MACSONIC_APPLE;
}
+
+ if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
+ ndev->dr_sw == NUBUS_DRSW_DAYNA)
+ return MACSONIC_DAYNA;
+
+ if (ndev->dr_hw == NUBUS_DRHW_SONIC_LC &&
+ ndev->dr_sw == 0) { /* huh? */
+ return MACSONIC_APPLE16;
+ }
return -1;
}
@@ -494,12 +416,12 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
{
static int slots;
struct nubus_dev* ndev = NULL;
+ struct sonic_local* lp = netdev_priv(dev);
unsigned long base_addr, prom_addr;
u16 sonic_dcr;
- int id;
- int i;
- int dma_bitmode;
-
+ int id = -1;
+ int reg_offset, dma_bitmode;
+
/* Find the first SONIC that hasn't been initialized already */
while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
NUBUS_TYPE_ETHERNET, ndev)) != NULL)
@@ -521,51 +443,52 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
case MACSONIC_DUODOCK:
base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS;
prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE;
- sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1
- | SONIC_DCR_TFT0;
+ sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 |
+ SONIC_DCR_TFT0;
reg_offset = 2;
- dma_bitmode = 1;
+ dma_bitmode = SONIC_BITMODE32;
break;
case MACSONIC_APPLE:
base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
reg_offset = 0;
- dma_bitmode = 1;
+ dma_bitmode = SONIC_BITMODE32;
break;
case MACSONIC_APPLE16:
base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
- sonic_dcr = SONIC_DCR_EXBUS
- | SONIC_DCR_RFT1 | SONIC_DCR_TFT0
- | SONIC_DCR_PO1 | SONIC_DCR_BMS;
+ sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
+ SONIC_DCR_PO1 | SONIC_DCR_BMS;
reg_offset = 0;
- dma_bitmode = 0;
+ dma_bitmode = SONIC_BITMODE16;
break;
case MACSONIC_DAYNALINK:
base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE;
- sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0
- | SONIC_DCR_PO1 | SONIC_DCR_BMS;
+ sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
+ SONIC_DCR_PO1 | SONIC_DCR_BMS;
reg_offset = 0;
- dma_bitmode = 0;
+ dma_bitmode = SONIC_BITMODE16;
break;
case MACSONIC_DAYNA:
base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS;
prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR;
- sonic_dcr = SONIC_DCR_BMS
- | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
+ sonic_dcr = SONIC_DCR_BMS |
+ SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
reg_offset = 0;
- dma_bitmode = 0;
+ dma_bitmode = SONIC_BITMODE16;
break;
default:
printk(KERN_ERR "macsonic: WTF, id is %d\n", id);
return -ENODEV;
}
- /* Danger! My arms are flailing wildly! You *must* set this
- before using sonic_read() */
+ /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
+ * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
dev->base_addr = base_addr;
+ lp->reg_offset = reg_offset;
+ lp->dma_bitmode = dma_bitmode;
dev->irq = SLOT2IRQ(ndev->board->slot);
if (!sonic_version_printed) {
@@ -573,29 +496,66 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
sonic_version_printed = 1;
}
printk(KERN_INFO "%s: %s in slot %X\n",
- dev->name, ndev->board->name, ndev->board->slot);
+ lp->device->bus_id, ndev->board->name, ndev->board->slot);
printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
- dev->name, sonic_read(dev, SONIC_SR), dma_bitmode?32:16, reg_offset);
+ lp->device->bus_id, SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset);
- if(reg_offset) {
- printk("%s: register offset unsupported. please fix this if you know what it is.\n", dev->name);
- return -ENODEV;
- }
+#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
+ printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", lp->device->bus_id,
+ SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
+#endif
/* Software reset, then initialize control registers. */
- sonic_write(dev, SONIC_CMD, SONIC_CR_RST);
- sonic_write(dev, SONIC_DCR, sonic_dcr
- | (dma_bitmode ? SONIC_DCR_DW : 0));
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+ SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0));
+ /* This *must* be written back to in order to restore the
+ * extended programmable output bits, since it may not have been
+ * initialised since the hardware reset. */
+ SONIC_WRITE(SONIC_DCR2, 0);
/* Clear *and* disable interrupts to be on the safe side */
- sonic_write(dev, SONIC_ISR,0x7fff);
- sonic_write(dev, SONIC_IMR,0);
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
/* Now look for the MAC address. */
if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0)
return -ENODEV;
- printk(KERN_INFO "MAC ");
+ /* Shared init code */
+ return macsonic_init(dev);
+}
+
+static int __init mac_sonic_probe(struct device *device)
+{
+ struct net_device *dev;
+ struct sonic_local *lp;
+ int err;
+ int i;
+
+ dev = alloc_etherdev(sizeof(struct sonic_local));
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ lp->device = device;
+ SET_NETDEV_DEV(dev, device);
+ SET_MODULE_OWNER(dev);
+
+ /* This will catch fatal stuff like -ENOMEM as well as success */
+ err = mac_onboard_sonic_probe(dev);
+ if (err == 0)
+ goto found;
+ if (err != -ENODEV)
+ goto out;
+ err = mac_nubus_sonic_probe(dev);
+ if (err)
+ goto out;
+found:
+ err = register_netdev(dev);
+ if (err)
+ goto out;
+
+ printk("%s: MAC ", dev->name);
for (i = 0; i < 6; i++) {
printk("%2.2x", dev->dev_addr[i]);
if (i < 5)
@@ -603,55 +563,95 @@ int __init mac_nubus_sonic_probe(struct net_device* dev)
}
printk(" IRQ %d\n", dev->irq);
- /* Shared init code */
- return macsonic_init(dev);
-}
+ return 0;
-#ifdef MODULE
-static struct net_device *dev_macsonic;
+out:
+ free_netdev(dev);
-MODULE_PARM(sonic_debug, "i");
+ return err;
+}
+
+MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
-int
-init_module(void)
+#define SONIC_IRQ_FLAG IRQ_FLG_FAST
+
+#include "sonic.c"
+
+static int __devexit mac_sonic_device_remove (struct device *device)
{
- dev_macsonic = macsonic_probe(-1);
- if (IS_ERR(dev_macsonic)) {
- printk(KERN_WARNING "macsonic.c: No card found\n");
- return PTR_ERR(dev_macsonic);
- }
+ struct net_device *dev = device->driver_data;
+ struct sonic_local* lp = netdev_priv(dev);
+
+ unregister_netdev (dev);
+ dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
+ free_netdev (dev);
+
return 0;
}
-void
-cleanup_module(void)
+static struct device_driver mac_sonic_driver = {
+ .name = mac_sonic_string,
+ .bus = &platform_bus_type,
+ .probe = mac_sonic_probe,
+ .remove = __devexit_p(mac_sonic_device_remove),
+};
+
+static void mac_sonic_platform_release(struct device *device)
{
- unregister_netdev(dev_macsonic);
- kfree(dev_macsonic->priv);
- free_netdev(dev_macsonic);
+ struct platform_device *pldev;
+
+ /* free device */
+ pldev = to_platform_device (device);
+ kfree (pldev);
}
-#endif /* MODULE */
+static int __init mac_sonic_init_module(void)
+{
+ struct platform_device *pldev;
+ int err;
-#define vdma_alloc(foo, bar) ((u32)foo)
-#define vdma_free(baz)
-#define sonic_chiptomem(bat) (bat)
-#define PHYSADDR(quux) (quux)
-#define CPHYSADDR(quux) (quux)
+ if ((err = driver_register(&mac_sonic_driver))) {
+ printk(KERN_ERR "Driver registration failed\n");
+ return err;
+ }
-#define sonic_request_irq request_irq
-#define sonic_free_irq free_irq
+ mac_sonic_device = NULL;
-#include "sonic.c"
+ if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
+ goto out_unregister;
+ }
-/*
- * Local variables:
- * compile-command: "m68k-linux-gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c -o macsonic.o macsonic.c"
- * version-control: t
- * kept-new-versions: 5
- * c-indent-level: 8
- * tab-width: 8
- * End:
- *
- */
+ memset(pldev, 0, sizeof (*pldev));
+ pldev->name = mac_sonic_string;
+ pldev->id = 0;
+ pldev->dev.release = mac_sonic_platform_release;
+ mac_sonic_device = pldev;
+
+ if (platform_device_register (pldev)) {
+ kfree(pldev);
+ mac_sonic_device = NULL;
+ }
+
+ return 0;
+
+out_unregister:
+ driver_unregister(&mac_sonic_driver);
+
+ return -ENOMEM;
+}
+
+static void __exit mac_sonic_cleanup_module(void)
+{
+ driver_unregister(&mac_sonic_driver);
+
+ if (mac_sonic_device) {
+ platform_device_unregister(mac_sonic_device);
+ mac_sonic_device = NULL;
+ }
+}
+
+module_init(mac_sonic_init_module);
+module_exit(mac_sonic_cleanup_module);
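(Illustrative note, not part of the patch.) The converted probe code above replaces the old sonic_read()/sonic_write() helpers with SONIC_READ()/SONIC_WRITE(), which is why the comment insists that dev->base_addr, lp->reg_offset and lp->dma_bitmode are set before the first access. The real macros live in the shared SONIC header pulled in via #include "sonic.c"; the sketch below only shows the access pattern the probe code implies, and the accessor names and register stride are assumptions, not the upstream definitions:

    /* Sketch only -- the actual macros in the shared sonic header may differ;
     * nubus_readw()/nubus_writew() and the (reg * 4) stride are assumptions
     * used purely to illustrate why base_addr and reg_offset must be set first. */
    #define SONIC_READ(reg) \
            nubus_readw(dev->base_addr + ((reg) * 4) + lp->reg_offset)
    #define SONIC_WRITE(reg, val) \
            nubus_writew((val), dev->base_addr + ((reg) * 4) + lp->reg_offset)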
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 0405e1f0d3d..fb6b232069d 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1157,16 +1157,20 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!skb_shinfo(skb)->nr_frags) {
linear:
if (skb->ip_summed != CHECKSUM_HW) {
+ /* Errata BTS #50, IHL must be 5 if no HW checksum */
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
- ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC;
+ ETH_TX_FIRST_DESC |
+ ETH_TX_LAST_DESC |
+ 5 << ETH_TX_IHL_SHIFT;
pkt_info.l4i_chk = 0;
} else {
- u32 ipheader = skb->nh.iph->ihl << 11;
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
- ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC |
- ETH_GEN_TCP_UDP_CHECKSUM |
- ETH_GEN_IP_V_4_CHECKSUM | ipheader;
+ ETH_TX_FIRST_DESC |
+ ETH_TX_LAST_DESC |
+ ETH_GEN_TCP_UDP_CHECKSUM |
+ ETH_GEN_IP_V_4_CHECKSUM |
+ skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
/* CPU already calculated pseudo header checksum. */
if (skb->nh.iph->protocol == IPPROTO_UDP) {
pkt_info.cmd_sts |= ETH_UDP_FRAME;
@@ -1193,7 +1197,6 @@ linear:
stats->tx_bytes += pkt_info.byte_cnt;
} else {
unsigned int frag;
- u32 ipheader;
/* Since hardware can't handle unaligned fragments smaller
* than 9 bytes, if we find any, we linearize the skb
@@ -1222,12 +1225,16 @@ linear:
DMA_TO_DEVICE);
pkt_info.l4i_chk = 0;
pkt_info.return_info = 0;
- pkt_info.cmd_sts = ETH_TX_FIRST_DESC;
- if (skb->ip_summed == CHECKSUM_HW) {
- ipheader = skb->nh.iph->ihl << 11;
- pkt_info.cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
- ETH_GEN_IP_V_4_CHECKSUM | ipheader;
+ if (skb->ip_summed != CHECKSUM_HW)
+ /* Errata BTS #50, IHL must be 5 if no HW checksum */
+ pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
+ 5 << ETH_TX_IHL_SHIFT;
+ else {
+ pkt_info.cmd_sts = ETH_TX_FIRST_DESC |
+ ETH_GEN_TCP_UDP_CHECKSUM |
+ ETH_GEN_IP_V_4_CHECKSUM |
+ skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
/* CPU already calculated pseudo header checksum. */
if (skb->nh.iph->protocol == IPPROTO_UDP) {
pkt_info.cmd_sts |= ETH_UDP_FRAME;
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 57c4f8fbfdb..7678b59c295 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -49,7 +49,7 @@
/* Checksum offload for Tx works for most packets, but
* fails if previous packet sent did not use hw csum
*/
-#undef MV643XX_CHECKSUM_OFFLOAD_TX
+#define MV643XX_CHECKSUM_OFFLOAD_TX
#define MV643XX_NAPI
#define MV643XX_TX_FAST_REFILL
#undef MV643XX_RX_QUEUE_FILL_ON_TASK /* Does not work, yet */
@@ -217,6 +217,8 @@
#define ETH_TX_ENABLE_INTERRUPT (BIT23)
#define ETH_AUTO_MODE (BIT30)
+#define ETH_TX_IHL_SHIFT 11
+
/* typedefs */
typedef enum _eth_func_ret_status {
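(Illustrative note, not part of the patch.) With ETH_TX_IHL_SHIFT defined as 11, the mv643xx_eth change above encodes the IP header length directly into cmd_sts: a standard 20-byte IPv4 header has ihl == 5, so the errata path for packets sent without hardware checksumming writes 5 << 11 == 0x2800 into the descriptor, while the checksum path writes skb->nh.iph->ihl << 11 for whatever header length the packet actually carries.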
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 4a391ea0f58..a1ac4bd1696 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -486,9 +486,9 @@ struct netdrv_private {
MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_PARM (multicast_filter_limit, "i");
-MODULE_PARM (max_interrupt_work, "i");
-MODULE_PARM (media, "1-" __MODULE_STRING(8) "i");
+module_param(multicast_filter_limit, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param_array(media, int, NULL, 0);
MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9d8197bb293..384a736a0d2 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -134,7 +134,7 @@ typedef struct local_info_t {
u_char mc_filter[8];
} local_info_t;
-#define MC_FILTERBREAK 64
+#define MC_FILTERBREAK 8
/*====================================================================*/
/*
@@ -1012,7 +1012,7 @@ static void fjn_reset(struct net_device *dev)
outb(BANK_1U, ioaddr + CONFIG_1);
/* set the multicast table to accept none. */
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 8; i++)
outb(0x00, ioaddr + MAR_ADR + i);
/* Switch to bank 2 (runtime mode) */
@@ -1269,6 +1269,16 @@ static void set_rx_mode(struct net_device *dev)
u_long flags;
int i;
+ int saved_config_0 = inb(ioaddr + CONFIG_0);
+
+ local_irq_save(flags);
+
+ /* Disable Tx and Rx */
+ if (sram_config == 0)
+ outb(CONFIG0_RST, ioaddr + CONFIG_0);
+ else
+ outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
+
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
printk("%s: Promiscuous mode enabled.\n", dev->name);
@@ -1290,20 +1300,23 @@ static void set_rx_mode(struct net_device *dev)
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
unsigned int bit =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
- mc_filter[bit >> 3] |= (1 << bit);
+ ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ mc_filter[bit >> 3] |= (1 << (bit & 7));
}
+ outb(2, ioaddr + RX_MODE); /* Use normal mode. */
}
- local_irq_save(flags);
if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
int saved_bank = inb(ioaddr + CONFIG_1);
/* Switch to bank 1 and set the multicast table. */
outb(0xe4, ioaddr + CONFIG_1);
for (i = 0; i < 8; i++)
- outb(mc_filter[i], ioaddr + 8 + i);
+ outb(mc_filter[i], ioaddr + MAR_ADR + i);
memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
outb(saved_bank, ioaddr + CONFIG_1);
}
+
+ outb(saved_config_0, ioaddr + CONFIG_0);
+
local_irq_restore(flags);
}
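(Illustrative note, not part of the patch.) The reworked hash above keeps the top six bits of the little-endian CRC, so the result always lands inside the 8-byte filter that fjn_reset() now clears and set_rx_mode() writes at MAR_ADR. A minimal sketch of the mapping, with addr standing in for one multicast address taken from dev->mc_list:

    /* Sketch only: how one multicast address selects a bit in the
     * 64-bit (8 bytes x 8 bits) multicast address register filter. */
    u_char mc_filter[8] = { 0 };
    unsigned int bit = ether_crc_le(ETH_ALEN, addr) >> 26;  /* top 6 bits: 0..63 */
    mc_filter[bit >> 3] |= 1 << (bit & 7);                  /* byte index, then bit index */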
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
new file mode 100644
index 00000000000..6a2fe358347
--- /dev/null
+++ b/drivers/net/phy/Kconfig
@@ -0,0 +1,57 @@
+#
+# PHY Layer Configuration
+#
+
+menu "PHY device support"
+
+config PHYLIB
+ tristate "PHY Device support and infrastructure"
+ depends on NET_ETHERNET
+ help
+ Ethernet controllers are usually attached to PHY
+ devices. This option provides infrastructure for
+ managing PHY devices.
+
+config PHYCONTROL
+ bool " Support for automatically handling PHY state changes"
+ depends on PHYLIB
+ help
+ Adds code to perform all the work for keeping PHY link
+ state (speed/duplex/etc) up-to-date. Also handles
+ interrupts.
+
+comment "MII PHY device drivers"
+ depends on PHYLIB
+
+config MARVELL_PHY
+ tristate "Drivers for Marvell PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently has a driver for the 88E1011S
+
+config DAVICOM_PHY
+ tristate "Drivers for Davicom PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports dm9161e and dm9131
+
+config QSEMI_PHY
+ tristate "Drivers for Quality Semiconductor PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports the qs6612
+
+config LXT_PHY
+ tristate "Drivers for the Intel LXT PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports the lxt970, lxt971
+
+config CICADA_PHY
+ tristate "Drivers for the Cicada PHYs"
+ depends on PHYLIB
+ ---help---
+ Currently supports the cis8204
+
+endmenu
+
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
new file mode 100644
index 00000000000..e4116a5fbb4
--- /dev/null
+++ b/drivers/net/phy/Makefile
@@ -0,0 +1,10 @@
+# Makefile for Linux PHY drivers
+
+libphy-objs := phy.o phy_device.o mdio_bus.o
+
+obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_MARVELL_PHY) += marvell.o
+obj-$(CONFIG_DAVICOM_PHY) += davicom.o
+obj-$(CONFIG_CICADA_PHY) += cicada.o
+obj-$(CONFIG_LXT_PHY) += lxt.o
+obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
new file mode 100644
index 00000000000..c47fb2ecd14
--- /dev/null
+++ b/drivers/net/phy/cicada.c
@@ -0,0 +1,134 @@
+/*
+ * drivers/net/phy/cicada.c
+ *
+ * Driver for Cicada PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* Cicada Extended Control Register 1 */
+#define MII_CIS8201_EXT_CON1 0x17
+#define MII_CIS8201_EXTCON1_INIT 0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MII_CIS8201_IMASK 0x19
+#define MII_CIS8201_IMASK_IEN 0x8000
+#define MII_CIS8201_IMASK_SPEED 0x4000
+#define MII_CIS8201_IMASK_LINK 0x2000
+#define MII_CIS8201_IMASK_DUPLEX 0x1000
+#define MII_CIS8201_IMASK_MASK 0xf000
+
+/* Cicada Interrupt Status Register */
+#define MII_CIS8201_ISTAT 0x1a
+#define MII_CIS8201_ISTAT_STATUS 0x8000
+#define MII_CIS8201_ISTAT_SPEED 0x4000
+#define MII_CIS8201_ISTAT_LINK 0x2000
+#define MII_CIS8201_ISTAT_DUPLEX 0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MII_CIS8201_AUX_CONSTAT 0x1c
+#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
+#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
+#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
+#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
+#define MII_CIS8201_AUXCONSTAT_100 0x0008
+
+MODULE_DESCRIPTION("Cicada PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+static int cis820x_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_CIS8201_AUX_CONSTAT,
+ MII_CIS8201_AUXCONSTAT_INIT);
+
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_CIS8201_EXT_CON1,
+ MII_CIS8201_EXTCON1_INIT);
+
+ return err;
+}
+
+static int cis820x_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_CIS8201_ISTAT);
+
+ return (err < 0) ? err : 0;
+}
+
+static int cis820x_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_CIS8201_IMASK,
+ MII_CIS8201_IMASK_MASK);
+ else
+ err = phy_write(phydev, MII_CIS8201_IMASK, 0);
+
+ return err;
+}
+
+/* Cicada 820x */
+static struct phy_driver cis8204_driver = {
+ .phy_id = 0x000fc440,
+ .name = "Cicada Cis8204",
+ .phy_id_mask = 0x000fffc0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &cis820x_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &cis820x_ack_interrupt,
+ .config_intr = &cis820x_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init cis8204_init(void)
+{
+ return phy_driver_register(&cis8204_driver);
+}
+
+static void __exit cis8204_exit(void)
+{
+ phy_driver_unregister(&cis8204_driver);
+}
+
+module_init(cis8204_init);
+module_exit(cis8204_exit);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
new file mode 100644
index 00000000000..6caf499fae3
--- /dev/null
+++ b/drivers/net/phy/davicom.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/net/phy/davicom.c
+ *
+ * Driver for Davicom PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define MII_DM9161_SCR 0x10
+#define MII_DM9161_SCR_INIT 0x0610
+
+/* DM9161 Interrupt Register */
+#define MII_DM9161_INTR 0x15
+#define MII_DM9161_INTR_PEND 0x8000
+#define MII_DM9161_INTR_DPLX_MASK 0x0800
+#define MII_DM9161_INTR_SPD_MASK 0x0400
+#define MII_DM9161_INTR_LINK_MASK 0x0200
+#define MII_DM9161_INTR_MASK 0x0100
+#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
+#define MII_DM9161_INTR_SPD_CHANGE 0x0008
+#define MII_DM9161_INTR_LINK_CHANGE 0x0004
+#define MII_DM9161_INTR_INIT 0x0000
+#define MII_DM9161_INTR_STOP \
+(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
+ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MII_DM9161_10BTCSR 0x12
+#define MII_DM9161_10BTCSR_INIT 0x7800
+
+MODULE_DESCRIPTION("Davicom PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+
+#define DM9161_DELAY 1
+static int dm9161_config_intr(struct phy_device *phydev)
+{
+ int temp;
+
+ temp = phy_read(phydev, MII_DM9161_INTR);
+
+ if (temp < 0)
+ return temp;
+
+ if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
+ temp &= ~(MII_DM9161_INTR_STOP);
+ else
+ temp |= MII_DM9161_INTR_STOP;
+
+ temp = phy_write(phydev, MII_DM9161_INTR, temp);
+
+ return temp;
+}
+
+static int dm9161_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* Isolate the PHY */
+ err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
+
+ if (err < 0)
+ return err;
+
+ /* Configure the new settings */
+ err = genphy_config_aneg(phydev);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dm9161_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Isolate the PHY */
+ err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
+
+ if (err < 0)
+ return err;
+
+ /* Do not bypass the scrambler/descrambler */
+ err = phy_write(phydev, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
+
+ if (err < 0)
+ return err;
+
+ /* Clear 10BTCSR to default */
+ err = phy_write(phydev, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
+
+ if (err < 0)
+ return err;
+
+ /* Reconnect the PHY, and enable Autonegotiation */
+ err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dm9161_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DM9161_INTR);
+
+ return (err < 0) ? err : 0;
+}
+
+static struct phy_driver dm9161_driver = {
+ .phy_id = 0x0181b880,
+ .name = "Davicom DM9161E",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .config_init = dm9161_config_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver dm9131_driver = {
+ .phy_id = 0x00181b80,
+ .name = "Davicom DM9131",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init davicom_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&dm9161_driver);
+ if (ret)
+ goto err1;
+
+ ret = phy_driver_register(&dm9131_driver);
+ if (ret)
+ goto err2;
+ return 0;
+
+ err2:
+ phy_driver_unregister(&dm9161_driver);
+ err1:
+ return ret;
+}
+
+static void __exit davicom_exit(void)
+{
+ phy_driver_unregister(&dm9161_driver);
+ phy_driver_unregister(&dm9131_driver);
+}
+
+module_init(davicom_init);
+module_exit(davicom_exit);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
new file mode 100644
index 00000000000..4c840448ec8
--- /dev/null
+++ b/drivers/net/phy/lxt.c
@@ -0,0 +1,179 @@
+/*
+ * drivers/net/phy/lxt.c
+ *
+ * Driver for Intel LXT PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* The Level one LXT970 is used by many boards */
+
+#define MII_LXT970_IER 17 /* Interrupt Enable Register */
+
+#define MII_LXT970_IER_IEN 0x0002
+
+#define MII_LXT970_ISR 18 /* Interrupt Status Register */
+
+#define MII_LXT970_CONFIG 19 /* Configuration Register */
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT971 is used on some of my custom boards */
+
+/* register definitions for the 971 */
+#define MII_LXT971_IER 18 /* Interrupt Enable Register */
+#define MII_LXT971_IER_IEN 0x00f2
+
+#define MII_LXT971_ISR 19 /* Interrupt Status Register */
+
+
+MODULE_DESCRIPTION("Intel LXT PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+static int lxt970_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, MII_BMSR);
+
+ if (err < 0)
+ return err;
+
+ err = phy_read(phydev, MII_LXT970_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int lxt970_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+ else
+ err = phy_write(phydev, MII_LXT970_IER, 0);
+
+ return err;
+}
+
+static int lxt970_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_LXT970_CONFIG, 0);
+
+ return err;
+}
+
+
+static int lxt971_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_LXT971_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int lxt971_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+ else
+ err = phy_write(phydev, MII_LXT971_IER, 0);
+
+ return err;
+}
+
+static struct phy_driver lxt970_driver = {
+ .phy_id = 0x07810000,
+ .name = "LXT970",
+ .phy_id_mask = 0x0fffffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = lxt970_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = lxt970_ack_interrupt,
+ .config_intr = lxt970_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver lxt971_driver = {
+ .phy_id = 0x0001378e,
+ .name = "LXT971",
+ .phy_id_mask = 0x0fffffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = lxt971_ack_interrupt,
+ .config_intr = lxt971_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init lxt_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&lxt970_driver);
+ if (ret)
+ goto err1;
+
+ ret = phy_driver_register(&lxt971_driver);
+ if (ret)
+ goto err2;
+ return 0;
+
+ err2:
+ phy_driver_unregister(&lxt970_driver);
+ err1:
+ return ret;
+}
+
+static void __exit lxt_exit(void)
+{
+ phy_driver_unregister(&lxt970_driver);
+ phy_driver_unregister(&lxt971_driver);
+}
+
+module_init(lxt_init);
+module_exit(lxt_exit);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
new file mode 100644
index 00000000000..4a72b025006
--- /dev/null
+++ b/drivers/net/phy/marvell.c
@@ -0,0 +1,140 @@
+/*
+ * drivers/net/phy/marvell.c
+ *
+ * Driver for Marvell PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define MII_M1011_IEVENT 0x13
+#define MII_M1011_IEVENT_CLEAR 0x0000
+
+#define MII_M1011_IMASK 0x12
+#define MII_M1011_IMASK_INIT 0x6400
+#define MII_M1011_IMASK_CLEAR 0x0000
+
+MODULE_DESCRIPTION("Marvell PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+static int marvell_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ /* Clear the interrupts by reading the reg */
+ err = phy_read(phydev, MII_M1011_IEVENT);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int marvell_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+ else
+ err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+
+ return err;
+}
+
+static int marvell_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* The Marvell PHY has an errata which requires
+ * that certain registers get written in order
+ * to restart autonegotiation */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x1f);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x200c);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x5);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x100);
+ if (err < 0)
+ return err;
+
+
+ err = genphy_config_aneg(phydev);
+
+ return err;
+}
+
+
+static struct phy_driver m88e1101_driver = {
+ .phy_id = 0x01410c00,
+ .phy_id_mask = 0xffffff00,
+ .name = "Marvell 88E1101",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &marvell_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init marvell_init(void)
+{
+ return phy_driver_register(&m88e1101_driver);
+}
+
+static void __exit marvell_exit(void)
+{
+ phy_driver_unregister(&m88e1101_driver);
+}
+
+module_init(marvell_init);
+module_exit(marvell_exit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
new file mode 100644
index 00000000000..41f62c0c5fc
--- /dev/null
+++ b/drivers/net/phy/mdio_bus.c
@@ -0,0 +1,176 @@
+/*
+ * drivers/net/phy/mdio_bus.c
+ *
+ * MDIO Bus interface
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* mdiobus_register
+ *
+ * description: Called by a bus driver to bring up all the PHYs
+ * on a given bus, and attach them to the bus
+ */
+int mdiobus_register(struct mii_bus *bus)
+{
+ int i;
+ int err = 0;
+
+ if (NULL == bus || NULL == bus->name ||
+ NULL == bus->read ||
+ NULL == bus->write)
+ return -EINVAL;
+
+ spin_lock_init(&bus->mdio_lock);
+
+ if (bus->reset)
+ bus->reset(bus);
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = get_phy_device(bus, i);
+
+ if (IS_ERR(phydev))
+ return PTR_ERR(phydev);
+
+ /* There's a PHY at this address
+ * We need to set:
+ * 1) IRQ
+ * 2) bus_id
+ * 3) parent
+ * 4) bus
+ * 5) mii_bus
+ * And, we need to register it */
+ if (phydev) {
+ phydev->irq = bus->irq[i];
+
+ phydev->dev.parent = bus->dev;
+ phydev->dev.bus = &mdio_bus_type;
+ sprintf(phydev->dev.bus_id, "phy%d:%d", bus->id, i);
+
+ phydev->bus = bus;
+
+ err = device_register(&phydev->dev);
+
+ if (err)
+ printk(KERN_ERR "phy %d failed to register\n",
+ i);
+ }
+
+ bus->phy_map[i] = phydev;
+ }
+
+ pr_info("%s: probed\n", bus->name);
+
+ return err;
+}
+EXPORT_SYMBOL(mdiobus_register);
+
+void mdiobus_unregister(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if (bus->phy_map[i]) {
+ device_unregister(&bus->phy_map[i]->dev);
+ kfree(bus->phy_map[i]);
+ }
+ }
+}
+EXPORT_SYMBOL(mdiobus_unregister);
+
+/* mdio_bus_match
+ *
+ * description: Given a PHY device, and a PHY driver, return 1 if
+ * the driver supports the device. Otherwise, return 0
+ */
+static int mdio_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ struct phy_driver *phydrv = to_phy_driver(drv);
+
+ return (phydrv->phy_id == (phydev->phy_id & phydrv->phy_id_mask));
+}
+
+/* Suspend and resume. Copied from platform_suspend and
+ * platform_resume
+ */
+static int mdio_bus_suspend(struct device * dev, u32 state)
+{
+ int ret = 0;
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->suspend) {
+ ret = drv->suspend(dev, state, SUSPEND_DISABLE);
+ if (ret == 0)
+ ret = drv->suspend(dev, state, SUSPEND_SAVE_STATE);
+ if (ret == 0)
+ ret = drv->suspend(dev, state, SUSPEND_POWER_DOWN);
+ }
+ return ret;
+}
+
+static int mdio_bus_resume(struct device * dev)
+{
+ int ret = 0;
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->resume) {
+ ret = drv->resume(dev, RESUME_POWER_ON);
+ if (ret == 0)
+ ret = drv->resume(dev, RESUME_RESTORE_STATE);
+ if (ret == 0)
+ ret = drv->resume(dev, RESUME_ENABLE);
+ }
+ return ret;
+}
+
+struct bus_type mdio_bus_type = {
+ .name = "mdio_bus",
+ .match = mdio_bus_match,
+ .suspend = mdio_bus_suspend,
+ .resume = mdio_bus_resume,
+};
+
+int __init mdio_bus_init(void)
+{
+ return bus_register(&mdio_bus_type);
+}
+
+void __exit mdio_bus_exit(void)
+{
+ bus_unregister(&mdio_bus_type);
+}
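(Illustrative note, not part of the patch.) mdiobus_register() above expects a controller driver to hand it a filled-in struct mii_bus whose read/write hooks drive the controller's MDIO hardware and whose irq[] table supplies a per-PHY interrupt line (or PHY_POLL). In the sketch below the foo_* names are hypothetical and the hardware accessors are placeholders; the field names follow the bus code above, but the exact prototypes should be checked against linux/phy.h:

    /* Sketch only -- foo_* is a hypothetical controller driver. */
    static int foo_mdio_read(struct mii_bus *bus, int phy_addr, int regnum)
    {
            struct foo_priv *priv = bus->priv;

            return foo_hw_mdio_read(priv, phy_addr, regnum);        /* placeholder */
    }

    static int foo_mdio_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val)
    {
            struct foo_priv *priv = bus->priv;

            return foo_hw_mdio_write(priv, phy_addr, regnum, val);  /* placeholder */
    }

    static int foo_phy_irqs[PHY_MAX_ADDR] = { [0 ... PHY_MAX_ADDR - 1] = PHY_POLL };

    static struct mii_bus foo_mii_bus = {
            .name  = "foo",
            .read  = foo_mdio_read,
            .write = foo_mdio_write,
            .irq   = foo_phy_irqs,
    };

    static int foo_probe_mdio(struct foo_priv *priv)
    {
            foo_mii_bus.priv = priv;
            return mdiobus_register(&foo_mii_bus);
    }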
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
new file mode 100644
index 00000000000..d9e11f93bf3
--- /dev/null
+++ b/drivers/net/phy/phy.c
@@ -0,0 +1,871 @@
+/*
+ * drivers/net/phy/phy.c
+ *
+ * Framework for configuring and reading PHY devices
+ * Based on code in sungem_phy.c and gianfar_phy.c
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* Convenience function to print out the current phy status
+ */
+void phy_print_status(struct phy_device *phydev)
+{
+ pr_info("%s: Link is %s", phydev->dev.bus_id,
+ phydev->link ? "Up" : "Down");
+ if (phydev->link)
+ printk(" - %d/%s", phydev->speed,
+ DUPLEX_FULL == phydev->duplex ?
+ "Full" : "Half");
+
+ printk("\n");
+}
+EXPORT_SYMBOL(phy_print_status);
+
+
+/* Convenience functions for reading/writing a given PHY
+ * register. They MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation. */
+int phy_read(struct phy_device *phydev, u16 regnum)
+{
+ int retval;
+ struct mii_bus *bus = phydev->bus;
+
+ spin_lock_bh(&bus->mdio_lock);
+ retval = bus->read(bus, phydev->addr, regnum);
+ spin_unlock_bh(&bus->mdio_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(phy_read);
+
+int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
+{
+ int err;
+ struct mii_bus *bus = phydev->bus;
+
+ spin_lock_bh(&bus->mdio_lock);
+ err = bus->write(bus, phydev->addr, regnum, val);
+ spin_unlock_bh(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_write);
+
+
+int phy_clear_interrupt(struct phy_device *phydev)
+{
+ int err = 0;
+
+ if (phydev->drv->ack_interrupt)
+ err = phydev->drv->ack_interrupt(phydev);
+
+ return err;
+}
+
+
+int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+{
+ int err = 0;
+
+ phydev->interrupts = interrupts;
+ if (phydev->drv->config_intr)
+ err = phydev->drv->config_intr(phydev);
+
+ return err;
+}
+
+
+/* phy_aneg_done
+ *
+ * description: Reads the status register and returns 0 either if
+ * auto-negotiation is incomplete, or if there was an error.
+ * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ */
+static inline int phy_aneg_done(struct phy_device *phydev)
+{
+ int retval;
+
+ retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+
+/* A structure for mapping a particular speed and duplex
+ * combination to a particular SUPPORTED and ADVERTISED value */
+struct phy_setting {
+ int speed;
+ int duplex;
+ u32 setting;
+};
+
+/* A mapping of all SUPPORTED settings to speed/duplex */
+static struct phy_setting settings[] = {
+ {
+ .speed = 10000,
+ .duplex = DUPLEX_FULL,
+ .setting = SUPPORTED_10000baseT_Full,
+ },
+ {
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .setting = SUPPORTED_1000baseT_Full,
+ },
+ {
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_HALF,
+ .setting = SUPPORTED_1000baseT_Half,
+ },
+ {
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ .setting = SUPPORTED_100baseT_Full,
+ },
+ {
+ .speed = SPEED_100,
+ .duplex = DUPLEX_HALF,
+ .setting = SUPPORTED_100baseT_Half,
+ },
+ {
+ .speed = SPEED_10,
+ .duplex = DUPLEX_FULL,
+ .setting = SUPPORTED_10baseT_Full,
+ },
+ {
+ .speed = SPEED_10,
+ .duplex = DUPLEX_HALF,
+ .setting = SUPPORTED_10baseT_Half,
+ },
+};
+
+#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting))
+
+/* phy_find_setting
+ *
+ * description: Searches the settings array for the setting which
+ * matches the desired speed and duplex, and returns the index
+ * of that setting. Returns the index of the last setting if
+ * none of the others match.
+ */
+static inline int phy_find_setting(int speed, int duplex)
+{
+ int idx = 0;
+
+ while (idx < ARRAY_SIZE(settings) &&
+ (settings[idx].speed != speed ||
+ settings[idx].duplex != duplex))
+ idx++;
+
+ return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
+}
+
+/* phy_find_valid
+ * idx: The first index in settings[] to search
+ * features: A mask of the valid settings
+ *
+ * description: Returns the index of the first valid setting less
+ * than or equal to the one pointed to by idx, as determined by
+ * the mask in features. Returns the index of the last setting
+ * if nothing else matches.
+ */
+static inline int phy_find_valid(int idx, u32 features)
+{
+ while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
+ idx++;
+
+ return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
+}
+
+/* phy_sanitize_settings
+ *
+ * description: Make sure the PHY is set to supported speeds and
+ * duplexes. Drop down by one in this order: 1000/FULL,
+ * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF
+ */
+void phy_sanitize_settings(struct phy_device *phydev)
+{
+ u32 features = phydev->supported;
+ int idx;
+
+ /* Sanitize settings based on PHY capabilities */
+ if ((features & SUPPORTED_Autoneg) == 0)
+ phydev->autoneg = 0;
+
+ idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
+ features);
+
+ phydev->speed = settings[idx].speed;
+ phydev->duplex = settings[idx].duplex;
+}
+EXPORT_SYMBOL(phy_sanitize_settings);
+
+/* phy_ethtool_sset:
+ * A generic ethtool sset function. Handles all the details
+ *
+ * A few notes about parameter checking:
+ * - We don't set port or transceiver, so we don't care what they
+ * were set to.
+ * - phy_start_aneg() will make sure forced settings are sane, and
+ * choose the next best ones from the ones selected, so we don't
+ * care if ethtool tries to give us bad values
+ *
+ * A note about the PHYCONTROL Layer. If you turn off
+ * CONFIG_PHYCONTROL, you will need to read the PHY status
+ * registers after this function completes, and update your
+ * controller manually.
+ */
+int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
+{
+ if (cmd->phy_address != phydev->addr)
+ return -EINVAL;
+
+ /* We make sure that we don't pass unsupported
+ * values in to the PHY */
+ cmd->advertising &= phydev->supported;
+
+ /* Verify the settings we care about. */
+ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE
+ && ((cmd->speed != SPEED_1000
+ && cmd->speed != SPEED_100
+ && cmd->speed != SPEED_10)
+ || (cmd->duplex != DUPLEX_HALF
+ && cmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ phydev->autoneg = cmd->autoneg;
+
+ phydev->speed = cmd->speed;
+
+ phydev->advertising = cmd->advertising;
+
+ if (AUTONEG_ENABLE == cmd->autoneg)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ phydev->duplex = cmd->duplex;
+
+ /* Restart the PHY */
+ phy_start_aneg(phydev);
+
+ return 0;
+}
+
+int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
+{
+ cmd->supported = phydev->supported;
+
+ cmd->advertising = phydev->advertising;
+
+ cmd->speed = phydev->speed;
+ cmd->duplex = phydev->duplex;
+ cmd->port = PORT_MII;
+ cmd->phy_address = phydev->addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->autoneg = phydev->autoneg;
+
+ return 0;
+}
+
+
+/* Note that this function is currently incompatible with the
+ * PHYCONTROL layer. It changes registers without regard to
+ * current state. Use at your own risk.
+ */
+int phy_mii_ioctl(struct phy_device *phydev,
+ struct mii_ioctl_data *mii_data, int cmd)
+{
+ u16 val = mii_data->val_in;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ mii_data->phy_id = phydev->addr;
+ break;
+ case SIOCGMIIREG:
+ mii_data->val_out = phy_read(phydev, mii_data->reg_num);
+ break;
+
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (mii_data->phy_id == phydev->addr) {
+ switch(mii_data->reg_num) {
+ case MII_BMCR:
+ if (val & (BMCR_RESET|BMCR_ANENABLE))
+ phydev->autoneg = AUTONEG_DISABLE;
+ else
+ phydev->autoneg = AUTONEG_ENABLE;
+ if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ break;
+ case MII_ADVERTISE:
+ phydev->advertising = val;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+
+ phy_write(phydev, mii_data->reg_num, val);
+
+ if (mii_data->reg_num == MII_BMCR
+ && val & BMCR_RESET
+ && phydev->drv->config_init)
+ phydev->drv->config_init(phydev);
+ break;
+ }
+
+ return 0;
+}
+
+/* phy_start_aneg
+ *
+ * description: Sanitizes the settings (if we're not
+ * autonegotiating them), and then calls the driver's
+ * config_aneg function. If the PHYCONTROL Layer is operating,
+ * we change the state to reflect the beginning of
+ * Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ spin_lock(&phydev->lock);
+
+ if (AUTONEG_DISABLE == phydev->autoneg)
+ phy_sanitize_settings(phydev);
+
+ err = phydev->drv->config_aneg(phydev);
+
+#ifdef CONFIG_PHYCONTROL
+ if (err < 0)
+ goto out_unlock;
+
+ if (phydev->state != PHY_HALTED) {
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ phydev->state = PHY_AN;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
+ } else {
+ phydev->state = PHY_FORCING;
+ phydev->link_timeout = PHY_FORCE_TIMEOUT;
+ }
+ }
+
+out_unlock:
+#endif
+ spin_unlock(&phydev->lock);
+ return err;
+}
+EXPORT_SYMBOL(phy_start_aneg);
+
+
+#ifdef CONFIG_PHYCONTROL
+static void phy_change(void *data);
+static void phy_timer(unsigned long data);
+
+/* phy_start_machine:
+ *
+ * description: The PHY infrastructure can run a state machine
+ * which tracks whether the PHY is starting up, negotiating,
+ * etc. This function starts the timer which tracks the state
+ * of the PHY. If you want to be notified when the state
+ * changes, pass in the callback, otherwise, pass NULL. If you
+ * want to maintain your own state machine, do not call this
+ * function. */
+void phy_start_machine(struct phy_device *phydev,
+ void (*handler)(struct net_device *))
+{
+ phydev->adjust_state = handler;
+
+ init_timer(&phydev->phy_timer);
+ phydev->phy_timer.function = &phy_timer;
+ phydev->phy_timer.data = (unsigned long) phydev;
+ mod_timer(&phydev->phy_timer, jiffies + HZ);
+}
+
+/* phy_stop_machine
+ *
+ * description: Stops the state machine timer, sets the state to
+ * UP (unless it wasn't up yet), and then frees the interrupt,
+ * if it is in use. This function must be called BEFORE
+ * phy_detach.
+ */
+void phy_stop_machine(struct phy_device *phydev)
+{
+ del_timer_sync(&phydev->phy_timer);
+
+ spin_lock(&phydev->lock);
+ if (phydev->state > PHY_UP)
+ phydev->state = PHY_UP;
+ spin_unlock(&phydev->lock);
+
+ if (phydev->irq != PHY_POLL)
+ phy_stop_interrupts(phydev);
+
+ phydev->adjust_state = NULL;
+}
+
+/* phy_force_reduction
+ *
+ * description: Reduces the speed/duplex settings by
+ * one notch, in this order:
+ * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF,
+ * 10/FULL, 10/HALF. The function bottoms out at 10/HALF.
+ */
+static void phy_force_reduction(struct phy_device *phydev)
+{
+ int idx;
+
+ idx = phy_find_setting(phydev->speed, phydev->duplex);
+
+ idx++;
+
+ idx = phy_find_valid(idx, phydev->supported);
+
+ phydev->speed = settings[idx].speed;
+ phydev->duplex = settings[idx].duplex;
+
+ pr_info("Trying %d/%s\n", phydev->speed,
+ DUPLEX_FULL == phydev->duplex ?
+ "FULL" : "HALF");
+}
+
+
+/* phy_error:
+ *
+ * Moves the PHY to the HALTED state in response to a read
+ * or write error, and tells the controller the link is down.
+ * Must not be called from interrupt context, or while the
+ * phydev->lock is held.
+ */
+void phy_error(struct phy_device *phydev)
+{
+ spin_lock(&phydev->lock);
+ phydev->state = PHY_HALTED;
+ spin_unlock(&phydev->lock);
+}
+
+/* phy_interrupt
+ *
+ * description: When a PHY interrupt occurs, the handler disables
+ * interrupts, and schedules a work task to clear the interrupt.
+ */
+static irqreturn_t phy_interrupt(int irq, void *phy_dat, struct pt_regs *regs)
+{
+ struct phy_device *phydev = phy_dat;
+
+ /* The MDIO bus is not allowed to be written in interrupt
+ * context, so we need to disable the irq here. A work
+ * queue will write the PHY to disable and clear the
+ * interrupt, and then reenable the irq line. */
+ disable_irq_nosync(irq);
+
+ schedule_work(&phydev->phy_queue);
+
+ return IRQ_HANDLED;
+}
+
+/* Enable the interrupts from the PHY side */
+int phy_enable_interrupts(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_clear_interrupt(phydev);
+
+ if (err < 0)
+ return err;
+
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_enable_interrupts);
+
+/* Disable the PHY interrupts from the PHY side */
+int phy_disable_interrupts(struct phy_device *phydev)
+{
+ int err;
+
+ /* Disable PHY interrupts */
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+
+ if (err)
+ goto phy_err;
+
+ /* Clear the interrupt */
+ err = phy_clear_interrupt(phydev);
+
+ if (err)
+ goto phy_err;
+
+ return 0;
+
+phy_err:
+ phy_error(phydev);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_disable_interrupts);
+
+/* phy_start_interrupts
+ *
+ * description: Request the interrupt for the given PHY. If
+ * this fails, then we set irq to PHY_POLL.
+ * Otherwise, we enable the interrupts in the PHY.
+ * Returns 0 on success.
+ * This should only be called with a valid IRQ number.
+ */
+int phy_start_interrupts(struct phy_device *phydev)
+{
+ int err = 0;
+
+ INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+
+ if (request_irq(phydev->irq, phy_interrupt,
+ SA_SHIRQ,
+ "phy_interrupt",
+ phydev) < 0) {
+ printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
+ phydev->bus->name,
+ phydev->irq);
+ phydev->irq = PHY_POLL;
+ return 0;
+ }
+
+ err = phy_enable_interrupts(phydev);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_start_interrupts);
+
+int phy_stop_interrupts(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_disable_interrupts(phydev);
+
+ if (err)
+ phy_error(phydev);
+
+ free_irq(phydev->irq, phydev);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_stop_interrupts);
+
+
+/* Scheduled by the phy_interrupt/timer to handle PHY changes */
+static void phy_change(void *data)
+{
+ int err;
+ struct phy_device *phydev = data;
+
+ err = phy_disable_interrupts(phydev);
+
+ if (err)
+ goto phy_err;
+
+ spin_lock(&phydev->lock);
+ if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
+ phydev->state = PHY_CHANGELINK;
+ spin_unlock(&phydev->lock);
+
+ enable_irq(phydev->irq);
+
+ /* Reenable interrupts */
+ err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
+
+ if (err)
+ goto irq_enable_err;
+
+ return;
+
+irq_enable_err:
+ disable_irq(phydev->irq);
+phy_err:
+ phy_error(phydev);
+}
+
+/* Bring down the PHY link, and stop checking the status. */
+void phy_stop(struct phy_device *phydev)
+{
+ spin_lock(&phydev->lock);
+
+ if (PHY_HALTED == phydev->state)
+ goto out_unlock;
+
+ if (phydev->irq != PHY_POLL) {
+ /* Clear any pending interrupts */
+ phy_clear_interrupt(phydev);
+
+ /* Disable PHY Interrupts */
+ phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+ }
+
+ phydev->state = PHY_HALTED;
+
+out_unlock:
+ spin_unlock(&phydev->lock);
+}
+
+
+/* phy_start
+ *
+ * description: Indicates the attached device's readiness to
+ * handle PHY-related work. Used during startup to start the
+ * PHY, and after a call to phy_stop() to resume operation.
+ * Also used to indicate the MDIO bus has cleared an error
+ * condition.
+ */
+void phy_start(struct phy_device *phydev)
+{
+ spin_lock(&phydev->lock);
+
+ switch (phydev->state) {
+ case PHY_STARTING:
+ phydev->state = PHY_PENDING;
+ break;
+ case PHY_READY:
+ phydev->state = PHY_UP;
+ break;
+ case PHY_HALTED:
+ phydev->state = PHY_RESUMING;
+ default:
+ break;
+ }
+ spin_unlock(&phydev->lock);
+}
+EXPORT_SYMBOL(phy_stop);
+EXPORT_SYMBOL(phy_start);
+
+/* PHY timer which handles the state machine */
+static void phy_timer(unsigned long data)
+{
+ struct phy_device *phydev = (struct phy_device *)data;
+ int needs_aneg = 0;
+ int err = 0;
+
+ spin_lock(&phydev->lock);
+
+ if (phydev->adjust_state)
+ phydev->adjust_state(phydev->attached_dev);
+
+ switch(phydev->state) {
+ case PHY_DOWN:
+ case PHY_STARTING:
+ case PHY_READY:
+ case PHY_PENDING:
+ break;
+ case PHY_UP:
+ needs_aneg = 1;
+
+ phydev->link_timeout = PHY_AN_TIMEOUT;
+
+ break;
+ case PHY_AN:
+ /* Check if negotiation is done. Break
+ * if there's an error */
+ err = phy_aneg_done(phydev);
+ if (err < 0)
+ break;
+
+ /* If auto-negotiation is done, we change to
+ * either RUNNING, or NOLINK */
+ if (err > 0) {
+ err = phy_read_status(phydev);
+
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ }
+
+ phydev->adjust_link(phydev->attached_dev);
+
+ } else if (0 == phydev->link_timeout--) {
+ /* The counter expired, so either we
+ * switch to forced mode, or the
+ * magic_aneg bit exists, and we try aneg
+ * again */
+ if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
+ int idx;
+
+ /* We'll start from the
+ * fastest speed, and work
+ * our way down */
+ idx = phy_find_valid(0,
+ phydev->supported);
+
+ phydev->speed = settings[idx].speed;
+ phydev->duplex = settings[idx].duplex;
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ phydev->state = PHY_FORCING;
+ phydev->link_timeout =
+ PHY_FORCE_TIMEOUT;
+
+ pr_info("Trying %d/%s\n",
+ phydev->speed,
+ DUPLEX_FULL ==
+ phydev->duplex ?
+ "FULL" : "HALF");
+ }
+
+ needs_aneg = 1;
+ }
+ break;
+ case PHY_NOLINK:
+ err = phy_read_status(phydev);
+
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
+ case PHY_FORCING:
+ err = phy_read_status(phydev);
+
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ if (0 == phydev->link_timeout--) {
+ phy_force_reduction(phydev);
+ needs_aneg = 1;
+ }
+ }
+
+ phydev->adjust_link(phydev->attached_dev);
+ break;
+ case PHY_RUNNING:
+ /* Only register a CHANGE if we are
+ * polling */
+ if (PHY_POLL == phydev->irq)
+ phydev->state = PHY_CHANGELINK;
+ break;
+ case PHY_CHANGELINK:
+ err = phy_read_status(phydev);
+
+ if (err)
+ break;
+
+ if (phydev->link) {
+ phydev->state = PHY_RUNNING;
+ netif_carrier_on(phydev->attached_dev);
+ } else {
+ phydev->state = PHY_NOLINK;
+ netif_carrier_off(phydev->attached_dev);
+ }
+
+ phydev->adjust_link(phydev->attached_dev);
+
+ if (PHY_POLL != phydev->irq)
+ err = phy_config_interrupt(phydev,
+ PHY_INTERRUPT_ENABLED);
+ break;
+ case PHY_HALTED:
+ if (phydev->link) {
+ phydev->link = 0;
+ netif_carrier_off(phydev->attached_dev);
+ phydev->adjust_link(phydev->attached_dev);
+ }
+ break;
+ case PHY_RESUMING:
+
+ err = phy_clear_interrupt(phydev);
+
+ if (err)
+ break;
+
+ err = phy_config_interrupt(phydev,
+ PHY_INTERRUPT_ENABLED);
+
+ if (err)
+ break;
+
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ err = phy_aneg_done(phydev);
+ if (err < 0)
+ break;
+
+ /* err > 0 if AN is done.
+ * Otherwise, it's 0, and we're
+ * still waiting for AN */
+ if (err > 0) {
+ phydev->state = PHY_RUNNING;
+ } else {
+ phydev->state = PHY_AN;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
+ }
+ } else
+ phydev->state = PHY_RUNNING;
+ break;
+ }
+
+ spin_unlock(&phydev->lock);
+
+ if (needs_aneg)
+ err = phy_start_aneg(phydev);
+
+ if (err < 0)
+ phy_error(phydev);
+
+ mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
+}
+
+#endif /* CONFIG_PHYCONTROL */
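(Illustrative note, not part of the patch.) With CONFIG_PHYCONTROL enabled, a network driver normally lets the state machine above do the polling and only supplies a callback that reprograms the MAC when the link changes. The sketch below shows the intended calling sequence using the phy_connect()/phy_start()/phy_stop()/phy_disconnect() entry points added in this series; the foo_* names and the "phy0:1" bus id are hypothetical:

    /* Sketch only -- foo_* is a hypothetical MAC driver. */
    static void foo_adjust_link(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);
            struct phy_device *phydev = priv->phydev;

            if (phydev->link)
                    foo_hw_set_link(priv, phydev->speed, phydev->duplex);  /* placeholder */
    }

    static int foo_open(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* "phy0:1" follows the "phy%d:%d" bus_id format used by mdiobus_register() */
            priv->phydev = phy_connect(dev, "phy0:1", &foo_adjust_link, 0);
            if (IS_ERR(priv->phydev))
                    return PTR_ERR(priv->phydev);

            phy_start(priv->phydev);
            return 0;
    }

    static int foo_stop(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            phy_stop(priv->phydev);
            phy_disconnect(priv->phydev);
            return 0;
    }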
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
new file mode 100644
index 00000000000..33f7bdb5857
--- /dev/null
+++ b/drivers/net/phy/phy_device.c
@@ -0,0 +1,696 @@
+/*
+ * drivers/net/phy/phy_device.c
+ *
+ * Framework for finding and configuring PHYs.
+ * Also contains generic PHY driver
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+static struct phy_driver genphy_driver;
+extern int mdio_bus_init(void);
+extern void mdio_bus_exit(void);
+
+/* get_phy_device
+ *
+ * description: Reads the ID registers of the PHY at addr on the
+ * bus, then allocates and returns the phy_device to
+ * represent it.
+ */
+struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
+{
+ int phy_reg;
+ u32 phy_id;
+ struct phy_device *dev = NULL;
+
+ /* Grab the bits from PHYIR1, and put them
+ * in the upper half */
+ phy_reg = bus->read(bus, addr, MII_PHYSID1);
+
+ if (phy_reg < 0)
+ return ERR_PTR(phy_reg);
+
+ phy_id = (phy_reg & 0xffff) << 16;
+
+ /* Grab the bits from PHYIR2, and put them in the lower half */
+ phy_reg = bus->read(bus, addr, MII_PHYSID2);
+
+ if (phy_reg < 0)
+ return ERR_PTR(phy_reg);
+
+ phy_id |= (phy_reg & 0xffff);
+
+ /* If the phy_id is all Fs, there is no device there */
+ if (0xffffffff == phy_id)
+ return NULL;
+
+ /* Otherwise, we allocate the device, and initialize the
+ * default values */
+ dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
+
+ if (NULL == dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->speed = 0;
+ dev->duplex = -1;
+ dev->pause = dev->asym_pause = 0;
+ dev->link = 1;
+
+ dev->autoneg = AUTONEG_ENABLE;
+
+ dev->addr = addr;
+ dev->phy_id = phy_id;
+ dev->bus = bus;
+
+ dev->state = PHY_DOWN;
+
+ spin_lock_init(&dev->lock);
+
+ return dev;
+}
+
+#ifdef CONFIG_PHYCONTROL
+/* phy_prepare_link:
+ *
+ * description: Tells the PHY infrastructure to handle the
+ * gory details on monitoring link status (whether through
+ * polling or an interrupt), and to call back to the
+ * connected device driver when the link status changes.
+ * If you want to monitor your own link state, don't call
+ * this function */
+void phy_prepare_link(struct phy_device *phydev,
+ void (*handler)(struct net_device *))
+{
+ phydev->adjust_link = handler;
+}
+
+/* phy_connect:
+ *
+ * description: Convenience function for connecting ethernet
+ * devices to PHY devices. The default behavior is for
+ * the PHY infrastructure to handle everything, and only notify
+ * the connected driver when the link status changes. If you
+ * don't want, or can't use the provided functionality, you may
+ * choose to call only the subset of functions which provide
+ * the desired functionality.
+ */
+struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
+ void (*handler)(struct net_device *), u32 flags)
+{
+ struct phy_device *phydev;
+
+ phydev = phy_attach(dev, phy_id, flags);
+
+ if (IS_ERR(phydev))
+ return phydev;
+
+ phy_prepare_link(phydev, handler);
+
+ phy_start_machine(phydev, NULL);
+
+ if (phydev->irq > 0)
+ phy_start_interrupts(phydev);
+
+ return phydev;
+}
+EXPORT_SYMBOL(phy_connect);
+
+void phy_disconnect(struct phy_device *phydev)
+{
+ if (phydev->irq > 0)
+ phy_stop_interrupts(phydev);
+
+ phy_stop_machine(phydev);
+
+ phydev->adjust_link = NULL;
+
+ phy_detach(phydev);
+}
+EXPORT_SYMBOL(phy_disconnect);
+
+#endif /* CONFIG_PHYCONTROL */
+
+/* phy_attach:
+ *
+ * description: Called by drivers to attach to a particular PHY
+ * device. The phy_device is found, and properly hooked up
+ * to the phy_driver. If no driver is attached, then the
+ * genphy_driver is used. The phy_device is given a ptr to
+ * the attaching device, and given a callback for link status
+ * change. The phy_device is returned to the attaching
+ * driver.
+ */
+static int phy_compare_id(struct device *dev, void *data)
+{
+ return strcmp((char *)data, dev->bus_id) ? 0 : 1;
+}
+
+struct phy_device *phy_attach(struct net_device *dev,
+ const char *phy_id, u32 flags)
+{
+ struct bus_type *bus = &mdio_bus_type;
+ struct phy_device *phydev;
+ struct device *d;
+
+ /* Search the list of PHY devices on the mdio bus for the
+ * PHY with the requested name */
+ d = bus_find_device(bus, NULL, (void *)phy_id, phy_compare_id);
+
+ if (d) {
+ phydev = to_phy_device(d);
+ } else {
+ printk(KERN_ERR "%s not found\n", phy_id);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* Assume that if no driver is currently bound, none
+ * exists, and fall back to the genphy driver. */
+ if (NULL == d->driver) {
+ int err;
+ down_write(&d->bus->subsys.rwsem);
+ d->driver = &genphy_driver.driver;
+
+ err = d->driver->probe(d);
+
+ /* don't leak the subsys rwsem taken above if probe fails */
+ if (err < 0) {
+ up_write(&d->bus->subsys.rwsem);
+ return ERR_PTR(err);
+ }
+
+ device_bind_driver(d);
+ up_write(&d->bus->subsys.rwsem);
+ }
+
+ if (phydev->attached_dev) {
+ printk(KERN_ERR "%s: %s already attached\n",
+ dev->name, phy_id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ phydev->attached_dev = dev;
+
+ phydev->dev_flags = flags;
+
+ return phydev;
+}
+EXPORT_SYMBOL(phy_attach);
+
+void phy_detach(struct phy_device *phydev)
+{
+ phydev->attached_dev = NULL;
+
+ /* If the device had no specific driver before (i.e. - it
+ * was using the generic driver), we unbind the device
+ * from the generic driver so that there's a chance a
+ * real driver could be loaded */
+ if (phydev->dev.driver == &genphy_driver.driver) {
+ down_write(&phydev->dev.bus->subsys.rwsem);
+ device_release_driver(&phydev->dev);
+ up_write(&phydev->dev.bus->subsys.rwsem);
+ }
+}
+EXPORT_SYMBOL(phy_detach);
+
+
+/* Generic PHY support and helper functions */
+
+/* genphy_config_advert
+ *
+ * description: Writes MII_ADVERTISE with the appropriate values,
+ * after sanitizing the values to make sure we only advertise
+ * what is supported
+ */
+int genphy_config_advert(struct phy_device *phydev)
+{
+ u32 advertise;
+ int adv;
+ int err;
+
+ /* Only allow advertising what
+ * this PHY supports */
+ phydev->advertising &= phydev->supported;
+ advertise = phydev->advertising;
+
+ /* Setup standard advertisement */
+ adv = phy_read(phydev, MII_ADVERTISE);
+
+ if (adv < 0)
+ return adv;
+
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ if (advertise & ADVERTISED_Pause)
+ adv |= ADVERTISE_PAUSE_CAP;
+ if (advertise & ADVERTISED_Asym_Pause)
+ adv |= ADVERTISE_PAUSE_ASYM;
+
+ err = phy_write(phydev, MII_ADVERTISE, adv);
+
+ if (err < 0)
+ return err;
+
+ /* Configure gigabit if it's supported */
+ if (phydev->supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full)) {
+ adv = phy_read(phydev, MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
+ adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+ if (advertise & SUPPORTED_1000baseT_Half)
+ adv |= ADVERTISE_1000HALF;
+ if (advertise & SUPPORTED_1000baseT_Full)
+ adv |= ADVERTISE_1000FULL;
+ err = phy_write(phydev, MII_CTRL1000, adv);
+
+ if (err < 0)
+ return err;
+ }
+
+ return adv;
+}
+EXPORT_SYMBOL(genphy_config_advert);
+
+/* genphy_setup_forced
+ *
+ * description: Configures MII_BMCR to force speed/duplex
+ * to the values in phydev. Assumes that the values are valid.
+ * Please see phy_sanitize_settings() */
+int genphy_setup_forced(struct phy_device *phydev)
+{
+ int ctl = BMCR_RESET;
+
+ phydev->pause = phydev->asym_pause = 0;
+
+ if (SPEED_1000 == phydev->speed)
+ ctl |= BMCR_SPEED1000;
+ else if (SPEED_100 == phydev->speed)
+ ctl |= BMCR_SPEED100;
+
+ if (DUPLEX_FULL == phydev->duplex)
+ ctl |= BMCR_FULLDPLX;
+
+ ctl = phy_write(phydev, MII_BMCR, ctl);
+
+ if (ctl < 0)
+ return ctl;
+
+ /* We just reset the device, so we'd better configure any
+ * settings the PHY requires to operate */
+ if (phydev->drv->config_init)
+ ctl = phydev->drv->config_init(phydev);
+
+ return ctl;
+}
+
+
+/* Enable and Restart Autonegotiation */
+int genphy_restart_aneg(struct phy_device *phydev)
+{
+ int ctl;
+
+ ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+
+ /* Don't isolate the PHY if we're negotiating */
+ ctl &= ~(BMCR_ISOLATE);
+
+ ctl = phy_write(phydev, MII_BMCR, ctl);
+
+ return ctl;
+}
+
+
+/* genphy_config_aneg
+ *
+ * description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we write the BMCR
+ */
+int genphy_config_aneg(struct phy_device *phydev)
+{
+ int err = 0;
+
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ err = genphy_config_advert(phydev);
+
+ if (err < 0)
+ return err;
+
+ err = genphy_restart_aneg(phydev);
+ } else
+ err = genphy_setup_forced(phydev);
+
+ return err;
+}
+EXPORT_SYMBOL(genphy_config_aneg);
+
+/* genphy_update_link
+ *
+ * description: Update the value in phydev->link to reflect the
+ * current link value. In order to do this, we need to read
+ * the status register twice, keeping the second value
+ */
+int genphy_update_link(struct phy_device *phydev)
+{
+ int status;
+
+ /* Do a fake read */
+ status = phy_read(phydev, MII_BMSR);
+
+ if (status < 0)
+ return status;
+
+ /* Read link and autonegotiation status */
+ status = phy_read(phydev, MII_BMSR);
+
+ if (status < 0)
+ return status;
+
+ if ((status & BMSR_LSTATUS) == 0)
+ phydev->link = 0;
+ else
+ phydev->link = 1;
+
+ return 0;
+}
+
+/* genphy_read_status
+ *
+ * description: Check the link, then figure out the current state
+ * by comparing what we advertise with what the link partner
+ * advertises. Start by checking the gigabit possibilities,
+ * then move on to 10/100.
+ */
+int genphy_read_status(struct phy_device *phydev)
+{
+ int adv;
+ int err;
+ int lpa;
+ int lpagb = 0;
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ if (AUTONEG_ENABLE == phydev->autoneg) {
+ if (phydev->supported & (SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full)) {
+ lpagb = phy_read(phydev, MII_STAT1000);
+
+ if (lpagb < 0)
+ return lpagb;
+
+ adv = phy_read(phydev, MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
+ lpagb &= adv << 2;
+ }
+
+ lpa = phy_read(phydev, MII_LPA);
+
+ if (lpa < 0)
+ return lpa;
+
+ adv = phy_read(phydev, MII_ADVERTISE);
+
+ if (adv < 0)
+ return adv;
+
+ lpa &= adv;
+
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_HALF;
+ phydev->pause = phydev->asym_pause = 0;
+
+ if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+ phydev->speed = SPEED_1000;
+
+ if (lpagb & LPA_1000FULL)
+ phydev->duplex = DUPLEX_FULL;
+ } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ phydev->speed = SPEED_100;
+
+ if (lpa & LPA_100FULL)
+ phydev->duplex = DUPLEX_FULL;
+ } else if (lpa & LPA_10FULL)
+ phydev->duplex = DUPLEX_FULL;
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+ }
+ } else {
+ int bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ phydev->pause = phydev->asym_pause = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_status);
+
+static int genphy_config_init(struct phy_device *phydev)
+{
+ u32 val;
+ u32 features;
+
+ /* For now, I'll claim that the generic driver supports
+ * all possible port types */
+ features = (SUPPORTED_TP | SUPPORTED_MII
+ | SUPPORTED_AUI | SUPPORTED_FIBRE |
+ SUPPORTED_BNC);
+
+ /* Do we support autonegotiation? */
+ val = phy_read(phydev, MII_BMSR);
+
+ if (val < 0)
+ return val;
+
+ if (val & BMSR_ANEGCAPABLE)
+ features |= SUPPORTED_Autoneg;
+
+ if (val & BMSR_100FULL)
+ features |= SUPPORTED_100baseT_Full;
+ if (val & BMSR_100HALF)
+ features |= SUPPORTED_100baseT_Half;
+ if (val & BMSR_10FULL)
+ features |= SUPPORTED_10baseT_Full;
+ if (val & BMSR_10HALF)
+ features |= SUPPORTED_10baseT_Half;
+
+ if (val & BMSR_ESTATEN) {
+ val = phy_read(phydev, MII_ESTATUS);
+
+ if (val < 0)
+ return val;
+
+ if (val & ESTATUS_1000_TFULL)
+ features |= SUPPORTED_1000baseT_Full;
+ if (val & ESTATUS_1000_THALF)
+ features |= SUPPORTED_1000baseT_Half;
+ }
+
+ phydev->supported = features;
+ phydev->advertising = features;
+
+ return 0;
+}
+
+
+/* phy_probe
+ *
+ * description: Take care of setting up the phy_device structure,
+ * set the state to READY (the driver's init function should
+ * set it to STARTING if needed).
+ */
+static int phy_probe(struct device *dev)
+{
+ struct phy_device *phydev;
+ struct phy_driver *phydrv;
+ struct device_driver *drv;
+ int err = 0;
+
+ phydev = to_phy_device(dev);
+
+ /* Make sure the driver is held.
+ * XXX -- Is this correct? */
+ drv = get_driver(phydev->dev.driver);
+ phydrv = to_phy_driver(drv);
+ phydev->drv = phydrv;
+
+ /* Disable the interrupt if the PHY doesn't support it */
+ if (!(phydrv->flags & PHY_HAS_INTERRUPT))
+ phydev->irq = PHY_POLL;
+
+ spin_lock(&phydev->lock);
+
+ /* Start out supporting everything. Eventually,
+ * a controller will attach, and may modify one
+ * or both of these values */
+ phydev->supported = phydrv->features;
+ phydev->advertising = phydrv->features;
+
+ /* Set the state to READY by default */
+ phydev->state = PHY_READY;
+
+ if (phydev->drv->probe)
+ err = phydev->drv->probe(phydev);
+
+ spin_unlock(&phydev->lock);
+
+ if (err < 0)
+ return err;
+
+ if (phydev->drv->config_init)
+ err = phydev->drv->config_init(phydev);
+
+ return err;
+}
+
+static int phy_remove(struct device *dev)
+{
+ struct phy_device *phydev;
+
+ phydev = to_phy_device(dev);
+
+ spin_lock(&phydev->lock);
+ phydev->state = PHY_DOWN;
+ spin_unlock(&phydev->lock);
+
+ if (phydev->drv->remove)
+ phydev->drv->remove(phydev);
+
+ put_driver(dev->driver);
+ phydev->drv = NULL;
+
+ return 0;
+}
+
+int phy_driver_register(struct phy_driver *new_driver)
+{
+ int retval;
+
+ memset(&new_driver->driver, 0, sizeof(new_driver->driver));
+ new_driver->driver.name = new_driver->name;
+ new_driver->driver.bus = &mdio_bus_type;
+ new_driver->driver.probe = phy_probe;
+ new_driver->driver.remove = phy_remove;
+
+ retval = driver_register(&new_driver->driver);
+
+ if (retval) {
+ printk(KERN_ERR "%s: Error %d in registering driver\n",
+ new_driver->name, retval);
+
+ return retval;
+ }
+
+ pr_info("%s: Registered new driver\n", new_driver->name);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_driver_register);
+
+void phy_driver_unregister(struct phy_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(phy_driver_unregister);
+
+static struct phy_driver genphy_driver = {
+ .phy_id = 0xffffffff,
+ .phy_id_mask = 0xffffffff,
+ .name = "Generic PHY",
+ .config_init = genphy_config_init,
+ .features = 0,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = {.owner= THIS_MODULE, },
+};
+
+static int __init phy_init(void)
+{
+ int rc;
+
+ rc = mdio_bus_init();
+ if (rc)
+ return rc;
+
+ rc = phy_driver_register(&genphy_driver);
+ if (rc)
+ mdio_bus_exit();
+
+ return rc;
+}
+
+static void __exit phy_exit(void)
+{
+ phy_driver_unregister(&genphy_driver);
+ mdio_bus_exit();
+}
+
+subsys_initcall(phy_init);
+module_exit(phy_exit);
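Taken together with the state machine in phy.c, phy_connect() above is the whole CONFIG_PHYCONTROL entry point a MAC driver needs: it attaches to the named PHY, records the adjust_link handler, starts the timer-driven state machine and, if the PHY has a usable IRQ, enables interrupts. A rough usage sketch follows; priv->phy_bus_id, priv->phydev and mydrv_adjust_link are illustrative names carried over from the previous sketch, not symbols defined by this patch.

static int mydrv_connect_phy(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;

	/* phy_bus_id is the bus_id string of the PHY on its mii bus */
	phydev = phy_connect(dev, priv->phy_bus_id, &mydrv_adjust_link, 0);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	/* Advertise no more than the MAC itself can handle */
	phydev->supported &= PHY_BASIC_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

static void mydrv_disconnect_phy(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	phy_disconnect(priv->phydev);
	priv->phydev = NULL;
}

phy_disconnect() undoes the whole setup: interrupts are stopped, the state machine is halted and the device is detached, so the pairing above is all the PHY cleanup the MAC driver needs.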
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
new file mode 100644
index 00000000000..d461ba45763
--- /dev/null
+++ b/drivers/net/phy/qsemi.c
@@ -0,0 +1,143 @@
+/*
+ * drivers/net/phy/qsemi.c
+ *
+ * Driver for Quality Semiconductor PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
+
+/* register definitions */
+
+#define MII_QS6612_MCR 17 /* Mode Control Register */
+#define MII_QS6612_FTR 27 /* Factory Test Register */
+#define MII_QS6612_MCO 28 /* Misc. Control Register */
+#define MII_QS6612_ISR 29 /* Interrupt Source Register */
+#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
+#define MII_QS6612_IMR_INIT 0x003a
+#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
+
+#define QS6612_PCR_AN_COMPLETE 0x1000
+#define QS6612_PCR_RLBEN 0x0200
+#define QS6612_PCR_DCREN 0x0100
+#define QS6612_PCR_4B5BEN 0x0040
+#define QS6612_PCR_TX_ISOLATE 0x0020
+#define QS6612_PCR_MLT3_DIS 0x0002
+#define QS6612_PCR_SCRM_DESCRM 0x0001
+
+MODULE_DESCRIPTION("Quality Semiconductor PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+/* Returns 0, unless there's a write error */
+static int qs6612_config_init(struct phy_device *phydev)
+{
+ /* The PHY powers up isolated on the RPX,
+ * so send a command to allow operation.
+ * XXX - My docs indicate this should be 0x0940
+ * ...or something. The current value sets three
+ * reserved bits, bit 11, which specifies it should be
+ * set to one, bit 10, which specifies it should be set
+ * to 0, and bit 7, which doesn't specify. However, my
+ * docs are preliminary, and I will leave it like this
+ * until someone more knowledgeable corrects me or it.
+ * -- Andy Fleming
+ */
+ return phy_write(phydev, MII_QS6612_PCR, 0x0dc0);
+}
+
+static int qs6612_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, MII_QS6612_ISR);
+
+ if (err < 0)
+ return err;
+
+ err = phy_read(phydev, MII_BMSR);
+
+ if (err < 0)
+ return err;
+
+ err = phy_read(phydev, MII_EXPANSION);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int qs6612_config_intr(struct phy_device *phydev)
+{
+ int err;
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, MII_QS6612_IMR,
+ MII_QS6612_IMR_INIT);
+ else
+ err = phy_write(phydev, MII_QS6612_IMR, 0);
+
+ return err;
+
+}
+
+static struct phy_driver qs6612_driver = {
+ .phy_id = 0x00181440,
+ .name = "QS6612",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = qs6612_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = qs6612_ack_interrupt,
+ .config_intr = qs6612_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init qs6612_init(void)
+{
+ return phy_driver_register(&qs6612_driver);
+}
+
+static void __exit qs6612_exit(void)
+{
+ phy_driver_unregister(&qs6612_driver);
+}
+
+module_init(qs6612_init);
+module_exit(qs6612_exit);
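The qs6612 entry above also shows how a driver's .phy_id/.phy_id_mask pair lines up with the ID that get_phy_device() assembles from the two MII ID registers. The small worked example below is only an illustration of that arithmetic; the revision-in-the-low-nibble layout is the usual 802.3 convention, and the masked comparison itself is done by the mdio bus match code, which is not part of this hunk.

/*
 * get_phy_device() builds:  phy_id = (MII_PHYSID1 << 16) | MII_PHYSID2
 *
 * For the QS6612:           MII_PHYSID1 = 0x0018
 *                           MII_PHYSID2 = 0x144r   (r = revision nibble)
 *                           phy_id      = 0x0018144r
 *
 * With .phy_id_mask = 0xfffffff0 the revision nibble is ignored, so any
 * stepping of the part matches the driver's .phy_id = 0x00181440.
 */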
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d5afe05cd82..f0471d102e3 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -187,6 +187,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), },
{ PCI_DEVICE(0x16ec, 0x0116), },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, },
{0,},
};
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 7092ca6b277..2234a8f05eb 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -62,6 +62,7 @@ typedef struct _XENA_dev_config {
#define ADAPTER_STATUS_RMAC_REMOTE_FAULT BIT(6)
#define ADAPTER_STATUS_RMAC_LOCAL_FAULT BIT(7)
#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8)
+#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8)
#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8)
#define ADAPTER_STATUS_MC_DRAM_READY BIT(24)
#define ADAPTER_STATUS_MC_QUEUES_READY BIT(25)
@@ -77,21 +78,34 @@ typedef struct _XENA_dev_config {
#define ADAPTER_ECC_EN BIT(55)
u64 serr_source;
-#define SERR_SOURCE_PIC BIT(0)
-#define SERR_SOURCE_TXDMA BIT(1)
-#define SERR_SOURCE_RXDMA BIT(2)
+#define SERR_SOURCE_PIC BIT(0)
+#define SERR_SOURCE_TXDMA BIT(1)
+#define SERR_SOURCE_RXDMA BIT(2)
#define SERR_SOURCE_MAC BIT(3)
#define SERR_SOURCE_MC BIT(4)
#define SERR_SOURCE_XGXS BIT(5)
-#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
- SERR_SOURCE_TXDMA | \
- SERR_SOURCE_RXDMA | \
- SERR_SOURCE_MAC | \
- SERR_SOURCE_MC | \
- SERR_SOURCE_XGXS)
-
-
- u8 unused_0[0x800 - 0x120];
+#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \
+ SERR_SOURCE_TXDMA | \
+ SERR_SOURCE_RXDMA | \
+ SERR_SOURCE_MAC | \
+ SERR_SOURCE_MC | \
+ SERR_SOURCE_XGXS)
+
+ u64 pci_mode;
+#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60)
+#define PCI_MODE_PCI_33 0
+#define PCI_MODE_PCI_66 0x1
+#define PCI_MODE_PCIX_M1_66 0x2
+#define PCI_MODE_PCIX_M1_100 0x3
+#define PCI_MODE_PCIX_M1_133 0x4
+#define PCI_MODE_PCIX_M2_66 0x5
+#define PCI_MODE_PCIX_M2_100 0x6
+#define PCI_MODE_PCIX_M2_133 0x7
+#define PCI_MODE_UNSUPPORTED BIT(0)
+#define PCI_MODE_32_BITS BIT(8)
+#define PCI_MODE_UNKNOWN_MODE BIT(9)
+
+ u8 unused_0[0x800 - 0x128];
/* PCI-X Controller registers */
u64 pic_int_status;
@@ -153,7 +167,11 @@ typedef struct _XENA_dev_config {
u8 unused4[0x08];
u64 gpio_int_reg;
+#define GPIO_INT_REG_LINK_DOWN BIT(1)
+#define GPIO_INT_REG_LINK_UP BIT(2)
u64 gpio_int_mask;
+#define GPIO_INT_MASK_LINK_DOWN BIT(1)
+#define GPIO_INT_MASK_LINK_UP BIT(2)
u64 gpio_alarms;
u8 unused5[0x38];
@@ -223,19 +241,16 @@ typedef struct _XENA_dev_config {
u64 xmsi_data;
u64 rx_mat;
+#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8)
u8 unused6[0x8];
- u64 tx_mat0_7;
- u64 tx_mat8_15;
- u64 tx_mat16_23;
- u64 tx_mat24_31;
- u64 tx_mat32_39;
- u64 tx_mat40_47;
- u64 tx_mat48_55;
- u64 tx_mat56_63;
+ u64 tx_mat0_n[0x8];
+#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
- u8 unused_1[0x10];
+ u8 unused_1[0x8];
+ u64 stat_byte_cnt;
+#define STAT_BC(n) vBIT(n,4,12)
/* Automated statistics collection */
u64 stat_cfg;
@@ -246,6 +261,7 @@ typedef struct _XENA_dev_config {
#define STAT_TRSF_PER(n) TBD
#define PER_SEC 0x208d5
#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32)
+#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32)
u64 stat_addr;
@@ -267,8 +283,15 @@ typedef struct _XENA_dev_config {
u64 gpio_control;
#define GPIO_CTRL_GPIO_0 BIT(8)
+ u64 misc_control;
+#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
+
+ u8 unused7_1[0x240 - 0x208];
+
+ u64 wreq_split_mask;
+#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12)
- u8 unused7[0x600];
+ u8 unused7_2[0x800 - 0x248];
/* TxDMA registers */
u64 txdma_int_status;
@@ -290,6 +313,7 @@ typedef struct _XENA_dev_config {
u64 pcc_err_reg;
#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8)
+#define PCC_ENABLE_FOUR vBIT(0x0F,0,8)
u64 pcc_err_mask;
u64 pcc_err_alarm;
@@ -468,6 +492,7 @@ typedef struct _XENA_dev_config {
#define PRC_CTRL_NO_SNOOP (BIT(22)|BIT(23))
#define PRC_CTRL_NO_SNOOP_DESC BIT(22)
#define PRC_CTRL_NO_SNOOP_BUFF BIT(23)
+#define PRC_CTRL_BIMODAL_INTERRUPT BIT(37)
#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24)
u64 prc_alarm_action;
@@ -691,6 +716,10 @@ typedef struct _XENA_dev_config {
#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22)
#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23)
#define MC_ERR_REG_SM_ERR BIT(31)
+#define MC_ERR_REG_ECC_ALL_SNG (BIT(6) | \
+ BIT(7) | BIT(17) | BIT(19))
+#define MC_ERR_REG_ECC_ALL_DBL (BIT(14) | \
+ BIT(15) | BIT(18) | BIT(20))
u64 mc_err_mask;
u64 mc_err_alarm;
@@ -736,7 +765,19 @@ typedef struct _XENA_dev_config {
u64 mc_rldram_test_d1;
u8 unused24[0x300 - 0x288];
u64 mc_rldram_test_d2;
- u8 unused25[0x700 - 0x308];
+
+ u8 unused24_1[0x360 - 0x308];
+ u64 mc_rldram_ctrl;
+#define MC_RLDRAM_ENABLE_ODT BIT(7)
+
+ u8 unused24_2[0x640 - 0x368];
+ u64 mc_rldram_ref_per_herc;
+#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16)
+
+ u8 unused24_3[0x660 - 0x648];
+ u64 mc_rldram_mrs_herc;
+
+ u8 unused25[0x700 - 0x668];
u64 mc_debug_ctrl;
u8 unused26[0x3000 - 0x2f08];
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ea638b162d3..7ca78228b10 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -11,29 +11,28 @@
* See the file COPYING in this distribution for more information.
*
* Credits:
- * Jeff Garzik : For pointing out the improper error condition
- * check in the s2io_xmit routine and also some
- * issues in the Tx watch dog function. Also for
- * patiently answering all those innumerable
+ * Jeff Garzik : For pointing out the improper error condition
+ * check in the s2io_xmit routine and also some
+ * issues in the Tx watch dog function. Also for
+ * patiently answering all those innumerable
* questions regaring the 2.6 porting issues.
* Stephen Hemminger : Providing proper 2.6 porting mechanism for some
* macros available only in 2.6 Kernel.
- * Francois Romieu : For pointing out all code part that were
+ * Francois Romieu : For pointing out all code part that were
* deprecated and also styling related comments.
- * Grant Grundler : For helping me get rid of some Architecture
+ * Grant Grundler : For helping me get rid of some Architecture
* dependent code.
* Christopher Hellwig : Some more 2.6 specific issues in the driver.
- *
+ *
* The module loadable parameters that are supported by the driver and a brief
* explaination of all the variables.
- * rx_ring_num : This can be used to program the number of receive rings used
- * in the driver.
- * rx_ring_len: This defines the number of descriptors each ring can have. This
+ * rx_ring_num : This can be used to program the number of receive rings used
+ * in the driver.
+ * rx_ring_len: This defines the number of descriptors each ring can have. This
* is also an array of size 8.
* tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
- * tx_fifo_len: This too is an array of 8. Each element defines the number of
+ * tx_fifo_len: This too is an array of 8. Each element defines the number of
* Tx descriptors that can be associated with each corresponding FIFO.
- * in PCI Configuration space.
************************************************************************/
#include <linux/config.h>
@@ -56,27 +55,39 @@
#include <linux/ethtool.h>
#include <linux/version.h>
#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
-#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
+#include <asm/io.h>
/* local include */
#include "s2io.h"
#include "s2io-regs.h"
/* S2io Driver name & version. */
-static char s2io_driver_name[] = "s2io";
-static char s2io_driver_version[] = "Version 1.7.7.1";
+static char s2io_driver_name[] = "Neterion";
+static char s2io_driver_version[] = "Version 2.0.3.1";
+
+static inline int RXD_IS_UP2DT(RxD_t *rxdp)
+{
+ int ret;
+
+ ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+ (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
-/*
+ return ret;
+}
+
+/*
* Cards with following subsystem_id have a link state indication
* problem, 600B, 600C, 600D, 640B, 640C and 640D.
* macro below identifies these cards given the subsystem_id.
*/
-#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
- (((subid >= 0x600B) && (subid <= 0x600D)) || \
- ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
+#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
+ (dev_type == XFRAME_I_DEVICE) ? \
+ ((((subid >= 0x600B) && (subid <= 0x600D)) || \
+ ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
@@ -86,9 +97,12 @@ static char s2io_driver_version[] = "Version 1.7.7.1";
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
{
int level = 0;
- if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
+ mac_info_t *mac_control;
+
+ mac_control = &sp->mac_control;
+ if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
level = LOW;
- if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
+ if (rxb_size <= MAX_RXDS_PER_BLOCK) {
level = PANIC;
}
}
@@ -145,6 +159,9 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"rmac_pause_cnt"},
{"rmac_accepted_ip"},
{"rmac_err_tcp"},
+ {"\n DRIVER STATISTICS"},
+ {"single_bit_ecc_errs"},
+ {"double_bit_ecc_errs"},
};
#define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
@@ -153,8 +170,37 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
#define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
+#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
+ init_timer(&timer); \
+ timer.function = handle; \
+ timer.data = (unsigned long) arg; \
+ mod_timer(&timer, (jiffies + exp)) \
+
+/* Add the vlan */
+static void s2io_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ nic_t *nic = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nic->tx_lock, flags);
+ nic->vlgrp = grp;
+ spin_unlock_irqrestore(&nic->tx_lock, flags);
+}
+
+/* Unregister the vlan */
+static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
+{
+ nic_t *nic = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nic->tx_lock, flags);
+ if (nic->vlgrp)
+ nic->vlgrp->vlan_devices[vid] = NULL;
+ spin_unlock_irqrestore(&nic->tx_lock, flags);
+}
-/*
+/*
* Constants to be programmed into the Xena's registers, to configure
* the XAUI.
*/
@@ -162,7 +208,28 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
#define END_SIGN 0x0
-static u64 default_mdio_cfg[] = {
+static u64 herc_act_dtx_cfg[] = {
+ /* Set address */
+ 0x8000051536750000ULL, 0x80000515367500E0ULL,
+ /* Write data */
+ 0x8000051536750004ULL, 0x80000515367500E4ULL,
+ /* Set address */
+ 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
+ /* Write data */
+ 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
+ /* Set address */
+ 0x801205150D440000ULL, 0x801205150D4400E0ULL,
+ /* Write data */
+ 0x801205150D440004ULL, 0x801205150D4400E4ULL,
+ /* Set address */
+ 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
+ /* Write data */
+ 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
+ /* Done */
+ END_SIGN
+};
+
+static u64 xena_mdio_cfg[] = {
/* Reset PMA PLL */
0xC001010000000000ULL, 0xC0010100000000E0ULL,
0xC0010100008000E4ULL,
@@ -172,7 +239,7 @@ static u64 default_mdio_cfg[] = {
END_SIGN
};
-static u64 default_dtx_cfg[] = {
+static u64 xena_dtx_cfg[] = {
0x8000051500000000ULL, 0x80000515000000E0ULL,
0x80000515D93500E4ULL, 0x8001051500000000ULL,
0x80010515000000E0ULL, 0x80010515001E00E4ULL,
@@ -196,8 +263,7 @@ static u64 default_dtx_cfg[] = {
END_SIGN
};
-
-/*
+/*
* Constants for Fixing the MacAddress problem seen mostly on
* Alpha machines.
*/
@@ -226,20 +292,25 @@ static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };
-static unsigned int Stats_refresh_time = 4;
+static unsigned int rts_frm_len[MAX_RX_RINGS] =
+ {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+static unsigned int use_continuous_tx_intrs = 1;
static unsigned int rmac_pause_time = 65535;
static unsigned int mc_pause_threshold_q0q3 = 187;
static unsigned int mc_pause_threshold_q4q7 = 187;
static unsigned int shared_splits;
static unsigned int tmac_util_period = 5;
static unsigned int rmac_util_period = 5;
+static unsigned int bimodal = 0;
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;
#endif
+/* Frequency of Rx desc syncs expressed as power of 2 */
+static unsigned int rxsync_frequency = 3;
-/*
+/*
* S2IO device table.
- * This table lists all the devices that this driver supports.
+ * This table lists all the devices that this driver supports.
*/
static struct pci_device_id s2io_tbl[] __devinitdata = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
@@ -247,9 +318,9 @@ static struct pci_device_id s2io_tbl[] __devinitdata = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
- PCI_ANY_ID, PCI_ANY_ID},
- {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
- PCI_ANY_ID, PCI_ANY_ID},
+ PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
+ PCI_ANY_ID, PCI_ANY_ID},
{0,}
};
@@ -268,8 +339,8 @@ static struct pci_driver s2io_driver = {
/**
* init_shared_mem - Allocation and Initialization of Memory
* @nic: Device private variable.
- * Description: The function allocates all the memory areas shared
- * between the NIC and the driver. This includes Tx descriptors,
+ * Description: The function allocates all the memory areas shared
+ * between the NIC and the driver. This includes Tx descriptors,
* Rx descriptors and the statistics block.
*/
@@ -279,11 +350,11 @@ static int init_shared_mem(struct s2io_nic *nic)
void *tmp_v_addr, *tmp_v_addr_next;
dma_addr_t tmp_p_addr, tmp_p_addr_next;
RxD_block_t *pre_rxd_blk = NULL;
- int i, j, blk_cnt;
+ int i, j, blk_cnt, rx_sz, tx_sz;
int lst_size, lst_per_page;
struct net_device *dev = nic->dev;
#ifdef CONFIG_2BUFF_MODE
- unsigned long tmp;
+ u64 tmp;
buffAdd_t *ba;
#endif
@@ -300,36 +371,41 @@ static int init_shared_mem(struct s2io_nic *nic)
size += config->tx_cfg[i].fifo_len;
}
if (size > MAX_AVAILABLE_TXDS) {
- DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
- dev->name);
- DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
- DBG_PRINT(ERR_DBG, "that can be used\n");
+ DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
+ __FUNCTION__);
+ DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
return FAILURE;
}
lst_size = (sizeof(TxD_t) * config->max_txds);
+ tx_sz = lst_size * size;
lst_per_page = PAGE_SIZE / lst_size;
for (i = 0; i < config->tx_fifo_num; i++) {
int fifo_len = config->tx_cfg[i].fifo_len;
int list_holder_size = fifo_len * sizeof(list_info_hold_t);
- nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
- if (!nic->list_info[i]) {
+ mac_control->fifos[i].list_info = kmalloc(list_holder_size,
+ GFP_KERNEL);
+ if (!mac_control->fifos[i].list_info) {
DBG_PRINT(ERR_DBG,
"Malloc failed for list_info\n");
return -ENOMEM;
}
- memset(nic->list_info[i], 0, list_holder_size);
+ memset(mac_control->fifos[i].list_info, 0, list_holder_size);
}
for (i = 0; i < config->tx_fifo_num; i++) {
int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
lst_per_page);
- mac_control->tx_curr_put_info[i].offset = 0;
- mac_control->tx_curr_put_info[i].fifo_len =
+ mac_control->fifos[i].tx_curr_put_info.offset = 0;
+ mac_control->fifos[i].tx_curr_put_info.fifo_len =
config->tx_cfg[i].fifo_len - 1;
- mac_control->tx_curr_get_info[i].offset = 0;
- mac_control->tx_curr_get_info[i].fifo_len =
+ mac_control->fifos[i].tx_curr_get_info.offset = 0;
+ mac_control->fifos[i].tx_curr_get_info.fifo_len =
config->tx_cfg[i].fifo_len - 1;
+ mac_control->fifos[i].fifo_no = i;
+ mac_control->fifos[i].nic = nic;
+ mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
+
for (j = 0; j < page_num; j++) {
int k = 0;
dma_addr_t tmp_p;
@@ -345,16 +421,15 @@ static int init_shared_mem(struct s2io_nic *nic)
while (k < lst_per_page) {
int l = (j * lst_per_page) + k;
if (l == config->tx_cfg[i].fifo_len)
- goto end_txd_alloc;
- nic->list_info[i][l].list_virt_addr =
+ break;
+ mac_control->fifos[i].list_info[l].list_virt_addr =
tmp_v + (k * lst_size);
- nic->list_info[i][l].list_phy_addr =
+ mac_control->fifos[i].list_info[l].list_phy_addr =
tmp_p + (k * lst_size);
k++;
}
}
}
- end_txd_alloc:
/* Allocation and initialization of RXDs in Rings */
size = 0;
@@ -367,21 +442,26 @@ static int init_shared_mem(struct s2io_nic *nic)
return FAILURE;
}
size += config->rx_cfg[i].num_rxd;
- nic->block_count[i] =
+ mac_control->rings[i].block_count =
config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
- nic->pkt_cnt[i] =
- config->rx_cfg[i].num_rxd - nic->block_count[i];
+ mac_control->rings[i].pkt_cnt =
+ config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
}
+ size = (size * (sizeof(RxD_t)));
+ rx_sz = size;
for (i = 0; i < config->rx_ring_num; i++) {
- mac_control->rx_curr_get_info[i].block_index = 0;
- mac_control->rx_curr_get_info[i].offset = 0;
- mac_control->rx_curr_get_info[i].ring_len =
+ mac_control->rings[i].rx_curr_get_info.block_index = 0;
+ mac_control->rings[i].rx_curr_get_info.offset = 0;
+ mac_control->rings[i].rx_curr_get_info.ring_len =
config->rx_cfg[i].num_rxd - 1;
- mac_control->rx_curr_put_info[i].block_index = 0;
- mac_control->rx_curr_put_info[i].offset = 0;
- mac_control->rx_curr_put_info[i].ring_len =
+ mac_control->rings[i].rx_curr_put_info.block_index = 0;
+ mac_control->rings[i].rx_curr_put_info.offset = 0;
+ mac_control->rings[i].rx_curr_put_info.ring_len =
config->rx_cfg[i].num_rxd - 1;
+ mac_control->rings[i].nic = nic;
+ mac_control->rings[i].ring_no = i;
+
blk_cnt =
config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
/* Allocating all the Rx blocks */
@@ -395,32 +475,36 @@ static int init_shared_mem(struct s2io_nic *nic)
&tmp_p_addr);
if (tmp_v_addr == NULL) {
/*
- * In case of failure, free_shared_mem()
- * is called, which should free any
- * memory that was alloced till the
+ * In case of failure, free_shared_mem()
+ * is called, which should free any
+ * memory that was alloced till the
* failure happened.
*/
- nic->rx_blocks[i][j].block_virt_addr =
+ mac_control->rings[i].rx_blocks[j].block_virt_addr =
tmp_v_addr;
return -ENOMEM;
}
memset(tmp_v_addr, 0, size);
- nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
- nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
+ mac_control->rings[i].rx_blocks[j].block_virt_addr =
+ tmp_v_addr;
+ mac_control->rings[i].rx_blocks[j].block_dma_addr =
+ tmp_p_addr;
}
/* Interlinking all Rx Blocks */
for (j = 0; j < blk_cnt; j++) {
- tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
+ tmp_v_addr =
+ mac_control->rings[i].rx_blocks[j].block_virt_addr;
tmp_v_addr_next =
- nic->rx_blocks[i][(j + 1) %
+ mac_control->rings[i].rx_blocks[(j + 1) %
blk_cnt].block_virt_addr;
- tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+ tmp_p_addr =
+ mac_control->rings[i].rx_blocks[j].block_dma_addr;
tmp_p_addr_next =
- nic->rx_blocks[i][(j + 1) %
+ mac_control->rings[i].rx_blocks[(j + 1) %
blk_cnt].block_dma_addr;
pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
- pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
+ pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
* marker.
*/
#ifndef CONFIG_2BUFF_MODE
@@ -433,43 +517,43 @@ static int init_shared_mem(struct s2io_nic *nic)
}
#ifdef CONFIG_2BUFF_MODE
- /*
+ /*
* Allocation of Storages for buffer addresses in 2BUFF mode
* and the buffers as well.
*/
for (i = 0; i < config->rx_ring_num; i++) {
blk_cnt =
config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
- nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
+ mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
GFP_KERNEL);
- if (!nic->ba[i])
+ if (!mac_control->rings[i].ba)
return -ENOMEM;
for (j = 0; j < blk_cnt; j++) {
int k = 0;
- nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
+ mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
(MAX_RXDS_PER_BLOCK + 1)),
GFP_KERNEL);
- if (!nic->ba[i][j])
+ if (!mac_control->rings[i].ba[j])
return -ENOMEM;
while (k != MAX_RXDS_PER_BLOCK) {
- ba = &nic->ba[i][j][k];
+ ba = &mac_control->rings[i].ba[j][k];
- ba->ba_0_org = kmalloc
+ ba->ba_0_org = (void *) kmalloc
(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
if (!ba->ba_0_org)
return -ENOMEM;
- tmp = (unsigned long) ba->ba_0_org;
+ tmp = (u64) ba->ba_0_org;
tmp += ALIGN_SIZE;
- tmp &= ~((unsigned long) ALIGN_SIZE);
+ tmp &= ~((u64) ALIGN_SIZE);
ba->ba_0 = (void *) tmp;
- ba->ba_1_org = kmalloc
+ ba->ba_1_org = (void *) kmalloc
(BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
if (!ba->ba_1_org)
return -ENOMEM;
- tmp = (unsigned long) ba->ba_1_org;
+ tmp = (u64) ba->ba_1_org;
tmp += ALIGN_SIZE;
- tmp &= ~((unsigned long) ALIGN_SIZE);
+ tmp &= ~((u64) ALIGN_SIZE);
ba->ba_1 = (void *) tmp;
k++;
}
@@ -483,9 +567,9 @@ static int init_shared_mem(struct s2io_nic *nic)
(nic->pdev, size, &mac_control->stats_mem_phy);
if (!mac_control->stats_mem) {
- /*
- * In case of failure, free_shared_mem() is called, which
- * should free any memory that was alloced till the
+ /*
+ * In case of failure, free_shared_mem() is called, which
+ * should free any memory that was alloced till the
* failure happened.
*/
return -ENOMEM;
@@ -495,15 +579,14 @@ static int init_shared_mem(struct s2io_nic *nic)
tmp_v_addr = mac_control->stats_mem;
mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
memset(tmp_v_addr, 0, size);
-
DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
(unsigned long long) tmp_p_addr);
return SUCCESS;
}
-/**
- * free_shared_mem - Free the allocated Memory
+/**
+ * free_shared_mem - Free the allocated Memory
* @nic: Device private variable.
* Description: This function is to free all memory locations allocated by
* the init_shared_mem() function and return it to the kernel.
@@ -533,15 +616,19 @@ static void free_shared_mem(struct s2io_nic *nic)
lst_per_page);
for (j = 0; j < page_num; j++) {
int mem_blks = (j * lst_per_page);
- if (!nic->list_info[i][mem_blks].list_virt_addr)
+ if ((!mac_control->fifos[i].list_info) ||
+ (!mac_control->fifos[i].list_info[mem_blks].
+ list_virt_addr))
break;
pci_free_consistent(nic->pdev, PAGE_SIZE,
- nic->list_info[i][mem_blks].
+ mac_control->fifos[i].
+ list_info[mem_blks].
list_virt_addr,
- nic->list_info[i][mem_blks].
+ mac_control->fifos[i].
+ list_info[mem_blks].
list_phy_addr);
}
- kfree(nic->list_info[i]);
+ kfree(mac_control->fifos[i].list_info);
}
#ifndef CONFIG_2BUFF_MODE
@@ -550,10 +637,12 @@ static void free_shared_mem(struct s2io_nic *nic)
size = SIZE_OF_BLOCK;
#endif
for (i = 0; i < config->rx_ring_num; i++) {
- blk_cnt = nic->block_count[i];
+ blk_cnt = mac_control->rings[i].block_count;
for (j = 0; j < blk_cnt; j++) {
- tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
- tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+ tmp_v_addr = mac_control->rings[i].rx_blocks[j].
+ block_virt_addr;
+ tmp_p_addr = mac_control->rings[i].rx_blocks[j].
+ block_dma_addr;
if (tmp_v_addr == NULL)
break;
pci_free_consistent(nic->pdev, size,
@@ -566,35 +655,21 @@ static void free_shared_mem(struct s2io_nic *nic)
for (i = 0; i < config->rx_ring_num; i++) {
blk_cnt =
config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
- if (!nic->ba[i])
- goto end_free;
for (j = 0; j < blk_cnt; j++) {
int k = 0;
- if (!nic->ba[i][j]) {
- kfree(nic->ba[i]);
- goto end_free;
- }
+ if (!mac_control->rings[i].ba[j])
+ continue;
while (k != MAX_RXDS_PER_BLOCK) {
- buffAdd_t *ba = &nic->ba[i][j][k];
- if (!ba || !ba->ba_0_org || !ba->ba_1_org)
- {
- kfree(nic->ba[i]);
- kfree(nic->ba[i][j]);
- if(ba->ba_0_org)
- kfree(ba->ba_0_org);
- if(ba->ba_1_org)
- kfree(ba->ba_1_org);
- goto end_free;
- }
+ buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
kfree(ba->ba_0_org);
kfree(ba->ba_1_org);
k++;
}
- kfree(nic->ba[i][j]);
+ kfree(mac_control->rings[i].ba[j]);
}
- kfree(nic->ba[i]);
+ if (mac_control->rings[i].ba)
+ kfree(mac_control->rings[i].ba);
}
-end_free:
#endif
if (mac_control->stats_mem) {
@@ -605,12 +680,93 @@ end_free:
}
}
-/**
- * init_nic - Initialization of hardware
+/**
+ * s2io_verify_pci_mode - Returns the PCI/PCI-X mode the adapter is
+ * operating in, or -1 if the mode is unknown.
+ */
+
+static int s2io_verify_pci_mode(nic_t *nic)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ register u64 val64 = 0;
+ int mode;
+
+ val64 = readq(&bar0->pci_mode);
+ mode = (u8)GET_PCI_MODE(val64);
+
+ if ( val64 & PCI_MODE_UNKNOWN_MODE)
+ return -1; /* Unknown PCI mode */
+ return mode;
+}
+
+
+/**
+ * s2io_print_pci_mode - Prints the bus width and PCI/PCI-X mode the
+ * adapter is operating in, and records the bus speed in the config.
+ */
+static int s2io_print_pci_mode(nic_t *nic)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ register u64 val64 = 0;
+ int mode;
+ struct config_param *config = &nic->config;
+
+ val64 = readq(&bar0->pci_mode);
+ mode = (u8)GET_PCI_MODE(val64);
+
+ if ( val64 & PCI_MODE_UNKNOWN_MODE)
+ return -1; /* Unknown PCI mode */
+
+ if (val64 & PCI_MODE_32_BITS) {
+ DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
+ } else {
+ DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
+ }
+
+ switch(mode) {
+ case PCI_MODE_PCI_33:
+ DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
+ config->bus_speed = 33;
+ break;
+ case PCI_MODE_PCI_66:
+ DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
+ config->bus_speed = 133;
+ break;
+ case PCI_MODE_PCIX_M1_66:
+ DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
+ config->bus_speed = 133; /* Herc doubles the clock rate */
+ break;
+ case PCI_MODE_PCIX_M1_100:
+ DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
+ config->bus_speed = 200;
+ break;
+ case PCI_MODE_PCIX_M1_133:
+ DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
+ config->bus_speed = 266;
+ break;
+ case PCI_MODE_PCIX_M2_66:
+ DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
+ config->bus_speed = 133;
+ break;
+ case PCI_MODE_PCIX_M2_100:
+ DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
+ config->bus_speed = 200;
+ break;
+ case PCI_MODE_PCIX_M2_133:
+ DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
+ config->bus_speed = 266;
+ break;
+ default:
+ return -1; /* Unsupported bus speed */
+ }
+
+ return mode;
+}
+
+/**
+ * init_nic - Initialization of hardware
+ * @nic: device private variable
- * Description: The function sequentially configures every block
- * of the H/W from their reset values.
- * Return Value: SUCCESS on success and
+ * Description: The function sequentially configures every block
+ * of the H/W from their reset values.
+ * Return Value: SUCCESS on success and
* '-1' on failure (endian settings incorrect).
*/
@@ -626,21 +782,32 @@ static int init_nic(struct s2io_nic *nic)
struct config_param *config;
int mdio_cnt = 0, dtx_cnt = 0;
unsigned long long mem_share;
+ int mem_size;
mac_control = &nic->mac_control;
config = &nic->config;
- /* Initialize swapper control register */
- if (s2io_set_swapper(nic)) {
+ /* Set the swapper control register on the card */
+ if(s2io_set_swapper(nic)) {
DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
return -1;
}
+ /*
+ * Herc requires EOI to be removed from reset before XGXS, so..
+ */
+ if (nic->device_type & XFRAME_II_DEVICE) {
+ val64 = 0xA500000000ULL;
+ writeq(val64, &bar0->sw_reset);
+ msleep(500);
+ val64 = readq(&bar0->sw_reset);
+ }
+
/* Remove XGXS from reset state */
val64 = 0;
writeq(val64, &bar0->sw_reset);
- val64 = readq(&bar0->sw_reset);
msleep(500);
+ val64 = readq(&bar0->sw_reset);
/* Enable Receiving broadcasts */
add = &bar0->mac_cfg;
@@ -660,48 +827,58 @@ static int init_nic(struct s2io_nic *nic)
val64 = dev->mtu;
writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
- /*
- * Configuring the XAUI Interface of Xena.
+ /*
+ * Configuring the XAUI Interface of Xena.
* ***************************************
- * To Configure the Xena's XAUI, one has to write a series
- * of 64 bit values into two registers in a particular
- * sequence. Hence a macro 'SWITCH_SIGN' has been defined
- * which will be defined in the array of configuration values
- * (default_dtx_cfg & default_mdio_cfg) at appropriate places
- * to switch writing from one regsiter to another. We continue
+ * To Configure the Xena's XAUI, one has to write a series
+ * of 64 bit values into two registers in a particular
+ * sequence. Hence a macro 'SWITCH_SIGN' has been defined
+ * which will be defined in the array of configuration values
+ * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
+ * to switch writing from one register to another. We continue
* writing these values until we encounter the 'END_SIGN' macro.
- * For example, After making a series of 21 writes into
- * dtx_control register the 'SWITCH_SIGN' appears and hence we
+ * For example, After making a series of 21 writes into
+ * dtx_control register the 'SWITCH_SIGN' appears and hence we
* start writing into mdio_control until we encounter END_SIGN.
*/
- while (1) {
- dtx_cfg:
- while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
- if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
- dtx_cnt++;
- goto mdio_cfg;
- }
- SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
+ if (nic->device_type & XFRAME_II_DEVICE) {
+ while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
+ SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
&bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
+ if (dtx_cnt & 0x1)
+ msleep(1); /* Necessary!! */
dtx_cnt++;
}
- mdio_cfg:
- while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
- if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
+ } else {
+ while (1) {
+ dtx_cfg:
+ while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
+ if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
+ dtx_cnt++;
+ goto mdio_cfg;
+ }
+ SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
+ &bar0->dtx_control, UF);
+ val64 = readq(&bar0->dtx_control);
+ dtx_cnt++;
+ }
+ mdio_cfg:
+ while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
+ if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
+ mdio_cnt++;
+ goto dtx_cfg;
+ }
+ SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
+ &bar0->mdio_control, UF);
+ val64 = readq(&bar0->mdio_control);
mdio_cnt++;
+ }
+ if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
+ (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
+ break;
+ } else {
goto dtx_cfg;
}
- SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
- &bar0->mdio_control, UF);
- val64 = readq(&bar0->mdio_control);
- mdio_cnt++;
- }
- if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
- (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
- break;
- } else {
- goto dtx_cfg;
}
}
@@ -748,12 +925,20 @@ static int init_nic(struct s2io_nic *nic)
val64 |= BIT(0); /* To enable the FIFO partition. */
writeq(val64, &bar0->tx_fifo_partition_0);
+ /*
+ * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
+ * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
+ */
+ if ((nic->device_type == XFRAME_I_DEVICE) &&
+ (get_xena_rev_id(nic->pdev) < 4))
+ writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
+
val64 = readq(&bar0->tx_fifo_partition_0);
DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
&bar0->tx_fifo_partition_0, (unsigned long long) val64);
- /*
- * Initialization of Tx_PA_CONFIG register to ignore packet
+ /*
+ * Initialization of Tx_PA_CONFIG register to ignore packet
* integrity checking.
*/
val64 = readq(&bar0->tx_pa_cfg);
@@ -770,85 +955,304 @@ static int init_nic(struct s2io_nic *nic)
}
writeq(val64, &bar0->rx_queue_priority);
- /*
- * Allocating equal share of memory to all the
+ /*
+ * Allocating equal share of memory to all the
* configured Rings.
*/
val64 = 0;
+ if (nic->device_type & XFRAME_II_DEVICE)
+ mem_size = 32;
+ else
+ mem_size = 64;
+
for (i = 0; i < config->rx_ring_num; i++) {
switch (i) {
case 0:
- mem_share = (64 / config->rx_ring_num +
- 64 % config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num +
+ mem_size % config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
continue;
case 1:
- mem_share = (64 / config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
continue;
case 2:
- mem_share = (64 / config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
continue;
case 3:
- mem_share = (64 / config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
continue;
case 4:
- mem_share = (64 / config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
continue;
case 5:
- mem_share = (64 / config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
continue;
case 6:
- mem_share = (64 / config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
continue;
case 7:
- mem_share = (64 / config->rx_ring_num);
+ mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
continue;
}
}
writeq(val64, &bar0->rx_queue_cfg);
- /*
- * Initializing the Tx round robin registers to 0.
- * Filling Tx and Rx round robin registers as per the
- * number of FIFOs and Rings is still TODO.
- */
- writeq(0, &bar0->tx_w_round_robin_0);
- writeq(0, &bar0->tx_w_round_robin_1);
- writeq(0, &bar0->tx_w_round_robin_2);
- writeq(0, &bar0->tx_w_round_robin_3);
- writeq(0, &bar0->tx_w_round_robin_4);
-
- /*
- * TODO
- * Disable Rx steering. Hard coding all packets be steered to
- * Queue 0 for now.
+ /*
+ * Filling Tx round robin registers
+ * as per the number of FIFOs
*/
- val64 = 0x8080808080808080ULL;
- writeq(val64, &bar0->rts_qos_steering);
+ switch (config->tx_fifo_num) {
+ case 1:
+ val64 = 0x0000000000000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 2:
+ val64 = 0x0000010000010000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0100000100000100ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0001000001000001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0000010000010000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0100000000000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 3:
+ val64 = 0x0001000102000001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0001020000010001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0200000100010200ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0001000102000001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0001020000000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 4:
+ val64 = 0x0001020300010200ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0100000102030001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0200010000010203ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0001020001000001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0203000100000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 5:
+ val64 = 0x0001000203000102ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0001020001030004ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0001000203000102ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0001020001030004ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0001000000000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 6:
+ val64 = 0x0001020304000102ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0304050001020001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0203000100000102ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0304000102030405ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0001000200000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 7:
+ val64 = 0x0001020001020300ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0102030400010203ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0405060001020001ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0304050000010200ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0102030000000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ case 8:
+ val64 = 0x0001020300040105ULL;
+ writeq(val64, &bar0->tx_w_round_robin_0);
+ val64 = 0x0200030106000204ULL;
+ writeq(val64, &bar0->tx_w_round_robin_1);
+ val64 = 0x0103000502010007ULL;
+ writeq(val64, &bar0->tx_w_round_robin_2);
+ val64 = 0x0304010002060500ULL;
+ writeq(val64, &bar0->tx_w_round_robin_3);
+ val64 = 0x0103020400000000ULL;
+ writeq(val64, &bar0->tx_w_round_robin_4);
+ break;
+ }
+
+ /* Filling the Rx round robin registers as per the
+ * number of Rings and steering based on QoS.
+ */
+ switch (config->rx_ring_num) {
+ case 1:
+ val64 = 0x8080808080808080ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 2:
+ val64 = 0x0000010000010000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0100000100000100ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0001000001000001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0000010000010000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0100000000000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080808040404040ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 3:
+ val64 = 0x0001000102000001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0001020000010001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0200000100010200ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0001000102000001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001020000000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080804040402020ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 4:
+ val64 = 0x0001020300010200ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0100000102030001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0200010000010203ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0001020001000001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0203000100000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080404020201010ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 5:
+ val64 = 0x0001000203000102ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0001020001030004ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0001000203000102ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0001020001030004ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001000000000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080404020201008ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 6:
+ val64 = 0x0001020304000102ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0304050001020001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0203000100000102ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0304000102030405ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0001000200000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080404020100804ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 7:
+ val64 = 0x0001020001020300ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0102030400010203ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0405060001020001ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0304050000010200ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0102030000000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8080402010080402ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ case 8:
+ val64 = 0x0001020300040105ULL;
+ writeq(val64, &bar0->rx_w_round_robin_0);
+ val64 = 0x0200030106000204ULL;
+ writeq(val64, &bar0->rx_w_round_robin_1);
+ val64 = 0x0103000502010007ULL;
+ writeq(val64, &bar0->rx_w_round_robin_2);
+ val64 = 0x0304010002060500ULL;
+ writeq(val64, &bar0->rx_w_round_robin_3);
+ val64 = 0x0103020400000000ULL;
+ writeq(val64, &bar0->rx_w_round_robin_4);
+
+ val64 = 0x8040201008040201ULL;
+ writeq(val64, &bar0->rts_qos_steering);
+ break;
+ }
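+
+ /*
+ * As with Tx, each byte of rx_w_round_robin_* appears to pick the
+ * ring serviced in a slot, while each byte of rts_qos_steering looks
+ * like a per-priority ring bitmask (0x80 = ring 0, 0x40 = ring 1,
+ * ...); e.g. 0x8080808040404040 steers priorities 0-3 to ring 0 and
+ * 4-7 to ring 1. Inferred from the values above, not from the spec.
+ */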
/* UDP Fix */
val64 = 0;
- for (i = 1; i < 8; i++)
+ for (i = 0; i < 8; i++)
+ writeq(val64, &bar0->rts_frm_len_n[i]);
+
+ /* Set the default rts frame length for the rings configured */
+ val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
+ for (i = 0; i < config->rx_ring_num; i++)
writeq(val64, &bar0->rts_frm_len_n[i]);
- /* Set rts_frm_len register for fifo 0 */
- writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
- &bar0->rts_frm_len_n[0]);
+ /* Set the frame length for the configured rings as
+ * desired by the user.
+ */
+ for (i = 0; i < config->rx_ring_num; i++) {
+ /* If rts_frm_len[i] == 0 it is assumed that the user has not
+ * specified frame length steering for this ring.
+ * If the user did provide a frame length, program the
+ * rts_frm_len register with that value; otherwise leave the
+ * default programmed above in place.
+ */
+ if (rts_frm_len[i] != 0) {
+ writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
+ &bar0->rts_frm_len_n[i]);
+ }
+ }
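+ /*
+ * rts_frm_len[] is presumably a per-ring module load parameter; a
+ * value of 0 leaves the MTU based default programmed above in place.
+ */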
- /* Enable statistics */
+ /* Program statistics memory */
writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
- val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
- STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
- writeq(val64, &bar0->stat_cfg);
- /*
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ val64 = STAT_BC(0x320);
+ writeq(val64, &bar0->stat_byte_cnt);
+ }
+
+ /*
* Initializing the sampling rate for the device to calculate the
* bandwidth utilization.
*/
@@ -857,30 +1261,38 @@ static int init_nic(struct s2io_nic *nic)
writeq(val64, &bar0->mac_link_util);
- /*
- * Initializing the Transmit and Receive Traffic Interrupt
+ /*
+ * Initializing the Transmit and Receive Traffic Interrupt
* Scheme.
*/
- /* TTI Initialization. Default Tx timer gets us about
+ /*
+ * TTI Initialization. Default Tx timer gets us about
* 250 interrupts per sec. Continuous interrupts are enabled
* by default.
*/
- val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
- TTI_DATA1_MEM_TX_URNG_A(0xA) |
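+ /*
+ * On Xframe-II the Tx timer is presumably scaled with the PCI-X bus
+ * speed (bus_speed assumed to be in MHz) so the ~250 interrupts/sec
+ * target holds regardless of the bus clock; Xframe-I keeps the fixed
+ * 0x2078 value.
+ */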
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ int count = (nic->config.bus_speed * 125)/2;
+ val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
+ } else {
+ val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
+ }
+ val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
TTI_DATA1_MEM_TX_URNG_B(0x10) |
- TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
- TTI_DATA1_MEM_TX_TIMER_CI_EN;
+ TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
+ if (use_continuous_tx_intrs)
+ val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
writeq(val64, &bar0->tti_data1_mem);
val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
TTI_DATA2_MEM_TX_UFC_B(0x20) |
- TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
+ TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
writeq(val64, &bar0->tti_data2_mem);
val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
writeq(val64, &bar0->tti_command_mem);
- /*
+ /*
* Once the operation completes, the Strobe bit of the command
 * register will be reset. We poll for this particular condition.
* We wait for a maximum of 500ms for the operation to complete,
@@ -901,52 +1313,97 @@ static int init_nic(struct s2io_nic *nic)
time++;
}
- /* RTI Initialization */
- val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
- RTI_DATA1_MEM_RX_URNG_A(0xA) |
- RTI_DATA1_MEM_RX_URNG_B(0x10) |
- RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
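+ /*
+ * In bimodal mode the Rx rings are presumably serviced through the
+ * per-ring TTI entries programmed at offsets 0x38+k below, so the
+ * usual RTI programming in the else branch is skipped.
+ */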
+ if (nic->config.bimodal) {
+ int k = 0;
+ for (k = 0; k < config->rx_ring_num; k++) {
+ val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
+ val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
+ writeq(val64, &bar0->tti_command_mem);
+
+ /*
+ * Once the operation completes, the Strobe bit of the command
+ * register will be reset. We poll for this particular condition.
+ * We wait for a maximum of 500ms for the operation to complete,
+ * if it's not complete by then we return error.
+ */
+ time = 0;
+ while (TRUE) {
+ val64 = readq(&bar0->tti_command_mem);
+ if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
+ break;
+ }
+ if (time > 10) {
+ DBG_PRINT(ERR_DBG,
+ "%s: TTI init Failed\n",
+ dev->name);
+ return -1;
+ }
+ time++;
+ msleep(50);
+ }
+ }
+ } else {
- writeq(val64, &bar0->rti_data1_mem);
+ /* RTI Initialization */
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ /*
+ * Programmed to generate approximately 500
+ * interrupts per second
+ */
+ int count = (nic->config.bus_speed * 125)/4;
+ val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
+ } else {
+ val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
+ }
+ val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
+ RTI_DATA1_MEM_RX_URNG_B(0x10) |
+ RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
- val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
- RTI_DATA2_MEM_RX_UFC_B(0x2) |
- RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
- writeq(val64, &bar0->rti_data2_mem);
+ writeq(val64, &bar0->rti_data1_mem);
- val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
- writeq(val64, &bar0->rti_command_mem);
+ val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
+ RTI_DATA2_MEM_RX_UFC_B(0x2) |
+ RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
+ writeq(val64, &bar0->rti_data2_mem);
- /*
- * Once the operation completes, the Strobe bit of the command
- * register will be reset. We poll for this particular condition
- * We wait for a maximum of 500ms for the operation to complete,
- * if it's not complete by then we return error.
- */
- time = 0;
- while (TRUE) {
- val64 = readq(&bar0->rti_command_mem);
- if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
- break;
- }
- if (time > 10) {
- DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
- dev->name);
- return -1;
+ for (i = 0; i < config->rx_ring_num; i++) {
+ val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
+ | RTI_CMD_MEM_OFFSET(i);
+ writeq(val64, &bar0->rti_command_mem);
+
+ /*
+ * Once the operation completes, the Strobe bit of the
+ * command register will be reset. We poll for this
+ * particular condition. We wait for a maximum of 500ms
+ * for the operation to complete, if it's not complete
+ * by then we return error.
+ */
+ time = 0;
+ while (TRUE) {
+ val64 = readq(&bar0->rti_command_mem);
+ if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
+ break;
+ }
+ if (time > 10) {
+ DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
+ dev->name);
+ return -1;
+ }
+ time++;
+ msleep(50);
+ }
}
- time++;
- msleep(50);
}
- /*
- * Initializing proper values as Pause threshold into all
+ /*
+ * Initializing proper values as Pause threshold into all
* the 8 Queues on Rx side.
*/
writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
/* Disable RMAC PAD STRIPPING */
- add = &bar0->mac_cfg;
+ add = (void *) &bar0->mac_cfg;
val64 = readq(&bar0->mac_cfg);
val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
@@ -955,8 +1412,8 @@ static int init_nic(struct s2io_nic *nic)
writel((u32) (val64 >> 32), (add + 4));
val64 = readq(&bar0->mac_cfg);
- /*
- * Set the time value to be inserted in the pause frame
+ /*
+ * Set the time value to be inserted in the pause frame
* generated by xena.
*/
val64 = readq(&bar0->rmac_pause_cfg);
@@ -964,7 +1421,7 @@ static int init_nic(struct s2io_nic *nic)
val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
writeq(val64, &bar0->rmac_pause_cfg);
- /*
+ /*
* Set the Threshold Limit for Generating the pause frame
* If the amount of data in any Queue exceeds ratio of
* (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
@@ -988,25 +1445,54 @@ static int init_nic(struct s2io_nic *nic)
}
writeq(val64, &bar0->mc_pause_thresh_q4q7);
- /*
- * TxDMA will stop Read request if the number of read split has
+ /*
+ * TxDMA will stop Read request if the number of read split has
* exceeded the limit pointed by shared_splits
*/
val64 = readq(&bar0->pic_control);
val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
writeq(val64, &bar0->pic_control);
+ /*
+ * Programming the Herc to split every write transaction
+ * that does not start on an ADB to reduce disconnects.
+ */
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ val64 = WREQ_SPLIT_MASK_SET_MASK(255);
+ writeq(val64, &bar0->wreq_split_mask);
+ }
+
+ /* Setting Link stability period to 64 ms */
+ if (nic->device_type == XFRAME_II_DEVICE) {
+ val64 = MISC_LINK_STABILITY_PRD(3);
+ writeq(val64, &bar0->misc_control);
+ }
+
return SUCCESS;
}
+#define LINK_UP_DOWN_INTERRUPT 1
+#define MAC_RMAC_ERR_TIMER 2
-/**
- * en_dis_able_nic_intrs - Enable or Disable the interrupts
+#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
+#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
+#else
+int s2io_link_fault_indication(nic_t *nic)
+{
+ if (nic->device_type == XFRAME_II_DEVICE)
+ return LINK_UP_DOWN_INTERRUPT;
+ else
+ return MAC_RMAC_ERR_TIMER;
+}
+#endif
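+/*
+ * Link fault reporting: with MSI/MSI-X the driver presumably polls the
+ * link state via the RMAC error timer, while in INTA mode a Hercules
+ * (Xframe-II) adapter can use the GPIO link up/down interrupt directly;
+ * see the GPIO unmasking in en_dis_able_nic_intrs() below.
+ */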
+
+/**
+ * en_dis_able_nic_intrs - Enable or Disable the interrupts
* @nic: device private variable,
* @mask: A mask indicating which Intr block must be modified and,
* @flag: A flag indicating whether to enable or disable the Intrs.
* Description: This function will either disable or enable the interrupts
- * depending on the flag argument. The mask argument can be used to
- * enable/disable any Intr block.
+ * depending on the flag argument. The mask argument can be used to
+ * enable/disable any Intr block.
* Return Value: NONE.
*/
@@ -1024,20 +1510,31 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
temp64 = readq(&bar0->general_int_mask);
temp64 &= ~((u64) val64);
writeq(temp64, &bar0->general_int_mask);
- /*
- * Disabled all PCIX, Flash, MDIO, IIC and GPIO
- * interrupts for now.
- * TODO
+ /*
+ * On a Hercules adapter enable the GPIO link interrupt;
+ * otherwise keep all PCIX, Flash, MDIO, IIC and GPIO
+ * interrupts disabled for now.
+ * TODO
*/
- writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
- /*
+ if (s2io_link_fault_indication(nic) ==
+ LINK_UP_DOWN_INTERRUPT ) {
+ temp64 = readq(&bar0->pic_int_mask);
+ temp64 &= ~((u64) PIC_INT_GPIO);
+ writeq(temp64, &bar0->pic_int_mask);
+ temp64 = readq(&bar0->gpio_int_mask);
+ temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
+ writeq(temp64, &bar0->gpio_int_mask);
+ } else {
+ writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
+ }
+ /*
* No MSI Support is available presently, so TTI and
* RTI interrupts are also disabled.
*/
} else if (flag == DISABLE_INTRS) {
- /*
- * Disable PIC Intrs in the general
- * intr mask register
+ /*
+ * Disable PIC Intrs in the general
+ * intr mask register
*/
writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
temp64 = readq(&bar0->general_int_mask);
@@ -1055,27 +1552,27 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
temp64 = readq(&bar0->general_int_mask);
temp64 &= ~((u64) val64);
writeq(temp64, &bar0->general_int_mask);
- /*
- * Keep all interrupts other than PFC interrupt
+ /*
+ * Keep all interrupts other than PFC interrupt
* and PCC interrupt disabled in DMA level.
*/
val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
TXDMA_PCC_INT_M);
writeq(val64, &bar0->txdma_int_mask);
- /*
- * Enable only the MISC error 1 interrupt in PFC block
+ /*
+ * Enable only the MISC error 1 interrupt in PFC block
*/
val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
writeq(val64, &bar0->pfc_err_mask);
- /*
- * Enable only the FB_ECC error interrupt in PCC block
+ /*
+ * Enable only the FB_ECC error interrupt in PCC block
*/
val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
writeq(val64, &bar0->pcc_err_mask);
} else if (flag == DISABLE_INTRS) {
- /*
- * Disable TxDMA Intrs in the general intr mask
- * register
+ /*
+ * Disable TxDMA Intrs in the general intr mask
+ * register
*/
writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
@@ -1093,15 +1590,15 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
temp64 = readq(&bar0->general_int_mask);
temp64 &= ~((u64) val64);
writeq(temp64, &bar0->general_int_mask);
- /*
- * All RxDMA block interrupts are disabled for now
- * TODO
+ /*
+ * All RxDMA block interrupts are disabled for now
+ * TODO
*/
writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
} else if (flag == DISABLE_INTRS) {
- /*
- * Disable RxDMA Intrs in the general intr mask
- * register
+ /*
+ * Disable RxDMA Intrs in the general intr mask
+ * register
*/
writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
temp64 = readq(&bar0->general_int_mask);
@@ -1118,22 +1615,13 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
temp64 = readq(&bar0->general_int_mask);
temp64 &= ~((u64) val64);
writeq(temp64, &bar0->general_int_mask);
- /*
- * All MAC block error interrupts are disabled for now
- * except the link status change interrupt.
+ /*
+ * All MAC block error interrupts are disabled for now
* TODO
*/
- val64 = MAC_INT_STATUS_RMAC_INT;
- temp64 = readq(&bar0->mac_int_mask);
- temp64 &= ~((u64) val64);
- writeq(temp64, &bar0->mac_int_mask);
-
- val64 = readq(&bar0->mac_rmac_err_mask);
- val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
- writeq(val64, &bar0->mac_rmac_err_mask);
} else if (flag == DISABLE_INTRS) {
- /*
- * Disable MAC Intrs in the general intr mask register
+ /*
+ * Disable MAC Intrs in the general intr mask register
*/
writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
writeq(DISABLE_ALL_INTRS,
@@ -1152,14 +1640,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
temp64 = readq(&bar0->general_int_mask);
temp64 &= ~((u64) val64);
writeq(temp64, &bar0->general_int_mask);
- /*
+ /*
* All XGXS block error interrupts are disabled for now
- * TODO
+ * TODO
*/
writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
} else if (flag == DISABLE_INTRS) {
- /*
- * Disable MC Intrs in the general intr mask register
+ /*
+ * Disable MC Intrs in the general intr mask register
*/
writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
temp64 = readq(&bar0->general_int_mask);
@@ -1175,11 +1663,11 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
temp64 = readq(&bar0->general_int_mask);
temp64 &= ~((u64) val64);
writeq(temp64, &bar0->general_int_mask);
- /*
- * All MC block error interrupts are disabled for now
- * TODO
+ /*
+ * Enable all MC Intrs.
*/
- writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
+ writeq(0x0, &bar0->mc_int_mask);
+ writeq(0x0, &bar0->mc_err_mask);
} else if (flag == DISABLE_INTRS) {
/*
* Disable MC Intrs in the general intr mask register
@@ -1199,14 +1687,14 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
temp64 = readq(&bar0->general_int_mask);
temp64 &= ~((u64) val64);
writeq(temp64, &bar0->general_int_mask);
- /*
+ /*
* Enable all the Tx side interrupts
- * writing 0 Enables all 64 TX interrupt levels
+ * writing 0 Enables all 64 TX interrupt levels
*/
writeq(0x0, &bar0->tx_traffic_mask);
} else if (flag == DISABLE_INTRS) {
- /*
- * Disable Tx Traffic Intrs in the general intr mask
+ /*
+ * Disable Tx Traffic Intrs in the general intr mask
* register.
*/
writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
@@ -1226,8 +1714,8 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
/* writing 0 Enables all 8 RX interrupt levels */
writeq(0x0, &bar0->rx_traffic_mask);
} else if (flag == DISABLE_INTRS) {
- /*
- * Disable Rx Traffic Intrs in the general intr mask
+ /*
+ * Disable Rx Traffic Intrs in the general intr mask
* register.
*/
writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
@@ -1238,24 +1726,66 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
}
}
-/**
- * verify_xena_quiescence - Checks whether the H/W is ready
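+/*
+ * Helper for verify_xena_quiescence(): checks the RMAC PCC idle and
+ * RC PRC quiescent bits in the adapter status word. Xena revisions
+ * >= 4 and Hercules use the single PCC_IDLE bit, older Xenas use
+ * PCC_FOUR_IDLE; 'flag' mirrors whether the adapter enable bit was
+ * ever written, as documented for the caller.
+ */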
+static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
+{
+ int ret = 0;
+
+ if (flag == FALSE) {
+ if ((!herc && (rev_id >= 4)) || herc) {
+ if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
+ ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+ ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
+ ret = 1;
+ }
+ } else {
+ if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
+ ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+ ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
+ ret = 1;
+ }
+ }
+ } else {
+ if ((!herc && (rev_id >= 4)) || herc) {
+ if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
+ ADAPTER_STATUS_RMAC_PCC_IDLE) &&
+ (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
+ ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+ ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
+ ret = 1;
+ }
+ } else {
+ if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
+ ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
+ (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
+ ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+ ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
+ ret = 1;
+ }
+ }
+ }
+
+ return ret;
+}
+/**
+ * verify_xena_quiescence - Checks whether the H/W is ready
* @val64 : Value read from adapter status register.
* @flag : indicates if the adapter enable bit was ever written once
* before.
* Description: Returns whether the H/W is ready to go or not. Depending
- * on whether adapter enable bit was written or not the comparison
+ * on whether adapter enable bit was written or not the comparison
* differs and the calling function passes the input argument flag to
* indicate this.
- * Return: 1 If xena is quiescence
+ * Return: 1 if Xena is quiescent
 * 0 if Xena is not quiescent
*/
-static int verify_xena_quiescence(u64 val64, int flag)
+static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
{
- int ret = 0;
+ int ret = 0, herc;
u64 tmp64 = ~((u64) val64);
+ int rev_id = get_xena_rev_id(sp->pdev);
+ herc = (sp->device_type == XFRAME_II_DEVICE);
if (!
(tmp64 &
(ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
@@ -1263,25 +1793,7 @@ static int verify_xena_quiescence(u64 val64, int flag)
ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
ADAPTER_STATUS_P_PLL_LOCK))) {
- if (flag == FALSE) {
- if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
- ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
- ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
-
- ret = 1;
-
- }
- } else {
- if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
- ADAPTER_STATUS_RMAC_PCC_IDLE) &&
- (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
- ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
- ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
-
- ret = 1;
-
- }
- }
+ ret = check_prc_pcc_state(val64, flag, rev_id, herc);
}
return ret;
@@ -1290,12 +1802,12 @@ static int verify_xena_quiescence(u64 val64, int flag)
/**
* fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
- * Description :
+ * Description :
* New procedure to clear mac address reading problems on Alpha platforms
*
*/
-static void fix_mac_address(nic_t * sp)
+void fix_mac_address(nic_t * sp)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
u64 val64;
@@ -1303,20 +1815,21 @@ static void fix_mac_address(nic_t * sp)
while (fix_mac[i] != END_SIGN) {
writeq(fix_mac[i++], &bar0->gpio_control);
+ udelay(10);
val64 = readq(&bar0->gpio_control);
}
}
/**
- * start_nic - Turns the device on
+ * start_nic - Turns the device on
* @nic : device private variable.
- * Description:
- * This function actually turns the device on. Before this function is
- * called,all Registers are configured from their reset states
- * and shared memory is allocated but the NIC is still quiescent. On
+ * Description:
+ * This function actually turns the device on. Before this function is
+ * called, all registers are configured from their reset states
+ * and shared memory is allocated but the NIC is still quiescent. On
* calling this function, the device interrupts are cleared and the NIC is
* literally switched on by writing into the adapter control register.
- * Return Value:
+ * Return Value:
* SUCCESS on success and -1 on failure.
*/
@@ -1325,8 +1838,8 @@ static int start_nic(struct s2io_nic *nic)
XENA_dev_config_t __iomem *bar0 = nic->bar0;
struct net_device *dev = nic->dev;
register u64 val64 = 0;
- u16 interruptible, i;
- u16 subid;
+ u16 interruptible;
+ u16 subid, i;
mac_info_t *mac_control;
struct config_param *config;
@@ -1335,10 +1848,12 @@ static int start_nic(struct s2io_nic *nic)
/* PRC Initialization and configuration */
for (i = 0; i < config->rx_ring_num; i++) {
- writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
+ writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
&bar0->prc_rxd0_n[i]);
val64 = readq(&bar0->prc_ctrl_n[i]);
+ if (nic->config.bimodal)
+ val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
#ifndef CONFIG_2BUFF_MODE
val64 |= PRC_CTRL_RC_ENABLED;
#else
@@ -1354,7 +1869,7 @@ static int start_nic(struct s2io_nic *nic)
writeq(val64, &bar0->rx_pa_cfg);
#endif
- /*
+ /*
* Enabling MC-RLDRAM. After enabling the device, we timeout
* for around 100ms, which is approximately the time required
* for the device to be ready for operation.
@@ -1364,27 +1879,27 @@ static int start_nic(struct s2io_nic *nic)
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
val64 = readq(&bar0->mc_rldram_mrs);
- msleep(100); /* Delay by around 100 ms. */
+ msleep(100); /* Delay by around 100 ms. */
/* Enabling ECC Protection. */
val64 = readq(&bar0->adapter_control);
val64 &= ~ADAPTER_ECC_EN;
writeq(val64, &bar0->adapter_control);
- /*
- * Clearing any possible Link state change interrupts that
+ /*
+ * Clearing any possible Link state change interrupts that
* could have popped up just before Enabling the card.
*/
val64 = readq(&bar0->mac_rmac_err_reg);
if (val64)
writeq(val64, &bar0->mac_rmac_err_reg);
- /*
- * Verify if the device is ready to be enabled, if so enable
+ /*
+ * Verify if the device is ready to be enabled, if so enable
* it.
*/
val64 = readq(&bar0->adapter_status);
- if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
+ if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
(unsigned long long) val64);
@@ -1392,16 +1907,18 @@ static int start_nic(struct s2io_nic *nic)
}
/* Enable select interrupts */
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
- RX_MAC_INTR;
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+ interruptible |= TX_MAC_INTR | RX_MAC_INTR;
+
en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
- /*
+ /*
* With some switches, link might be already up at this point.
- * Because of this weird behavior, when we enable laser,
- * we may not get link. We need to handle this. We cannot
- * figure out which switch is misbehaving. So we are forced to
- * make a global change.
+ * Because of this weird behavior, when we enable laser,
+ * we may not get link. We need to handle this. We cannot
+ * figure out which switch is misbehaving. So we are forced to
+ * make a global change.
*/
/* Enabling Laser. */
@@ -1411,44 +1928,30 @@ static int start_nic(struct s2io_nic *nic)
/* SXE-002: Initialize link and activity LED */
subid = nic->pdev->subsystem_device;
- if ((subid & 0xFF) >= 0x07) {
+ if (((subid & 0xFF) >= 0x07) &&
+ (nic->device_type == XFRAME_I_DEVICE)) {
val64 = readq(&bar0->gpio_control);
val64 |= 0x0000800000000000ULL;
writeq(val64, &bar0->gpio_control);
val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *) bar0 + 0x2700);
+ writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
}
- /*
- * Don't see link state interrupts on certain switches, so
+ /*
+ * Don't see link state interrupts on certain switches, so
* directly scheduling a link state task from here.
*/
schedule_work(&nic->set_link_task);
- /*
- * Here we are performing soft reset on XGXS to
- * force link down. Since link is already up, we will get
- * link state change interrupt after this reset
- */
- SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
- udelay(50);
- SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
- udelay(50);
- SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
- val64 = readq(&bar0->dtx_control);
- udelay(50);
-
return SUCCESS;
}
-/**
- * free_tx_buffers - Free all queued Tx buffers
+/**
+ * free_tx_buffers - Free all queued Tx buffers
* @nic : device private variable.
- * Description:
+ * Description:
* Free all queued Tx buffers.
- * Return Value: void
+ * Return Value: void
*/
static void free_tx_buffers(struct s2io_nic *nic)
@@ -1459,39 +1962,61 @@ static void free_tx_buffers(struct s2io_nic *nic)
int i, j;
mac_info_t *mac_control;
struct config_param *config;
- int cnt = 0;
+ int cnt = 0, frg_cnt, k;
mac_control = &nic->mac_control;
config = &nic->config;
for (i = 0; i < config->tx_fifo_num; i++) {
for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
- txdp = (TxD_t *) nic->list_info[i][j].
+ txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
list_virt_addr;
skb =
(struct sk_buff *) ((unsigned long) txdp->
Host_Control);
if (skb == NULL) {
- memset(txdp, 0, sizeof(TxD_t));
+ memset(txdp, 0, sizeof(TxD_t) *
+ config->max_txds);
continue;
}
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ txdp->Buffer_Pointer,
+ skb->len - skb->data_len,
+ PCI_DMA_TODEVICE);
+ if (frg_cnt) {
+ TxD_t *temp;
+ temp = txdp;
+ txdp++;
+ /* separate counter: don't clobber outer loop index j */
+ for (k = 0; k < frg_cnt; k++, txdp++) {
+ skb_frag_t *frag =
+ &skb_shinfo(skb)->frags[k];
+ pci_unmap_page(nic->pdev,
+ (dma_addr_t)
+ txdp->
+ Buffer_Pointer,
+ frag->size,
+ PCI_DMA_TODEVICE);
+ }
+ txdp = temp;
+ }
dev_kfree_skb(skb);
- memset(txdp, 0, sizeof(TxD_t));
+ memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
cnt++;
}
DBG_PRINT(INTR_DBG,
"%s:forcibly freeing %d skbs on FIFO%d\n",
dev->name, cnt, i);
- mac_control->tx_curr_get_info[i].offset = 0;
- mac_control->tx_curr_put_info[i].offset = 0;
+ mac_control->fifos[i].tx_curr_get_info.offset = 0;
+ mac_control->fifos[i].tx_curr_put_info.offset = 0;
}
}
-/**
- * stop_nic - To stop the nic
+/**
+ * stop_nic - To stop the nic
 * @nic : device private variable.
- * Description:
- * This function does exactly the opposite of what the start_nic()
+ * Description:
+ * This function does exactly the opposite of what the start_nic()
* function does. This function is called to stop the device.
* Return Value:
* void.
@@ -1509,8 +2034,9 @@ static void stop_nic(struct s2io_nic *nic)
config = &nic->config;
/* Disable all interrupts */
- interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
- RX_MAC_INTR;
+ interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
+ interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+ interruptible |= TX_MAC_INTR | RX_MAC_INTR;
en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Disable PRCs */
@@ -1521,11 +2047,11 @@ static void stop_nic(struct s2io_nic *nic)
}
}
-/**
- * fill_rx_buffers - Allocates the Rx side skbs
+/**
+ * fill_rx_buffers - Allocates the Rx side skbs
* @nic: device private variable
- * @ring_no: ring number
- * Description:
+ * @ring_no: ring number
+ * Description:
* The function allocates Rx side skbs and puts the physical
* address of these buffers into the RxD buffer pointers, so that the NIC
* can DMA the received frame into these locations.
@@ -1533,8 +2059,8 @@ static void stop_nic(struct s2io_nic *nic)
* 1. single buffer,
* 2. three buffer and
* 3. Five buffer modes.
- * Each mode defines how many fragments the received frame will be split
- * up into by the NIC. The frame is split into L3 header, L4 Header,
+ * Each mode defines how many fragments the received frame will be split
+ * up into by the NIC. The frame is split into L3 header, L4 Header,
* L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
* is split into 3 fragments. As of now only single buffer mode is
* supported.
@@ -1542,7 +2068,7 @@ static void stop_nic(struct s2io_nic *nic)
* SUCCESS on success or an appropriate -ve value on failure.
*/
-static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
+int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
struct net_device *dev = nic->dev;
struct sk_buff *skb;
@@ -1550,34 +2076,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
int off, off1, size, block_no, block_no1;
int offset, offset1;
u32 alloc_tab = 0;
- u32 alloc_cnt = nic->pkt_cnt[ring_no] -
- atomic_read(&nic->rx_bufs_left[ring_no]);
+ u32 alloc_cnt;
mac_info_t *mac_control;
struct config_param *config;
#ifdef CONFIG_2BUFF_MODE
RxD_t *rxdpnext;
int nextblk;
- unsigned long tmp;
+ u64 tmp;
buffAdd_t *ba;
dma_addr_t rxdpphys;
#endif
#ifndef CONFIG_S2IO_NAPI
unsigned long flags;
#endif
+ RxD_t *first_rxdp = NULL;
mac_control = &nic->mac_control;
config = &nic->config;
-
+ alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
+ atomic_read(&nic->rx_bufs_left[ring_no]);
size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
while (alloc_tab < alloc_cnt) {
- block_no = mac_control->rx_curr_put_info[ring_no].
+ block_no = mac_control->rings[ring_no].rx_curr_put_info.
block_index;
- block_no1 = mac_control->rx_curr_get_info[ring_no].
+ block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
block_index;
- off = mac_control->rx_curr_put_info[ring_no].offset;
- off1 = mac_control->rx_curr_get_info[ring_no].offset;
+ off = mac_control->rings[ring_no].rx_curr_put_info.offset;
+ off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
#ifndef CONFIG_2BUFF_MODE
offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
@@ -1586,7 +2113,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
#endif
- rxdp = nic->rx_blocks[ring_no][block_no].
+ rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
block_virt_addr + off;
if ((offset == offset1) && (rxdp->Host_Control)) {
DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
@@ -1595,15 +2122,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
}
#ifndef CONFIG_2BUFF_MODE
if (rxdp->Control_1 == END_OF_BLOCK) {
- mac_control->rx_curr_put_info[ring_no].
+ mac_control->rings[ring_no].rx_curr_put_info.
block_index++;
- mac_control->rx_curr_put_info[ring_no].
- block_index %= nic->block_count[ring_no];
- block_no = mac_control->rx_curr_put_info
- [ring_no].block_index;
+ mac_control->rings[ring_no].rx_curr_put_info.
+ block_index %= mac_control->rings[ring_no].block_count;
+ block_no = mac_control->rings[ring_no].rx_curr_put_info.
+ block_index;
off++;
off %= (MAX_RXDS_PER_BLOCK + 1);
- mac_control->rx_curr_put_info[ring_no].offset =
+ mac_control->rings[ring_no].rx_curr_put_info.offset =
off;
rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
@@ -1611,30 +2138,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
}
#ifndef CONFIG_S2IO_NAPI
spin_lock_irqsave(&nic->put_lock, flags);
- nic->put_pos[ring_no] =
+ mac_control->rings[ring_no].put_pos =
(block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
#else
if (rxdp->Host_Control == END_OF_BLOCK) {
- mac_control->rx_curr_put_info[ring_no].
+ mac_control->rings[ring_no].rx_curr_put_info.
block_index++;
- mac_control->rx_curr_put_info[ring_no].
- block_index %= nic->block_count[ring_no];
- block_no = mac_control->rx_curr_put_info
- [ring_no].block_index;
+ mac_control->rings[ring_no].rx_curr_put_info.block_index
+ %= mac_control->rings[ring_no].block_count;
+ block_no = mac_control->rings[ring_no].rx_curr_put_info
+ .block_index;
off = 0;
DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
dev->name, block_no,
(unsigned long long) rxdp->Control_1);
- mac_control->rx_curr_put_info[ring_no].offset =
+ mac_control->rings[ring_no].rx_curr_put_info.offset =
off;
- rxdp = nic->rx_blocks[ring_no][block_no].
+ rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
block_virt_addr;
}
#ifndef CONFIG_S2IO_NAPI
spin_lock_irqsave(&nic->put_lock, flags);
- nic->put_pos[ring_no] = (block_no *
+ mac_control->rings[ring_no].put_pos = (block_no *
(MAX_RXDS_PER_BLOCK + 1)) + off;
spin_unlock_irqrestore(&nic->put_lock, flags);
#endif
@@ -1646,27 +2173,27 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
if (rxdp->Control_2 & BIT(0))
#endif
{
- mac_control->rx_curr_put_info[ring_no].
+ mac_control->rings[ring_no].rx_curr_put_info.
offset = off;
goto end;
}
#ifdef CONFIG_2BUFF_MODE
- /*
- * RxDs Spanning cache lines will be replenished only
- * if the succeeding RxD is also owned by Host. It
- * will always be the ((8*i)+3) and ((8*i)+6)
- * descriptors for the 48 byte descriptor. The offending
+ /*
+ * RxDs Spanning cache lines will be replenished only
+ * if the succeeding RxD is also owned by Host. It
+ * will always be the ((8*i)+3) and ((8*i)+6)
+ * descriptors for the 48 byte descriptor. The offending
* decsriptor is of-course the 3rd descriptor.
*/
- rxdpphys = nic->rx_blocks[ring_no][block_no].
+ rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
block_dma_addr + (off * sizeof(RxD_t));
if (((u64) (rxdpphys)) % 128 > 80) {
- rxdpnext = nic->rx_blocks[ring_no][block_no].
+ rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
block_virt_addr + (off + 1);
if (rxdpnext->Host_Control == END_OF_BLOCK) {
nextblk = (block_no + 1) %
- (nic->block_count[ring_no]);
- rxdpnext = nic->rx_blocks[ring_no]
+ (mac_control->rings[ring_no].block_count);
+ rxdpnext = mac_control->rings[ring_no].rx_blocks
[nextblk].block_virt_addr;
}
if (rxdpnext->Control_2 & BIT(0))
@@ -1682,6 +2209,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
if (!skb) {
DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+ if (first_rxdp) {
+ wmb();
+ first_rxdp->Control_1 |= RXD_OWN_XENA;
+ }
return -ENOMEM;
}
#ifndef CONFIG_2BUFF_MODE
@@ -1692,12 +2223,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
rxdp->Host_Control = (unsigned long) (skb);
- rxdp->Control_1 |= RXD_OWN_XENA;
+ if (alloc_tab & ((1 << rxsync_frequency) - 1))
+ rxdp->Control_1 |= RXD_OWN_XENA;
off++;
off %= (MAX_RXDS_PER_BLOCK + 1);
- mac_control->rx_curr_put_info[ring_no].offset = off;
+ mac_control->rings[ring_no].rx_curr_put_info.offset = off;
#else
- ba = &nic->ba[ring_no][block_no][off];
+ ba = &mac_control->rings[ring_no].ba[block_no][off];
skb_reserve(skb, BUF0_LEN);
tmp = ((unsigned long) skb->data & ALIGN_SIZE);
if (tmp)
@@ -1719,22 +2251,41 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
rxdp->Host_Control = (u64) ((unsigned long) (skb));
- rxdp->Control_1 |= RXD_OWN_XENA;
+ if (alloc_tab & ((1 << rxsync_frequency) - 1))
+ rxdp->Control_1 |= RXD_OWN_XENA;
off++;
- mac_control->rx_curr_put_info[ring_no].offset = off;
+ mac_control->rings[ring_no].rx_curr_put_info.offset = off;
#endif
+ rxdp->Control_2 |= SET_RXD_MARKER;
+
+ if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
+ if (first_rxdp) {
+ wmb();
+ first_rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+ first_rxdp = rxdp;
+ }
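+ /*
+ * Ownership is handed to the adapter in batches: assuming
+ * rxsync_frequency is a load-time parameter, every
+ * (1 << rxsync_frequency)-th RxD has its OWN bit deferred and is
+ * remembered in first_rxdp; the bit is set only after a wmb() when
+ * the next batch starts (or at the end of the fill), so the adapter
+ * never sees a partially initialised batch.
+ */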
atomic_inc(&nic->rx_bufs_left[ring_no]);
alloc_tab++;
}
end:
+ /* Transfer ownership of first descriptor to adapter just before
+ * exiting. Before that, use memory barrier so that ownership
+ * and other fields are seen by adapter correctly.
+ */
+ if (first_rxdp) {
+ wmb();
+ first_rxdp->Control_1 |= RXD_OWN_XENA;
+ }
+
return SUCCESS;
}
/**
- * free_rx_buffers - Frees all Rx buffers
+ * free_rx_buffers - Frees all Rx buffers
* @sp: device private variable.
- * Description:
+ * Description:
* This function will free all Rx buffers allocated by host.
* Return Value:
* NONE.
@@ -1758,7 +2309,8 @@ static void free_rx_buffers(struct s2io_nic *sp)
for (i = 0; i < config->rx_ring_num; i++) {
for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
off = j % (MAX_RXDS_PER_BLOCK + 1);
- rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
+ rxdp = mac_control->rings[i].rx_blocks[blk].
+ block_virt_addr + off;
#ifndef CONFIG_2BUFF_MODE
if (rxdp->Control_1 == END_OF_BLOCK) {
@@ -1793,7 +2345,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
HEADER_SNAP_SIZE,
PCI_DMA_FROMDEVICE);
#else
- ba = &sp->ba[i][blk][off];
+ ba = &mac_control->rings[i].ba[blk][off];
pci_unmap_single(sp->pdev, (dma_addr_t)
rxdp->Buffer0_ptr,
BUF0_LEN,
@@ -1813,10 +2365,10 @@ static void free_rx_buffers(struct s2io_nic *sp)
}
memset(rxdp, 0, sizeof(RxD_t));
}
- mac_control->rx_curr_put_info[i].block_index = 0;
- mac_control->rx_curr_get_info[i].block_index = 0;
- mac_control->rx_curr_put_info[i].offset = 0;
- mac_control->rx_curr_get_info[i].offset = 0;
+ mac_control->rings[i].rx_curr_put_info.block_index = 0;
+ mac_control->rings[i].rx_curr_get_info.block_index = 0;
+ mac_control->rings[i].rx_curr_put_info.offset = 0;
+ mac_control->rings[i].rx_curr_get_info.offset = 0;
atomic_set(&sp->rx_bufs_left[i], 0);
DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
dev->name, buf_cnt, i);
@@ -1826,7 +2378,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
/**
* s2io_poll - Rx interrupt handler for NAPI support
* @dev : pointer to the device structure.
- * @budget : The number of packets that were budgeted to be processed
+ * @budget : The number of packets that were budgeted to be processed
* during one pass through the 'Poll" function.
* Description:
* Comes into picture only if NAPI support has been incorporated. It does
@@ -1836,160 +2388,36 @@ static void free_rx_buffers(struct s2io_nic *sp)
* 0 on success and 1 if there are No Rx packets to be processed.
*/
-#ifdef CONFIG_S2IO_NAPI
+#if defined(CONFIG_S2IO_NAPI)
static int s2io_poll(struct net_device *dev, int *budget)
{
nic_t *nic = dev->priv;
- XENA_dev_config_t __iomem *bar0 = nic->bar0;
- int pkts_to_process = *budget, pkt_cnt = 0;
- register u64 val64 = 0;
- rx_curr_get_info_t get_info, put_info;
- int i, get_block, put_block, get_offset, put_offset, ring_bufs;
-#ifndef CONFIG_2BUFF_MODE
- u16 val16, cksum;
-#endif
- struct sk_buff *skb;
- RxD_t *rxdp;
+ int pkt_cnt = 0, org_pkts_to_process;
mac_info_t *mac_control;
struct config_param *config;
-#ifdef CONFIG_2BUFF_MODE
- buffAdd_t *ba;
-#endif
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ u64 val64;
+ int i;
+ atomic_inc(&nic->isr_cnt);
mac_control = &nic->mac_control;
config = &nic->config;
- if (pkts_to_process > dev->quota)
- pkts_to_process = dev->quota;
+ nic->pkts_to_process = *budget;
+ if (nic->pkts_to_process > dev->quota)
+ nic->pkts_to_process = dev->quota;
+ org_pkts_to_process = nic->pkts_to_process;
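+ /*
+ * rx_intr_handler() decrements nic->pkts_to_process as it passes
+ * packets up, so the difference from org_pkts_to_process is what is
+ * reported back to the NAPI core as work done.
+ */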
val64 = readq(&bar0->rx_traffic_int);
writeq(val64, &bar0->rx_traffic_int);
for (i = 0; i < config->rx_ring_num; i++) {
- get_info = mac_control->rx_curr_get_info[i];
- get_block = get_info.block_index;
- put_info = mac_control->rx_curr_put_info[i];
- put_block = put_info.block_index;
- ring_bufs = config->rx_cfg[i].num_rxd;
- rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
- get_info.offset;
-#ifndef CONFIG_2BUFF_MODE
- get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
- get_info.offset;
- put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
- put_info.offset;
- while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
- (((get_offset + 1) % ring_bufs) != put_offset)) {
- if (--pkts_to_process < 0) {
- goto no_rx;
- }
- if (rxdp->Control_1 == END_OF_BLOCK) {
- rxdp =
- (RxD_t *) ((unsigned long) rxdp->
- Control_2);
- get_info.offset++;
- get_info.offset %=
- (MAX_RXDS_PER_BLOCK + 1);
- get_block++;
- get_block %= nic->block_count[i];
- mac_control->rx_curr_get_info[i].
- offset = get_info.offset;
- mac_control->rx_curr_get_info[i].
- block_index = get_block;
- continue;
- }
- get_offset =
- (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
- get_info.offset;
- skb =
- (struct sk_buff *) ((unsigned long) rxdp->
- Host_Control);
- if (skb == NULL) {
- DBG_PRINT(ERR_DBG, "%s: The skb is ",
- dev->name);
- DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
- goto no_rx;
- }
- val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
- val16 = (u16) (val64 >> 48);
- cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer0_ptr,
- dev->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE +
- HEADER_SNAP_SIZE,
- PCI_DMA_FROMDEVICE);
- rx_osm_handler(nic, val16, rxdp, i);
- pkt_cnt++;
- get_info.offset++;
- get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
- rxdp =
- nic->rx_blocks[i][get_block].block_virt_addr +
- get_info.offset;
- mac_control->rx_curr_get_info[i].offset =
- get_info.offset;
+ rx_intr_handler(&mac_control->rings[i]);
+ pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
+ if (!nic->pkts_to_process) {
+ /* Quota for the current iteration has been met */
+ goto no_rx;
}
-#else
- get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
- get_info.offset;
- put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
- put_info.offset;
- while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
- !(rxdp->Control_2 & BIT(0))) &&
- (((get_offset + 1) % ring_bufs) != put_offset)) {
- if (--pkts_to_process < 0) {
- goto no_rx;
- }
- skb = (struct sk_buff *) ((unsigned long)
- rxdp->Host_Control);
- if (skb == NULL) {
- DBG_PRINT(ERR_DBG, "%s: The skb is ",
- dev->name);
- DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
- goto no_rx;
- }
-
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer0_ptr,
- BUF0_LEN, PCI_DMA_FROMDEVICE);
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer1_ptr,
- BUF1_LEN, PCI_DMA_FROMDEVICE);
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer2_ptr,
- dev->mtu + BUF0_LEN + 4,
- PCI_DMA_FROMDEVICE);
- ba = &nic->ba[i][get_block][get_info.offset];
-
- rx_osm_handler(nic, rxdp, i, ba);
-
- get_info.offset++;
- mac_control->rx_curr_get_info[i].offset =
- get_info.offset;
- rxdp =
- nic->rx_blocks[i][get_block].block_virt_addr +
- get_info.offset;
-
- if (get_info.offset &&
- (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
- get_info.offset = 0;
- mac_control->rx_curr_get_info[i].
- offset = get_info.offset;
- get_block++;
- get_block %= nic->block_count[i];
- mac_control->rx_curr_get_info[i].
- block_index = get_block;
- rxdp =
- nic->rx_blocks[i][get_block].
- block_virt_addr;
- }
- get_offset =
- (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
- get_info.offset;
- pkt_cnt++;
- }
-#endif
}
if (!pkt_cnt)
pkt_cnt = 1;
@@ -2007,9 +2435,10 @@ static int s2io_poll(struct net_device *dev, int *budget)
}
/* Re enable the Rx interrupts. */
en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
+ atomic_dec(&nic->isr_cnt);
return 0;
- no_rx:
+no_rx:
dev->quota -= pkt_cnt;
*budget -= pkt_cnt;
@@ -2020,279 +2449,204 @@ static int s2io_poll(struct net_device *dev, int *budget)
break;
}
}
+ atomic_dec(&nic->isr_cnt);
return 1;
}
-#else
-/**
+#endif
+
+/**
* rx_intr_handler - Rx interrupt handler
* @nic: device private variable.
- * Description:
- * If the interrupt is because of a received frame or if the
+ * Description:
+ * If the interrupt is because of a received frame or if the
* receive ring contains fresh as yet un-processed frames,this function is
- * called. It picks out the RxD at which place the last Rx processing had
- * stopped and sends the skb to the OSM's Rx handler and then increments
+ * called. It picks out the RxD at which place the last Rx processing had
+ * stopped and sends the skb to the OSM's Rx handler and then increments
* the offset.
* Return Value:
* NONE.
*/
-
-static void rx_intr_handler(struct s2io_nic *nic)
+static void rx_intr_handler(ring_info_t *ring_data)
{
+ nic_t *nic = ring_data->nic;
struct net_device *dev = (struct net_device *) nic->dev;
- XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+ int get_block, get_offset, put_block, put_offset, ring_bufs;
rx_curr_get_info_t get_info, put_info;
RxD_t *rxdp;
struct sk_buff *skb;
-#ifndef CONFIG_2BUFF_MODE
- u16 val16, cksum;
-#endif
- register u64 val64 = 0;
- int get_block, get_offset, put_block, put_offset, ring_bufs;
- int i, pkt_cnt = 0;
- mac_info_t *mac_control;
- struct config_param *config;
-#ifdef CONFIG_2BUFF_MODE
- buffAdd_t *ba;
+#ifndef CONFIG_S2IO_NAPI
+ int pkt_cnt = 0;
#endif
+ spin_lock(&nic->rx_lock);
+ if (atomic_read(&nic->card_state) == CARD_DOWN) {
+ DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
+ __FUNCTION__, dev->name);
+ spin_unlock(&nic->rx_lock);
+ return;
+ }
- mac_control = &nic->mac_control;
- config = &nic->config;
-
- /*
- * rx_traffic_int reg is an R1 register, hence we read and write back
- * the samevalue in the register to clear it.
- */
- val64 = readq(&bar0->rx_traffic_int);
- writeq(val64, &bar0->rx_traffic_int);
-
- for (i = 0; i < config->rx_ring_num; i++) {
- get_info = mac_control->rx_curr_get_info[i];
- get_block = get_info.block_index;
- put_info = mac_control->rx_curr_put_info[i];
- put_block = put_info.block_index;
- ring_bufs = config->rx_cfg[i].num_rxd;
- rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
- get_info.offset;
-#ifndef CONFIG_2BUFF_MODE
- get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info = ring_data->rx_curr_get_info;
+ get_block = get_info.block_index;
+ put_info = ring_data->rx_curr_put_info;
+ put_block = put_info.block_index;
+ ring_bufs = get_info.ring_len+1;
+ rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
get_info.offset;
- spin_lock(&nic->put_lock);
- put_offset = nic->put_pos[i];
- spin_unlock(&nic->put_lock);
- while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
- (((get_offset + 1) % ring_bufs) != put_offset)) {
- if (rxdp->Control_1 == END_OF_BLOCK) {
- rxdp = (RxD_t *) ((unsigned long)
- rxdp->Control_2);
- get_info.offset++;
- get_info.offset %=
- (MAX_RXDS_PER_BLOCK + 1);
- get_block++;
- get_block %= nic->block_count[i];
- mac_control->rx_curr_get_info[i].
- offset = get_info.offset;
- mac_control->rx_curr_get_info[i].
- block_index = get_block;
- continue;
- }
- get_offset =
- (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
- get_info.offset;
- skb = (struct sk_buff *) ((unsigned long)
- rxdp->Host_Control);
- if (skb == NULL) {
- DBG_PRINT(ERR_DBG, "%s: The skb is ",
- dev->name);
- DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
- return;
- }
- val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
- val16 = (u16) (val64 >> 48);
- cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer0_ptr,
- dev->mtu +
- HEADER_ETHERNET_II_802_3_SIZE +
- HEADER_802_2_SIZE +
- HEADER_SNAP_SIZE,
- PCI_DMA_FROMDEVICE);
- rx_osm_handler(nic, val16, rxdp, i);
- get_info.offset++;
- get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
- rxdp =
- nic->rx_blocks[i][get_block].block_virt_addr +
- get_info.offset;
- mac_control->rx_curr_get_info[i].offset =
- get_info.offset;
- pkt_cnt++;
- if ((indicate_max_pkts)
- && (pkt_cnt > indicate_max_pkts))
- break;
+ get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_info.offset;
+#ifndef CONFIG_S2IO_NAPI
+ spin_lock(&nic->put_lock);
+ put_offset = ring_data->put_pos;
+ spin_unlock(&nic->put_lock);
+#else
+ put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ put_info.offset;
+#endif
+ while (RXD_IS_UP2DT(rxdp) &&
+ (((get_offset + 1) % ring_bufs) != put_offset)) {
+ skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: The skb is ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
+ spin_unlock(&nic->rx_lock);
+ return;
}
+#ifndef CONFIG_2BUFF_MODE
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ dev->mtu +
+ HEADER_ETHERNET_II_802_3_SIZE +
+ HEADER_802_2_SIZE +
+ HEADER_SNAP_SIZE,
+ PCI_DMA_FROMDEVICE);
#else
- get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer0_ptr,
+ BUF0_LEN, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer1_ptr,
+ BUF1_LEN, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ rxdp->Buffer2_ptr,
+ dev->mtu + BUF0_LEN + 4,
+ PCI_DMA_FROMDEVICE);
+#endif
+ rx_osm_handler(ring_data, rxdp);
+ get_info.offset++;
+ ring_data->rx_curr_get_info.offset =
get_info.offset;
- spin_lock(&nic->put_lock);
- put_offset = nic->put_pos[i];
- spin_unlock(&nic->put_lock);
- while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
- !(rxdp->Control_2 & BIT(0))) &&
- (((get_offset + 1) % ring_bufs) != put_offset)) {
- skb = (struct sk_buff *) ((unsigned long)
- rxdp->Host_Control);
- if (skb == NULL) {
- DBG_PRINT(ERR_DBG, "%s: The skb is ",
- dev->name);
- DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
- return;
- }
-
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer0_ptr,
- BUF0_LEN, PCI_DMA_FROMDEVICE);
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer1_ptr,
- BUF1_LEN, PCI_DMA_FROMDEVICE);
- pci_unmap_single(nic->pdev, (dma_addr_t)
- rxdp->Buffer2_ptr,
- dev->mtu + BUF0_LEN + 4,
- PCI_DMA_FROMDEVICE);
- ba = &nic->ba[i][get_block][get_info.offset];
-
- rx_osm_handler(nic, rxdp, i, ba);
-
- get_info.offset++;
- mac_control->rx_curr_get_info[i].offset =
- get_info.offset;
- rxdp =
- nic->rx_blocks[i][get_block].block_virt_addr +
- get_info.offset;
+ rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
+ get_info.offset;
+ if (get_info.offset &&
+ (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
+ get_info.offset = 0;
+ ring_data->rx_curr_get_info.offset
+ = get_info.offset;
+ get_block++;
+ get_block %= ring_data->block_count;
+ ring_data->rx_curr_get_info.block_index
+ = get_block;
+ rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
+ }
- if (get_info.offset &&
- (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
- get_info.offset = 0;
- mac_control->rx_curr_get_info[i].
- offset = get_info.offset;
- get_block++;
- get_block %= nic->block_count[i];
- mac_control->rx_curr_get_info[i].
- block_index = get_block;
- rxdp =
- nic->rx_blocks[i][get_block].
- block_virt_addr;
- }
- get_offset =
- (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+ get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
get_info.offset;
- pkt_cnt++;
- if ((indicate_max_pkts)
- && (pkt_cnt > indicate_max_pkts))
- break;
- }
-#endif
+#ifdef CONFIG_S2IO_NAPI
+ nic->pkts_to_process -= 1;
+ if (!nic->pkts_to_process)
+ break;
+#else
+ pkt_cnt++;
if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
break;
+#endif
}
+ spin_unlock(&nic->rx_lock);
}
-#endif
-/**
+
+/**
* tx_intr_handler - Transmit interrupt handler
* @nic : device private variable
- * Description:
- * If an interrupt was raised to indicate DMA complete of the
- * Tx packet, this function is called. It identifies the last TxD
- * whose buffer was freed and frees all skbs whose data have already
+ * Description:
+ * If an interrupt was raised to indicate DMA complete of the
+ * Tx packet, this function is called. It identifies the last TxD
+ * whose buffer was freed and frees all skbs whose data have already
* DMA'ed into the NICs internal memory.
* Return Value:
* NONE
*/
-static void tx_intr_handler(struct s2io_nic *nic)
+static void tx_intr_handler(fifo_info_t *fifo_data)
{
- XENA_dev_config_t __iomem *bar0 = nic->bar0;
+ nic_t *nic = fifo_data->nic;
struct net_device *dev = (struct net_device *) nic->dev;
tx_curr_get_info_t get_info, put_info;
struct sk_buff *skb;
TxD_t *txdlp;
- register u64 val64 = 0;
- int i;
u16 j, frg_cnt;
- mac_info_t *mac_control;
- struct config_param *config;
- mac_control = &nic->mac_control;
- config = &nic->config;
-
- /*
- * tx_traffic_int reg is an R1 register, hence we read and write
- * back the samevalue in the register to clear it.
- */
- val64 = readq(&bar0->tx_traffic_int);
- writeq(val64, &bar0->tx_traffic_int);
+ get_info = fifo_data->tx_curr_get_info;
+ put_info = fifo_data->tx_curr_put_info;
+ txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
+ list_virt_addr;
+ while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
+ (get_info.offset != put_info.offset) &&
+ (txdlp->Host_Control)) {
+ /* Check for TxD errors */
+ if (txdlp->Control_1 & TXD_T_CODE) {
+ unsigned long long err;
+ err = txdlp->Control_1 & TXD_T_CODE;
+ DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
+ err);
+ }
- for (i = 0; i < config->tx_fifo_num; i++) {
- get_info = mac_control->tx_curr_get_info[i];
- put_info = mac_control->tx_curr_put_info[i];
- txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
- list_virt_addr;
- while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
- (get_info.offset != put_info.offset) &&
- (txdlp->Host_Control)) {
- /* Check for TxD errors */
- if (txdlp->Control_1 & TXD_T_CODE) {
- unsigned long long err;
- err = txdlp->Control_1 & TXD_T_CODE;
- DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
- err);
- }
+ skb = (struct sk_buff *) ((unsigned long)
+ txdlp->Host_Control);
+ if (skb == NULL) {
+ DBG_PRINT(ERR_DBG, "%s: Null skb ",
+ __FUNCTION__);
+ DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
+ return;
+ }
- skb = (struct sk_buff *) ((unsigned long)
- txdlp->Host_Control);
- if (skb == NULL) {
- DBG_PRINT(ERR_DBG, "%s: Null skb ",
- dev->name);
- DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
- return;
+ frg_cnt = skb_shinfo(skb)->nr_frags;
+ nic->tx_pkt_count++;
+
+ pci_unmap_single(nic->pdev, (dma_addr_t)
+ txdlp->Buffer_Pointer,
+ skb->len - skb->data_len,
+ PCI_DMA_TODEVICE);
+ if (frg_cnt) {
+ TxD_t *temp;
+ temp = txdlp;
+ txdlp++;
+ for (j = 0; j < frg_cnt; j++, txdlp++) {
+ skb_frag_t *frag =
+ &skb_shinfo(skb)->frags[j];
+ if (!txdlp->Buffer_Pointer)
+ break;
+ pci_unmap_page(nic->pdev,
+ (dma_addr_t)
+ txdlp->
+ Buffer_Pointer,
+ frag->size,
+ PCI_DMA_TODEVICE);
}
- nic->tx_pkt_count++;
-
- frg_cnt = skb_shinfo(skb)->nr_frags;
-
- /* For unfragmented skb */
- pci_unmap_single(nic->pdev, (dma_addr_t)
- txdlp->Buffer_Pointer,
- skb->len - skb->data_len,
- PCI_DMA_TODEVICE);
- if (frg_cnt) {
- TxD_t *temp = txdlp;
- txdlp++;
- for (j = 0; j < frg_cnt; j++, txdlp++) {
- skb_frag_t *frag =
- &skb_shinfo(skb)->frags[j];
- pci_unmap_page(nic->pdev,
- (dma_addr_t)
- txdlp->
- Buffer_Pointer,
- frag->size,
- PCI_DMA_TODEVICE);
- }
- txdlp = temp;
- }
- memset(txdlp, 0,
- (sizeof(TxD_t) * config->max_txds));
-
- /* Updating the statistics block */
- nic->stats.tx_packets++;
- nic->stats.tx_bytes += skb->len;
- dev_kfree_skb_irq(skb);
-
- get_info.offset++;
- get_info.offset %= get_info.fifo_len + 1;
- txdlp = (TxD_t *) nic->list_info[i]
- [get_info.offset].list_virt_addr;
- mac_control->tx_curr_get_info[i].offset =
- get_info.offset;
+ txdlp = temp;
}
+ memset(txdlp, 0,
+ (sizeof(TxD_t) * fifo_data->max_txds));
+
+ /* Updating the statistics block */
+ nic->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+
+ get_info.offset++;
+ get_info.offset %= get_info.fifo_len + 1;
+ txdlp = (TxD_t *) fifo_data->list_info
+ [get_info.offset].list_virt_addr;
+ fifo_data->tx_curr_get_info.offset =
+ get_info.offset;
}
spin_lock(&nic->tx_lock);
@@ -2301,13 +2655,13 @@ static void tx_intr_handler(struct s2io_nic *nic)
spin_unlock(&nic->tx_lock);
}
-/**
+/**
 * alarm_intr_handler - Alarm Interrupt handler
* @nic: device private variable
- * Description: If the interrupt was neither because of Rx packet or Tx
+ * Description: If the interrupt was neither because of Rx packet nor Tx
* complete, this function is called. If the interrupt was to indicate
- * a loss of link, the OSM link status handler is invoked for any other
- * alarm interrupt the block that raised the interrupt is displayed
+ * a loss of link, the OSM link status handler is invoked. For any other
+ * alarm interrupt, the block that raised the interrupt is displayed
* and a H/W reset is issued.
* Return Value:
* NONE
@@ -2320,10 +2674,32 @@ static void alarm_intr_handler(struct s2io_nic *nic)
register u64 val64 = 0, err_reg = 0;
/* Handling link status change error Intr */
- err_reg = readq(&bar0->mac_rmac_err_reg);
- writeq(err_reg, &bar0->mac_rmac_err_reg);
- if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
- schedule_work(&nic->set_link_task);
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+ err_reg = readq(&bar0->mac_rmac_err_reg);
+ writeq(err_reg, &bar0->mac_rmac_err_reg);
+ if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
+ schedule_work(&nic->set_link_task);
+ }
+ }
+
+ /* Handling Ecc errors */
+ val64 = readq(&bar0->mc_err_reg);
+ writeq(val64, &bar0->mc_err_reg);
+ if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
+ if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
+ nic->mac_control.stats_info->sw_stat.
+ double_ecc_errs++;
+ DBG_PRINT(ERR_DBG, "%s: Device indicates ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "double ECC error!!\n");
+ if (nic->device_type != XFRAME_II_DEVICE) {
+ netif_stop_queue(dev);
+ schedule_work(&nic->rst_timer_task);
+ }
+ } else {
+ nic->mac_control.stats_info->sw_stat.
+ single_ecc_errs++;
+ }
}
/* In case of a serious error, the device will be Reset. */
@@ -2338,7 +2714,7 @@ static void alarm_intr_handler(struct s2io_nic *nic)
/*
* Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
* Error occurs, the adapter will be recycled by disabling the
- * adapter enable bit and enabling it again after the device
+ * adapter enable bit and enabling it again after the device
* becomes Quiescent.
*/
val64 = readq(&bar0->pcc_err_reg);
@@ -2354,18 +2730,18 @@ static void alarm_intr_handler(struct s2io_nic *nic)
/* Other type of interrupts are not being handled now, TODO */
}
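alarm_intr_handler() above acknowledges each error source the same way: read the latched register, then write the very same value back so that only the bits that were observed get cleared. A small simulation of that write-1-to-clear handshake; the register here is ordinary memory and the bit values are arbitrary:

#include <stdio.h>
#include <stdint.h>

static uint64_t err_reg = 0x5;			/* two latched alarm bits */

static uint64_t sim_readq(void)        { return err_reg; }
static void     sim_writeq(uint64_t v) { err_reg &= ~v; }	/* W1C behaviour */

int main(void)
{
	uint64_t seen = sim_readq();

	sim_writeq(seen);			/* acknowledge exactly what was read */
	printf("seen=%#llx remaining=%#llx\n",
	       (unsigned long long)seen, (unsigned long long)err_reg);
	return 0;
}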
-/**
+/**
* wait_for_cmd_complete - waits for a command to complete.
- * @sp : private member of the device structure, which is a pointer to the
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * Description: Function that waits for a command to Write into RMAC
- * ADDR DATA registers to be completed and returns either success or
- * error depending on whether the command was complete or not.
+ * Description: Function that waits for a command write into the RMAC
+ * ADDR DATA registers to complete and returns either success or
+ * error depending on whether the command completed or not.
* Return value:
* SUCCESS on success and FAILURE on failure.
*/
-static int wait_for_cmd_complete(nic_t * sp)
+int wait_for_cmd_complete(nic_t * sp)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
int ret = FAILURE, cnt = 0;
@@ -2385,29 +2761,32 @@ static int wait_for_cmd_complete(nic_t * sp)
return ret;
}
-/**
- * s2io_reset - Resets the card.
+/**
+ * s2io_reset - Resets the card.
* @sp : private member of the device structure.
* Description: Function to Reset the card. This function then also
- * restores the previously saved PCI configuration space registers as
+ * restores the previously saved PCI configuration space registers as
* the card reset also resets the configuration space.
* Return value:
* void.
*/
-static void s2io_reset(nic_t * sp)
+void s2io_reset(nic_t * sp)
{
XENA_dev_config_t __iomem *bar0 = sp->bar0;
u64 val64;
- u16 subid;
+ u16 subid, pci_cmd;
+
+ /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
+ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
val64 = SW_RESET_ALL;
writeq(val64, &bar0->sw_reset);
- /*
- * At this stage, if the PCI write is indeed completed, the
- * card is reset and so is the PCI Config space of the device.
- * So a read cannot be issued at this stage on any of the
+ /*
+ * At this stage, if the PCI write is indeed completed, the
+ * card is reset and so is the PCI Config space of the device.
+ * So a read cannot be issued at this stage on any of the
* registers to ensure the write into "sw_reset" register
* has gone through.
* Question: Is there any system call that will explicitly force
@@ -2418,42 +2797,72 @@ static void s2io_reset(nic_t * sp)
*/
msleep(250);
- /* Restore the PCI state saved during initializarion. */
+ /* Restore the PCI state saved during initialization. */
pci_restore_state(sp->pdev);
+ pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+ pci_cmd);
s2io_init_pci(sp);
msleep(250);
+ /* Set swapper to enable I/O register access */
+ s2io_set_swapper(sp);
+
+ /* Clear certain PCI/PCI-X fields after reset */
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ /* Clear parity err detect bit */
+ pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
+
+ /* Clearing PCIX Ecc status register */
+ pci_write_config_dword(sp->pdev, 0x68, 0x7C);
+
+ /* Clearing PCI_STATUS error reflected here */
+ writeq(BIT(62), &bar0->txpic_int_reg);
+ }
+
+ /* Reset device statistics maintained by OS */
+ memset(&sp->stats, 0, sizeof (struct net_device_stats));
+
/* SXE-002: Configure link and activity LED to turn it off */
subid = sp->pdev->subsystem_device;
- if ((subid & 0xFF) >= 0x07) {
+ if (((subid & 0xFF) >= 0x07) &&
+ (sp->device_type == XFRAME_I_DEVICE)) {
val64 = readq(&bar0->gpio_control);
val64 |= 0x0000800000000000ULL;
writeq(val64, &bar0->gpio_control);
val64 = 0x0411040400000000ULL;
- writeq(val64, (void __iomem *) bar0 + 0x2700);
+ writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
+ }
+
+ /*
+	 * Clear spurious ECC interrupts that would have occurred on
+ * XFRAME II cards after reset.
+ */
+ if (sp->device_type == XFRAME_II_DEVICE) {
+ val64 = readq(&bar0->pcc_err_reg);
+ writeq(val64, &bar0->pcc_err_reg);
}
sp->device_enabled_once = FALSE;
}
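s2io_reset() now saves the PCI-X command word before SW_RESET and writes it back afterwards because the reset also clears configuration space, and the MMRBC/OST settings are not brought back by pci_restore_state(). A toy sketch of the save, reset, restore sequence; the structure and values are invented for illustration:

#include <stdio.h>
#include <stdint.h>

struct cfg_space { uint16_t pcix_cmd; };	/* just the field of interest */

static void sw_reset(struct cfg_space *cfg)
{
	cfg->pcix_cmd = 0;			/* the reset wipes config space */
}

int main(void)
{
	struct cfg_space cfg = { .pcix_cmd = 0x801f };	/* made-up tuned value */
	uint16_t saved = cfg.pcix_cmd;		/* back up before the reset */

	sw_reset(&cfg);
	cfg.pcix_cmd = saved;			/* write it back afterwards */
	printf("pcix_cmd after restore: %#x\n", cfg.pcix_cmd);
	return 0;
}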
/**
- * s2io_set_swapper - to set the swapper controle on the card
- * @sp : private member of the device structure,
+ *  s2io_set_swapper - to set the swapper control on the card
+ * @sp : private member of the device structure,
* pointer to the s2io_nic structure.
- * Description: Function to set the swapper control on the card
+ * Description: Function to set the swapper control on the card
* correctly depending on the 'endianness' of the system.
* Return value:
* SUCCESS on success and FAILURE on failure.
*/
-static int s2io_set_swapper(nic_t * sp)
+int s2io_set_swapper(nic_t * sp)
{
struct net_device *dev = sp->dev;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
u64 val64, valt, valr;
- /*
+ /*
* Set proper endian settings and verify the same by reading
* the PIF Feed-back register.
*/
@@ -2505,8 +2914,9 @@ static int s2io_set_swapper(nic_t * sp)
i++;
}
if(i == 4) {
+ unsigned long long x = val64;
DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
- DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
+ DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
return FAILURE;
}
}
@@ -2514,8 +2924,8 @@ static int s2io_set_swapper(nic_t * sp)
val64 &= 0xFFFF000000000000ULL;
#ifdef __BIG_ENDIAN
- /*
- * The device by default set to a big endian format, so a
+ /*
+	 * The device is set to a big endian format by default, so a
* big endian driver need not set anything.
*/
val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2531,9 +2941,9 @@ static int s2io_set_swapper(nic_t * sp)
SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
writeq(val64, &bar0->swapper_ctrl);
#else
- /*
+ /*
* Initially we enable all bits to make it accessible by the
- * driver, then we selectively enable only those bits that
+ * driver, then we selectively enable only those bits that
* we want to set.
*/
val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2555,8 +2965,8 @@ static int s2io_set_swapper(nic_t * sp)
#endif
val64 = readq(&bar0->swapper_ctrl);
- /*
- * Verifying if endian settings are accurate by reading a
+ /*
+ * Verifying if endian settings are accurate by reading a
* feedback register.
*/
val64 = readq(&bar0->pif_rd_swapper_fb);
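The swapper verification reads a feed-back register whose defined value is 0x0123456789ABCDEF; if the swapper is programmed for the wrong endianness the value comes back byte-swapped. A short sketch of what the two cases look like, with the gcc/clang builtin __builtin_bswap64 standing in for the hardware:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t expected = 0x0123456789ABCDEFULL;	/* value checked by the driver */
	uint64_t swapped  = __builtin_bswap64(expected);/* what a wrong setting reads */

	printf("correct read: %#018llx\n", (unsigned long long)expected);
	printf("swapped read: %#018llx\n", (unsigned long long)swapped);
	return 0;
}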
@@ -2576,55 +2986,63 @@ static int s2io_set_swapper(nic_t * sp)
* Functions defined below concern the OS part of the driver *
* ********************************************************* */
-/**
+/**
* s2io_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
* This function is the open entry point of the driver. It mainly calls a
* function to allocate Rx buffers and inserts them into the buffer
- * descriptors and then enables the Rx part of the NIC.
+ * descriptors and then enables the Rx part of the NIC.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int s2io_open(struct net_device *dev)
+int s2io_open(struct net_device *dev)
{
nic_t *sp = dev->priv;
int err = 0;
- /*
- * Make sure you have link off by default every time
+ /*
+ * Make sure you have link off by default every time
* Nic is initialized
*/
netif_carrier_off(dev);
- sp->last_link_state = LINK_DOWN;
+ sp->last_link_state = 0;
/* Initialize H/W and enable interrupts */
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
- return -ENODEV;
+ err = -ENODEV;
+ goto hw_init_failed;
}
/* After proper initialization of H/W, register ISR */
- err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
+ err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
sp->name, dev);
if (err) {
- s2io_reset(sp);
DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
dev->name);
- return err;
+ goto isr_registration_failed;
}
if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
- s2io_reset(sp);
- return -ENODEV;
+ err = -ENODEV;
+ goto setting_mac_address_failed;
}
netif_start_queue(dev);
return 0;
+
+setting_mac_address_failed:
+ free_irq(sp->pdev->irq, dev);
+isr_registration_failed:
+ del_timer_sync(&sp->alarm_timer);
+ s2io_reset(sp);
+hw_init_failed:
+ return err;
}
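The reworked s2io_open() unwinds through labels so that each failure undoes only the steps that already succeeded, in reverse order. A self-contained user-space sketch of the same pattern; every helper is a stand-in and set_mac() is forced to fail so the error path runs:

#include <stdio.h>

static int hw_init(void)      { return 0; }	/* pretend these can fail */
static int register_isr(void) { return 0; }
static int set_mac(void)      { return -1; }	/* force the error path */
static void free_isr(void)    { puts("free_irq"); }
static void hw_reset(void)    { puts("reset"); }

static int open_device(void)
{
	int err;

	if ((err = hw_init()))      goto hw_init_failed;
	if ((err = register_isr())) goto isr_failed;
	if ((err = set_mac()))      goto set_mac_failed;
	return 0;

set_mac_failed:
	free_isr();			/* undo ISR registration */
isr_failed:
	hw_reset();			/* undo H/W init */
hw_init_failed:
	return err;
}

int main(void)
{
	printf("open_device() = %d\n", open_device());
	return 0;
}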
/**
@@ -2640,16 +3058,15 @@ static int s2io_open(struct net_device *dev)
* file on failure.
*/
-static int s2io_close(struct net_device *dev)
+int s2io_close(struct net_device *dev)
{
nic_t *sp = dev->priv;
-
flush_scheduled_work();
netif_stop_queue(dev);
/* Reset card, kill tasklet and free Tx and Rx buffers. */
s2io_card_down(sp);
- free_irq(dev->irq, dev);
+ free_irq(sp->pdev->irq, dev);
sp->device_close_flag = TRUE; /* Device is shut down. */
return 0;
}
@@ -2667,7 +3084,7 @@ static int s2io_close(struct net_device *dev)
* 0 on success & 1 on failure.
*/
-static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
+int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
nic_t *sp = dev->priv;
u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
@@ -2678,29 +3095,39 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef NETIF_F_TSO
int mss;
#endif
+ u16 vlan_tag = 0;
+ int vlan_priority = 0;
mac_info_t *mac_control;
struct config_param *config;
- XENA_dev_config_t __iomem *bar0 = sp->bar0;
mac_control = &sp->mac_control;
config = &sp->config;
- DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
+ DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
spin_lock_irqsave(&sp->tx_lock, flags);
-
if (atomic_read(&sp->card_state) == CARD_DOWN) {
- DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
+ DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
dev->name);
spin_unlock_irqrestore(&sp->tx_lock, flags);
- return 1;
+ dev_kfree_skb(skb);
+ return 0;
}
queue = 0;
- put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
- get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
- txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
- queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
+ /* Get Fifo number to Transmit based on vlan priority */
+ if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_priority = vlan_tag >> 13;
+ queue = config->fifo_mapping[vlan_priority];
+ }
+
+ put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
+ get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
+ txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
+ list_virt_addr;
+
+ queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
/* Avoid "put" pointer going beyond "get" pointer */
if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
@@ -2709,6 +3136,15 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&sp->tx_lock, flags);
return 0;
}
+
+ /* A buffer with no data will be dropped */
+ if (!skb->len) {
+ DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
+ dev_kfree_skb(skb);
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+ return 0;
+ }
+
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->tso_size;
if (mss) {
@@ -2720,9 +3156,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
frg_cnt = skb_shinfo(skb)->nr_frags;
frg_len = skb->len - skb->data_len;
- txdp->Host_Control = (unsigned long) skb;
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
+ txdp->Host_Control = (unsigned long) skb;
if (skb->ip_summed == CHECKSUM_HW) {
txdp->Control_2 |=
(TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -2731,6 +3167,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Control_2 |= config->tx_intr_type;
+ if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+ txdp->Control_2 |= TXD_VLAN_ENABLE;
+ txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
+ }
+
txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
TXD_GATHER_CODE_FIRST);
txdp->Control_1 |= TXD_LIST_OWN_XENA;
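Earlier in this function the Tx FIFO is now picked from the VLAN priority: the three PCP bits occupy bits 15..13 of the 16-bit tag, so shifting right by 13 yields an index 0..7 into config->fifo_mapping. A quick sketch of that calculation; the mapping table and tag value below are examples only:

#include <stdio.h>
#include <stdint.h>

static const int fifo_mapping[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };	/* example mapping */

int main(void)
{
	uint16_t vlan_tag = 0xA064;		/* PCP = 5, VID = 0x064 */
	int prio  = vlan_tag >> 13;		/* 0xA064 >> 13 == 5 */
	int queue = fifo_mapping[prio];

	printf("tag=%#x prio=%d queue=%d\n", vlan_tag, prio, queue);
	return 0;
}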
@@ -2738,6 +3179,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
/* For fragmented SKB. */
for (i = 0; i < frg_cnt; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ /* A '0' length fragment will be ignored */
+ if (!frag->size)
+ continue;
txdp++;
txdp->Buffer_Pointer = (u64) pci_map_page
(sp->pdev, frag->page, frag->page_offset,
@@ -2747,23 +3191,23 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Control_1 |= TXD_GATHER_CODE_LAST;
tx_fifo = mac_control->tx_FIFO_start[queue];
- val64 = sp->list_info[queue][put_off].list_phy_addr;
+ val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
writeq(val64, &tx_fifo->TxDL_Pointer);
val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
TX_FIFO_LAST_LIST);
+
#ifdef NETIF_F_TSO
if (mss)
val64 |= TX_FIFO_SPECIAL_FUNC;
#endif
writeq(val64, &tx_fifo->List_Control);
- /* Perform a PCI read to flush previous writes */
- val64 = readq(&bar0->general_int_status);
+ mmiowb();
put_off++;
- put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
- mac_control->tx_curr_put_info[queue].offset = put_off;
+ put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
+ mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
/* Avoid "put" pointer going beyond "get" pointer */
if (((put_off + 1) % queue_len) == get_off) {
@@ -2779,18 +3223,74 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static void
+s2io_alarm_handle(unsigned long data)
+{
+ nic_t *sp = (nic_t *)data;
+
+ alarm_intr_handler(sp);
+ mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
+}
+
+static void s2io_txpic_intr_handle(nic_t *sp)
+{
+ XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
+ u64 val64;
+
+ val64 = readq(&bar0->pic_int_status);
+ if (val64 & PIC_INT_GPIO) {
+ val64 = readq(&bar0->gpio_int_reg);
+ if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
+ (val64 & GPIO_INT_REG_LINK_UP)) {
+ val64 |= GPIO_INT_REG_LINK_DOWN;
+ val64 |= GPIO_INT_REG_LINK_UP;
+ writeq(val64, &bar0->gpio_int_reg);
+ goto masking;
+ }
+
+ if (((sp->last_link_state == LINK_UP) &&
+ (val64 & GPIO_INT_REG_LINK_DOWN)) ||
+ ((sp->last_link_state == LINK_DOWN) &&
+ (val64 & GPIO_INT_REG_LINK_UP))) {
+ val64 = readq(&bar0->gpio_int_mask);
+ val64 |= GPIO_INT_MASK_LINK_DOWN;
+ val64 |= GPIO_INT_MASK_LINK_UP;
+ writeq(val64, &bar0->gpio_int_mask);
+ s2io_set_link((unsigned long)sp);
+ }
+masking:
+ if (sp->last_link_state == LINK_UP) {
+ /*enable down interrupt */
+ val64 = readq(&bar0->gpio_int_mask);
+ /* unmasks link down intr */
+ val64 &= ~GPIO_INT_MASK_LINK_DOWN;
+ /* masks link up intr */
+ val64 |= GPIO_INT_MASK_LINK_UP;
+ writeq(val64, &bar0->gpio_int_mask);
+ } else {
+ /*enable UP Interrupt */
+ val64 = readq(&bar0->gpio_int_mask);
+ /* unmasks link up interrupt */
+ val64 &= ~GPIO_INT_MASK_LINK_UP;
+ /* masks link down interrupt */
+ val64 |= GPIO_INT_MASK_LINK_DOWN;
+ writeq(val64, &bar0->gpio_int_mask);
+ }
+ }
+}
+
/**
* s2io_isr - ISR handler of the device .
* @irq: the irq of the device.
* @dev_id: a void pointer to the dev structure of the NIC.
* @pt_regs: pointer to the registers pushed on the stack.
- * Description: This function is the ISR handler of the device. It
- * identifies the reason for the interrupt and calls the relevant
- * service routines. As a contongency measure, this ISR allocates the
+ * Description: This function is the ISR handler of the device. It
+ * identifies the reason for the interrupt and calls the relevant
+ *  service routines. As a contingency measure, this ISR allocates the
* recv buffers, if their numbers are below the panic value which is
* presently set to 25% of the original number of rcv buffers allocated.
* Return value:
- * IRQ_HANDLED: will be returned if IRQ was handled by this routine
+ * IRQ_HANDLED: will be returned if IRQ was handled by this routine
* IRQ_NONE: will be returned if interrupt is not from our device
*/
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2798,40 +3298,31 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
struct net_device *dev = (struct net_device *) dev_id;
nic_t *sp = dev->priv;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
-#ifndef CONFIG_S2IO_NAPI
- int i, ret;
-#endif
- u64 reason = 0;
+ int i;
+ u64 reason = 0, val64;
mac_info_t *mac_control;
struct config_param *config;
+ atomic_inc(&sp->isr_cnt);
mac_control = &sp->mac_control;
config = &sp->config;
- /*
+ /*
* Identify the cause for interrupt and call the appropriate
* interrupt handler. Causes for the interrupt could be;
* 1. Rx of packet.
* 2. Tx complete.
* 3. Link down.
- * 4. Error in any functional blocks of the NIC.
+ * 4. Error in any functional blocks of the NIC.
*/
reason = readq(&bar0->general_int_status);
if (!reason) {
/* The interrupt was not raised by Xena. */
+ atomic_dec(&sp->isr_cnt);
return IRQ_NONE;
}
- /* If Intr is because of Tx Traffic */
- if (reason & GEN_INTR_TXTRAFFIC) {
- tx_intr_handler(sp);
- }
-
- /* If Intr is because of an error */
- if (reason & (GEN_ERROR_INTR))
- alarm_intr_handler(sp);
-
#ifdef CONFIG_S2IO_NAPI
if (reason & GEN_INTR_RXTRAFFIC) {
if (netif_rx_schedule_prep(dev)) {
@@ -2843,17 +3334,43 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
#else
/* If Intr is because of Rx Traffic */
if (reason & GEN_INTR_RXTRAFFIC) {
- rx_intr_handler(sp);
+ /*
+ * rx_traffic_int reg is an R1 register, writing all 1's
+		 * will ensure that the actual interrupt-causing bit gets
+ * cleared and hence a read can be avoided.
+ */
+ val64 = 0xFFFFFFFFFFFFFFFFULL;
+ writeq(val64, &bar0->rx_traffic_int);
+ for (i = 0; i < config->rx_ring_num; i++) {
+ rx_intr_handler(&mac_control->rings[i]);
+ }
}
#endif
- /*
- * If the Rx buffer count is below the panic threshold then
- * reallocate the buffers from the interrupt handler itself,
+ /* If Intr is because of Tx Traffic */
+ if (reason & GEN_INTR_TXTRAFFIC) {
+ /*
+ * tx_traffic_int reg is an R1 register, writing all 1's
+		 * will ensure that the actual interrupt-causing bit gets
+ * cleared and hence a read can be avoided.
+ */
+ val64 = 0xFFFFFFFFFFFFFFFFULL;
+ writeq(val64, &bar0->tx_traffic_int);
+
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
+ }
+
+ if (reason & GEN_INTR_TXPIC)
+ s2io_txpic_intr_handle(sp);
+ /*
+ * If the Rx buffer count is below the panic threshold then
+ * reallocate the buffers from the interrupt handler itself,
* else schedule a tasklet to reallocate the buffers.
*/
#ifndef CONFIG_S2IO_NAPI
for (i = 0; i < config->rx_ring_num; i++) {
+ int ret;
int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
int level = rx_buffer_level(sp, rxb_size, i);
@@ -2865,6 +3382,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
dev->name);
DBG_PRINT(ERR_DBG, " in ISR!!\n");
clear_bit(0, (&sp->tasklet_status));
+ atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
}
clear_bit(0, (&sp->tasklet_status));
@@ -2874,33 +3392,69 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
}
#endif
+ atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
}
/**
- * s2io_get_stats - Updates the device statistics structure.
+ * s2io_updt_stats - triggers a one-shot update of the device statistics
+ * @sp : private member of the device structure.
+ */
+static void s2io_updt_stats(nic_t *sp)
+{
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64;
+ int cnt = 0;
+
+ if (atomic_read(&sp->card_state) == CARD_UP) {
+ /* Apprx 30us on a 133 MHz bus */
+ val64 = SET_UPDT_CLICKS(10) |
+ STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
+ writeq(val64, &bar0->stat_cfg);
+ do {
+ udelay(100);
+ val64 = readq(&bar0->stat_cfg);
+ if (!(val64 & BIT(0)))
+ break;
+ cnt++;
+ if (cnt == 5)
+ break; /* Updt failed */
+ } while(1);
+ }
+}
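s2io_updt_stats() kicks off a one-shot statistics transfer and then polls the busy bit a bounded number of times instead of spinning forever. A user-space sketch of that bounded poll; the simulated register simply reports busy for the first few reads:

#include <stdio.h>

static int busy_polls_left = 3;			/* pretend hardware stays busy for 3 polls */

static int stat_update_busy(void)
{
	return busy_polls_left-- > 0;
}

int main(void)
{
	int cnt = 0, done = 0;

	/* the driver writes ONE_SHOT | STAT_EN to stat_cfg at this point */
	do {
		/* udelay(100) in the driver */
		if (!stat_update_busy()) {
			done = 1;
			break;
		}
		cnt++;
	} while (cnt < 5);			/* give up after five tries */

	printf("%s after %d polls\n", done ? "updated" : "timed out", cnt);
	return 0;
}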
+
+/**
+ * s2io_get_stats - Updates the device statistics structure.
* @dev : pointer to the device structure.
* Description:
- * This function updates the device statistics structure in the s2io_nic
+ * This function updates the device statistics structure in the s2io_nic
* structure and returns a pointer to the same.
* Return value:
* pointer to the updated net_device_stats structure.
*/
-static struct net_device_stats *s2io_get_stats(struct net_device *dev)
+struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
nic_t *sp = dev->priv;
mac_info_t *mac_control;
struct config_param *config;
+
mac_control = &sp->mac_control;
config = &sp->config;
- sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
- sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
- sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
+ /* Configure Stats for immediate updt */
+ s2io_updt_stats(sp);
+
+ sp->stats.tx_packets =
+ le32_to_cpu(mac_control->stats_info->tmac_frms);
+ sp->stats.tx_errors =
+ le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
+ sp->stats.rx_errors =
+ le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
+ sp->stats.multicast =
+ le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
sp->stats.rx_length_errors =
- mac_control->stats_info->rmac_long_frms;
+ le32_to_cpu(mac_control->stats_info->rmac_long_frms);
return (&sp->stats);
}
@@ -2909,8 +3463,8 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
* s2io_set_multicast - entry point for multicast address enable/disable.
* @dev : pointer to the device structure
* Description:
- * This function is a driver entry point which gets called by the kernel
- * whenever multicast addresses must be enabled/disabled. This also gets
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
* determine, if multicast address must be enabled or if promiscuous mode
* is to be disabled etc.
@@ -2948,6 +3502,8 @@ static void s2io_set_multicast(struct net_device *dev)
/* Disable all Multicast addresses */
writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
&bar0->rmac_addr_data0_mem);
+ writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
+ &bar0->rmac_addr_data1_mem);
val64 = RMAC_ADDR_CMD_MEM_WE |
RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
@@ -3010,7 +3566,7 @@ static void s2io_set_multicast(struct net_device *dev)
writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
&bar0->rmac_addr_data0_mem);
writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
- &bar0->rmac_addr_data1_mem);
+ &bar0->rmac_addr_data1_mem);
val64 = RMAC_ADDR_CMD_MEM_WE |
RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET
@@ -3039,8 +3595,7 @@ static void s2io_set_multicast(struct net_device *dev)
writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
&bar0->rmac_addr_data0_mem);
writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
- &bar0->rmac_addr_data1_mem);
-
+ &bar0->rmac_addr_data1_mem);
val64 = RMAC_ADDR_CMD_MEM_WE |
RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET
@@ -3059,12 +3614,12 @@ static void s2io_set_multicast(struct net_device *dev)
}
/**
- * s2io_set_mac_addr - Programs the Xframe mac address
+ * s2io_set_mac_addr - Programs the Xframe mac address
* @dev : pointer to the device structure.
* @addr: a uchar pointer to the new mac address which is to be set.
- * Description : This procedure will program the Xframe to receive
+ * Description : This procedure will program the Xframe to receive
* frames with new Mac Address
- * Return value: SUCCESS on success and an appropriate (-)ve integer
+ * Return value: SUCCESS on success and an appropriate (-)ve integer
* as defined in errno.h file on failure.
*/
@@ -3075,10 +3630,10 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
register u64 val64, mac_addr = 0;
int i;
- /*
+ /*
* Set the new MAC address as the new unicast filter and reflect this
* change on the device address registered with the OS. It will be
- * at offset 0.
+ * at offset 0.
*/
for (i = 0; i < ETH_ALEN; i++) {
mac_addr <<= 8;
@@ -3102,12 +3657,12 @@ int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
}
/**
- * s2io_ethtool_sset - Sets different link parameters.
+ * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
* @info: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
- * The function sets different link parameters provided by the user onto
+ * The function sets different link parameters provided by the user onto
* the NIC.
* Return value:
* 0 on success.
@@ -3129,7 +3684,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
}
/**
- * s2io_ethtol_gset - Return link specific information.
+ * s2io_ethtool_gset - Return link specific information.
* @sp : private member of the device structure, pointer to the
* s2io_nic structure.
* @info : pointer to the structure with parameters given by ethtool
@@ -3161,8 +3716,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
}
/**
- * s2io_ethtool_gdrvinfo - Returns driver specific information.
- * @sp : private member of the device structure, which is a pointer to the
+ * s2io_ethtool_gdrvinfo - Returns driver specific information.
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @info : pointer to the structure with parameters given by ethtool to
* return driver information.
@@ -3190,9 +3745,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
/**
 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
- * @sp: private member of the device structure, which is a pointer to the
+ * @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @regs : pointer to the structure with parameters given by ethtool for
+ * @regs : pointer to the structure with parameters given by ethtool for
* dumping the registers.
 * @reg_space: The input argument into which all the registers are dumped.
* Description:
@@ -3221,11 +3776,11 @@ static void s2io_ethtool_gregs(struct net_device *dev,
/**
* s2io_phy_id - timer function that alternates adapter LED.
- * @data : address of the private member of the device structure, which
+ * @data : address of the private member of the device structure, which
* is a pointer to the s2io_nic structure, provided as an u32.
- * Description: This is actually the timer function that alternates the
- * adapter LED bit of the adapter control bit to set/reset every time on
- * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
+ * Description: This is actually the timer function that toggles the
+ * adapter LED bit in the adapter control register on every
+ * invocation. The timer is set for 1/2 a second, hence the NIC blinks
* once every second.
*/
static void s2io_phy_id(unsigned long data)
@@ -3236,7 +3791,8 @@ static void s2io_phy_id(unsigned long data)
u16 subid;
subid = sp->pdev->subsystem_device;
- if ((subid & 0xFF) >= 0x07) {
+ if ((sp->device_type == XFRAME_II_DEVICE) ||
+ ((subid & 0xFF) >= 0x07)) {
val64 = readq(&bar0->gpio_control);
val64 ^= GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
@@ -3253,12 +3809,12 @@ static void s2io_phy_id(unsigned long data)
* s2io_ethtool_idnic - To physically identify the nic on the system.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @id : pointer to the structure with identification parameters given by
+ * @id : pointer to the structure with identification parameters given by
* ethtool.
* Description: Used to physically identify the NIC on the system.
- * The Link LED will blink for a time specified by the user for
+ * The Link LED will blink for a time specified by the user for
* identification.
- * NOTE: The Link has to be Up to be able to blink the LED. Hence
+ * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
* Return value:
* int , returns 0 on success
@@ -3273,7 +3829,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
subid = sp->pdev->subsystem_device;
last_gpio_ctrl_val = readq(&bar0->gpio_control);
- if ((subid & 0xFF) < 0x07) {
+ if ((sp->device_type == XFRAME_I_DEVICE) &&
+ ((subid & 0xFF) < 0x07)) {
val64 = readq(&bar0->adapter_control);
if (!(val64 & ADAPTER_CNTL_EN)) {
printk(KERN_ERR
@@ -3288,12 +3845,12 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
}
mod_timer(&sp->id_timer, jiffies);
if (data)
- msleep(data * 1000);
+ msleep_interruptible(data * HZ);
else
- msleep(0xFFFFFFFF);
+ msleep_interruptible(MAX_FLICKER_TIME);
del_timer_sync(&sp->id_timer);
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
writeq(last_gpio_ctrl_val, &bar0->gpio_control);
last_gpio_ctrl_val = readq(&bar0->gpio_control);
}
@@ -3303,7 +3860,8 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
/**
* s2io_ethtool_getpause_data -Pause frame frame generation and reception.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
* Returns the Pause frame generation and reception capability of the NIC.
@@ -3327,7 +3885,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
/**
* s2io_ethtool_setpause_data - set/reset pause frame generation.
- * @sp : private member of the device structure, which is a pointer to the
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
@@ -3338,7 +3896,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
*/
static int s2io_ethtool_setpause_data(struct net_device *dev,
- struct ethtool_pauseparam *ep)
+ struct ethtool_pauseparam *ep)
{
u64 val64;
nic_t *sp = dev->priv;
@@ -3359,13 +3917,13 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
/**
* read_eeprom - reads 4 bytes of data from user given offset.
- * @sp : private member of the device structure, which is a pointer to the
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @off : offset at which the data must be written
* @data : Its an output parameter where the data read at the given
- * offset is stored.
+ * offset is stored.
* Description:
- * Will read 4 bytes of data from the user given offset and return the
+ * Will read 4 bytes of data from the user given offset and return the
* read data.
* NOTE: Will allow to read only part of the EEPROM visible through the
* I2C bus.
@@ -3406,7 +3964,7 @@ static int read_eeprom(nic_t * sp, int off, u32 * data)
* s2io_nic structure.
* @off : offset at which the data must be written
* @data : The data that is to be written
- * @cnt : Number of bytes of the data that are actually to be written into
+ * @cnt : Number of bytes of the data that are actually to be written into
* the Eeprom. (max of 3)
* Description:
* Actually writes the relevant part of the data value into the Eeprom
@@ -3443,7 +4001,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
- * @eeprom : pointer to the user level structure provided by ethtool,
+ * @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf : user defined value to be written into Eeprom.
* Description: Reads the values stored in the Eeprom at given offset
@@ -3454,7 +4012,7 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
*/
static int s2io_ethtool_geeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 * data_buf)
+ struct ethtool_eeprom *eeprom, u8 * data_buf)
{
u32 data, i, valid;
nic_t *sp = dev->priv;
@@ -3479,7 +4037,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
* s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @eeprom : pointer to the user level structure provided by ethtool,
+ * @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf ; user defined value to be written into Eeprom.
* Description:
@@ -3527,8 +4085,8 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
}
/**
- * s2io_register_test - reads and writes into all clock domains.
- * @sp : private member of the device structure, which is a pointer to the
+ * s2io_register_test - reads and writes into all clock domains.
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
* by the driver.
@@ -3545,8 +4103,8 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
u64 val64 = 0;
int fail = 0;
- val64 = readq(&bar0->pcc_enable);
- if (val64 != 0xff00000000000000ULL) {
+ val64 = readq(&bar0->pif_rd_swapper_fb);
+ if (val64 != 0x123456789abcdefULL) {
fail = 1;
DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
}
@@ -3590,13 +4148,13 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
}
/**
- * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
+ * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data:variable that returns the result of each of the test conducted by
* the driver.
* Description:
- * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
+ * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
* register.
* Return value:
* 0 on success.
@@ -3661,14 +4219,14 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
/**
* s2io_bist_test - invokes the MemBist test of the card .
- * @sp : private member of the device structure, which is a pointer to the
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @data:variable that returns the result of each of the test conducted by
+ * @data:variable that returns the result of each of the test conducted by
* the driver.
* Description:
* This invokes the MemBist test of the card. We give around
* 2 secs time for the Test to complete. If it's still not complete
- * within this peiod, we consider that the test failed.
+ * within this period, we consider that the test failed.
* Return value:
* 0 on success and -1 on failure.
*/
@@ -3697,13 +4255,13 @@ static int s2io_bist_test(nic_t * sp, uint64_t * data)
}
/**
- * s2io-link_test - verifies the link state of the nic
- * @sp ; private member of the device structure, which is a pointer to the
+ * s2io_link_test - verifies the link state of the nic
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
* the driver.
* Description:
- * The function verifies the link state of the NIC and updates the input
+ * The function verifies the link state of the NIC and updates the input
* argument 'data' appropriately.
* Return value:
* 0 on success.
@@ -3722,13 +4280,13 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
}
/**
- * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
- * @sp - private member of the device structure, which is a pointer to the
+ * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
+ * @sp - private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @data - variable that returns the result of each of the test
+ * @data - variable that returns the result of each of the test
* conducted by the driver.
* Description:
- * This is one of the offline test that tests the read and write
+ * This is one of the offline tests that verifies the read and write
* access to the RldRam chip on the NIC.
* Return value:
* 0 on success.
@@ -3833,7 +4391,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
* s2io_nic structure.
* @ethtest : pointer to a ethtool command specific structure that will be
* returned to the user.
- * @data : variable that returns the result of each of the test
+ * @data : variable that returns the result of each of the test
* conducted by the driver.
* Description:
* This function conducts 6 tests ( 4 offline and 2 online) to determine
@@ -3851,23 +4409,18 @@ static void s2io_ethtool_test(struct net_device *dev,
if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
/* Offline Tests. */
- if (orig_state) {
+ if (orig_state)
s2io_close(sp->dev);
- s2io_set_swapper(sp);
- } else
- s2io_set_swapper(sp);
if (s2io_register_test(sp, &data[0]))
ethtest->flags |= ETH_TEST_FL_FAILED;
s2io_reset(sp);
- s2io_set_swapper(sp);
if (s2io_rldram_test(sp, &data[3]))
ethtest->flags |= ETH_TEST_FL_FAILED;
s2io_reset(sp);
- s2io_set_swapper(sp);
if (s2io_eeprom_test(sp, &data[1]))
ethtest->flags |= ETH_TEST_FL_FAILED;
@@ -3910,61 +4463,111 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
nic_t *sp = dev->priv;
StatInfo_t *stat_info = sp->mac_control.stats_info;
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
+ s2io_updt_stats(sp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_data_octets);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_mcst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_bcst_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_any_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_vld_ip);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_drop_ip);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_icmp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_rst_tcp);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
- tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
+ tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
+ le32_to_cpu(stat_info->tmac_udp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_vld_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_data_octets);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_vld_mcst_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_vld_bcst_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_discarded_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_usized_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_osized_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_frag_frms);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_jabber_frms);
+ tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_ip);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
+ tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_drop_ip);
+ tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_icmp);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
- tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
+ tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_udp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_err_drp_udp);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_pause_cnt);
+ tmp_stats[i++] =
+ (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
+ le32_to_cpu(stat_info->rmac_accepted_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
+ tmp_stats[i++] = 0;
+ tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
+ tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
}
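Every widened counter above is stitched together from a 32-bit little-endian value and its 32-bit overflow companion. A small sketch of that combination, using glibc's htole32()/le32toh() from <endian.h> in place of le32_to_cpu() and made-up counter values:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	/* counters as they sit in the little-endian stats block */
	uint32_t frms_le  = htole32(0x89ABCDEFu);	/* low 32 bits */
	uint32_t oflow_le = htole32(2u);		/* times the low part wrapped */

	uint64_t frms = ((uint64_t)le32toh(oflow_le) << 32) | le32toh(frms_le);

	printf("tmac_frms = 0x%llx\n", (unsigned long long)frms);
	return 0;
}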
-static int s2io_ethtool_get_regs_len(struct net_device *dev)
+int s2io_ethtool_get_regs_len(struct net_device *dev)
{
return (XENA_REG_SPACE);
}
-static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
+u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
{
nic_t *sp = dev->priv;
return (sp->rx_csum);
}
-
-static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
+int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
nic_t *sp = dev->priv;
@@ -3975,19 +4578,17 @@ static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
return 0;
}
-
-static int s2io_get_eeprom_len(struct net_device *dev)
+int s2io_get_eeprom_len(struct net_device *dev)
{
return (XENA_EEPROM_SPACE);
}
-static int s2io_ethtool_self_test_count(struct net_device *dev)
+int s2io_ethtool_self_test_count(struct net_device *dev)
{
return (S2IO_TEST_LEN);
}
-
-static void s2io_ethtool_get_strings(struct net_device *dev,
- u32 stringset, u8 * data)
+void s2io_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 * data)
{
switch (stringset) {
case ETH_SS_TEST:
@@ -3998,13 +4599,12 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
sizeof(ethtool_stats_keys));
}
}
-
static int s2io_ethtool_get_stats_count(struct net_device *dev)
{
return (S2IO_STAT_LEN);
}
-static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
{
if (data)
dev->features |= NETIF_F_IP_CSUM;
@@ -4046,21 +4646,18 @@ static struct ethtool_ops netdev_ethtool_ops = {
};
/**
- * s2io_ioctl - Entry point for the Ioctl
+ * s2io_ioctl - Entry point for the Ioctl
* @dev : Device pointer.
 * @ifr :  An IOCTL specific structure that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd : This is used to distinguish between the different commands that
* can be passed to the IOCTL functions.
* Description:
- * This function has support for ethtool, adding multiple MAC addresses on
- * the NIC and some DBG commands for the util tool.
- * Return value:
- * Currently the IOCTL supports no operations, hence by default this
- * function returns OP NOT SUPPORTED value.
+ * Currently there is no special functionality supported in IOCTL, hence
+ * the function always returns -EOPNOTSUPP.
*/
-static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
return -EOPNOTSUPP;
}
@@ -4076,17 +4673,9 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* file on failure.
*/
-static int s2io_change_mtu(struct net_device *dev, int new_mtu)
+int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
nic_t *sp = dev->priv;
- XENA_dev_config_t __iomem *bar0 = sp->bar0;
- register u64 val64;
-
- if (netif_running(dev)) {
- DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
- DBG_PRINT(ERR_DBG, "change its MTU \n");
- return -EBUSY;
- }
if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -4094,11 +4683,22 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
return -EPERM;
}
- /* Set the new MTU into the PYLD register of the NIC */
- val64 = new_mtu;
- writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
-
dev->mtu = new_mtu;
+ if (netif_running(dev)) {
+ s2io_card_down(sp);
+ netif_stop_queue(dev);
+ if (s2io_card_up(sp)) {
+ DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
+ __FUNCTION__);
+ }
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ } else { /* Device is down */
+ XENA_dev_config_t __iomem *bar0 = sp->bar0;
+ u64 val64 = new_mtu;
+
+ writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
+ }
return 0;
}
@@ -4108,9 +4708,9 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
* @dev_adr : address of the device structure in dma_addr_t format.
* Description:
* This is the tasklet or the bottom half of the ISR. This is
- * an extension of the ISR which is scheduled by the scheduler to be run
+ * an extension of the ISR which is scheduled by the scheduler to be run
* when the load on the CPU is low. All low priority tasks of the ISR can
- * be pushed into the tasklet. For now the tasklet is used only to
+ * be pushed into the tasklet. For now the tasklet is used only to
* replenish the Rx buffers in the Rx buffer descriptors.
* Return value:
* void.
@@ -4166,19 +4766,22 @@ static void s2io_set_link(unsigned long data)
}
subid = nic->pdev->subsystem_device;
- /*
- * Allow a small delay for the NICs self initiated
- * cleanup to complete.
- */
- msleep(100);
+ if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+ /*
+ * Allow a small delay for the NICs self initiated
+ * cleanup to complete.
+ */
+ msleep(100);
+ }
val64 = readq(&bar0->adapter_status);
- if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
+ if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
if (LINK_IS_UP(val64)) {
val64 = readq(&bar0->adapter_control);
val64 |= ADAPTER_CNTL_EN;
writeq(val64, &bar0->adapter_control);
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
+ subid)) {
val64 = readq(&bar0->gpio_control);
val64 |= GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
@@ -4187,20 +4790,24 @@ static void s2io_set_link(unsigned long data)
val64 |= ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
}
- val64 = readq(&bar0->adapter_status);
- if (!LINK_IS_UP(val64)) {
- DBG_PRINT(ERR_DBG, "%s:", dev->name);
- DBG_PRINT(ERR_DBG, " Link down");
- DBG_PRINT(ERR_DBG, "after ");
- DBG_PRINT(ERR_DBG, "enabling ");
- DBG_PRINT(ERR_DBG, "device \n");
+ if (s2io_link_fault_indication(nic) ==
+ MAC_RMAC_ERR_TIMER) {
+ val64 = readq(&bar0->adapter_status);
+ if (!LINK_IS_UP(val64)) {
+ DBG_PRINT(ERR_DBG, "%s:", dev->name);
+ DBG_PRINT(ERR_DBG, " Link down");
+ DBG_PRINT(ERR_DBG, "after ");
+ DBG_PRINT(ERR_DBG, "enabling ");
+ DBG_PRINT(ERR_DBG, "device \n");
+ }
}
if (nic->device_enabled_once == FALSE) {
nic->device_enabled_once = TRUE;
}
s2io_link(nic, LINK_UP);
} else {
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
+ subid)) {
val64 = readq(&bar0->gpio_control);
val64 &= ~GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
@@ -4223,9 +4830,11 @@ static void s2io_card_down(nic_t * sp)
unsigned long flags;
register u64 val64 = 0;
+ del_timer_sync(&sp->alarm_timer);
/* If s2io_set_link task is executing, wait till it completes. */
- while (test_and_set_bit(0, &(sp->link_state)))
+ while (test_and_set_bit(0, &(sp->link_state))) {
msleep(50);
+ }
atomic_set(&sp->card_state, CARD_DOWN);
/* disable Tx and Rx traffic on the NIC */
@@ -4237,7 +4846,7 @@ static void s2io_card_down(nic_t * sp)
/* Check if the device is Quiescent and then Reset the NIC */
do {
val64 = readq(&bar0->adapter_status);
- if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
+ if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
break;
}
@@ -4251,14 +4860,27 @@ static void s2io_card_down(nic_t * sp)
break;
}
} while (1);
- spin_lock_irqsave(&sp->tx_lock, flags);
s2io_reset(sp);
- /* Free all unused Tx and Rx buffers */
+ /* Waiting till all Interrupt handlers are complete */
+ cnt = 0;
+ do {
+ msleep(10);
+ if (!atomic_read(&sp->isr_cnt))
+ break;
+ cnt++;
+ } while(cnt < 5);
+
+ spin_lock_irqsave(&sp->tx_lock, flags);
+ /* Free all Tx buffers */
free_tx_buffers(sp);
+ spin_unlock_irqrestore(&sp->tx_lock, flags);
+
+ /* Free all Rx buffers */
+ spin_lock_irqsave(&sp->rx_lock, flags);
free_rx_buffers(sp);
+ spin_unlock_irqrestore(&sp->rx_lock, flags);
- spin_unlock_irqrestore(&sp->tx_lock, flags);
clear_bit(0, &(sp->link_state));
}
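s2io_card_down() now drains in-flight interrupt handlers by polling isr_cnt, which the ISR increments on entry and decrements on exit, and gives up after a few bounded sleeps. A C11 sketch of the same drain pattern with stand-in names:

#include <stdio.h>
#include <stdatomic.h>

static atomic_int isr_cnt;

static void fake_isr(void)
{
	atomic_fetch_add(&isr_cnt, 1);		/* entry: one more handler in flight */
	/* ... interrupt work ... */
	atomic_fetch_sub(&isr_cnt, 1);		/* exit: handler done */
}

int main(void)
{
	int cnt = 0;

	fake_isr();
	do {
		/* msleep(10) in the driver */
		if (atomic_load(&isr_cnt) == 0)
			break;
		cnt++;
	} while (cnt < 5);			/* bounded wait, as in the patch */

	printf("handlers still in flight: %d (after %d polls)\n",
	       atomic_load(&isr_cnt), cnt);
	return 0;
}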
@@ -4276,8 +4898,8 @@ static int s2io_card_up(nic_t * sp)
return -ENODEV;
}
- /*
- * Initializing the Rx buffers. For now we are considering only 1
+ /*
+ * Initializing the Rx buffers. For now we are considering only 1
* Rx ring and initializing buffers into 30 Rx blocks
*/
mac_control = &sp->mac_control;
@@ -4311,16 +4933,18 @@ static int s2io_card_up(nic_t * sp)
return -ENODEV;
}
+ S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
+
atomic_set(&sp->card_state, CARD_UP);
return 0;
}
-/**
+/**
* s2io_restart_nic - Resets the NIC.
* @data : long pointer to the device private structure
* Description:
* This function is scheduled to be run by the s2io_tx_watchdog
- * function after 0.5 secs to reset the NIC. The idea is to reduce
+ * function after 0.5 secs to reset the NIC. The idea is to reduce
* the run time of the watch dog routine which is run holding a
* spin lock.
*/
@@ -4338,10 +4962,11 @@ static void s2io_restart_nic(unsigned long data)
netif_wake_queue(dev);
DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
dev->name);
+
}
-/**
- * s2io_tx_watchdog - Watchdog for transmit side.
+/**
+ * s2io_tx_watchdog - Watchdog for transmit side.
* @dev : Pointer to net device structure
* Description:
* This function is triggered if the Tx Queue is stopped
@@ -4369,7 +4994,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
* @len : length of the packet
* @cksum : FCS checksum of the frame.
* @ring_no : the ring from which this RxD was extracted.
- * Description:
+ * Description:
 * This function is called by the Tx interrupt service routine to perform
* some OS related operations on the SKB before passing it to the upper
* layers. It mainly checks if the checksum is OK, if so adds it to the
@@ -4379,35 +5004,68 @@ static void s2io_tx_watchdog(struct net_device *dev)
* Return value:
* SUCCESS on success and -1 on failure.
*/
-#ifndef CONFIG_2BUFF_MODE
-static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
-#else
-static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
- buffAdd_t * ba)
-#endif
+static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
{
+ nic_t *sp = ring_data->nic;
struct net_device *dev = (struct net_device *) sp->dev;
- struct sk_buff *skb =
- (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
+ struct sk_buff *skb = (struct sk_buff *)
+ ((unsigned long) rxdp->Host_Control);
+ int ring_no = ring_data->ring_no;
u16 l3_csum, l4_csum;
#ifdef CONFIG_2BUFF_MODE
- int buf0_len, buf2_len;
+ int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
+ int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
+ int get_block = ring_data->rx_curr_get_info.block_index;
+ int get_off = ring_data->rx_curr_get_info.offset;
+ buffAdd_t *ba = &ring_data->ba[get_block][get_off];
unsigned char *buff;
+#else
+	u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
#endif
+ skb->dev = dev;
+ if (rxdp->Control_1 & RXD_T_CODE) {
+ unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
+ DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
+ dev->name, err);
+ dev_kfree_skb(skb);
+ sp->stats.rx_crc_errors++;
+ atomic_dec(&sp->rx_bufs_left[ring_no]);
+ rxdp->Host_Control = 0;
+ return 0;
+ }
- l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
- if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
+ /* Updating statistics */
+ rxdp->Host_Control = 0;
+ sp->rx_pkt_count++;
+ sp->stats.rx_packets++;
+#ifndef CONFIG_2BUFF_MODE
+ sp->stats.rx_bytes += len;
+#else
+ sp->stats.rx_bytes += buf0_len + buf2_len;
+#endif
+
+#ifndef CONFIG_2BUFF_MODE
+ skb_put(skb, len);
+#else
+ buff = skb_push(skb, buf0_len);
+ memcpy(buff, ba->ba_0, buf0_len);
+ skb_put(skb, buf2_len);
+#endif
+
+ if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
+ (sp->rx_csum)) {
+ l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
- /*
+ /*
* NIC verifies if the Checksum of the received
* frame is Ok or not and accordingly returns
* a flag in the RxD.
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
- /*
- * Packet with erroneous checksum, let the
+ /*
+ * Packet with erroneous checksum, let the
* upper layers deal with it.
*/
skb->ip_summed = CHECKSUM_NONE;
@@ -4416,44 +5074,26 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
skb->ip_summed = CHECKSUM_NONE;
}
- if (rxdp->Control_1 & RXD_T_CODE) {
- unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
- DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
- dev->name, err);
- }
-#ifdef CONFIG_2BUFF_MODE
- buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
- buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
-#endif
-
- skb->dev = dev;
-#ifndef CONFIG_2BUFF_MODE
- skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, dev);
-#else
- buff = skb_push(skb, buf0_len);
- memcpy(buff, ba->ba_0, buf0_len);
- skb_put(skb, buf2_len);
skb->protocol = eth_type_trans(skb, dev);
-#endif
-
#ifdef CONFIG_S2IO_NAPI
- netif_receive_skb(skb);
+ if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
+ /* Queueing the vlan frame to the upper layer */
+ vlan_hwaccel_receive_skb(skb, sp->vlgrp,
+ RXD_GET_VLAN_TAG(rxdp->Control_2));
+ } else {
+ netif_receive_skb(skb);
+ }
#else
- netif_rx(skb);
+ if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
+ /* Queueing the vlan frame to the upper layer */
+ vlan_hwaccel_rx(skb, sp->vlgrp,
+ RXD_GET_VLAN_TAG(rxdp->Control_2));
+ } else {
+ netif_rx(skb);
+ }
#endif
-
dev->last_rx = jiffies;
- sp->rx_pkt_count++;
- sp->stats.rx_packets++;
-#ifndef CONFIG_2BUFF_MODE
- sp->stats.rx_bytes += len;
-#else
- sp->stats.rx_bytes += buf0_len + buf2_len;
-#endif
-
atomic_dec(&sp->rx_bufs_left[ring_no]);
- rxdp->Host_Control = 0;
return SUCCESS;
}
@@ -4464,13 +5104,13 @@ static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
* @link : indicates whether link is UP/DOWN.
* Description:
* This function stops/starts the Tx queue depending on whether the link
- * status of the NIC is is down or up. This is called by the Alarm
- * interrupt handler whenever a link change interrupt comes up.
+ * status of the NIC is down or up. This is called by the Alarm
+ * interrupt handler whenever a link change interrupt comes up.
* Return value:
* void.
*/
-static void s2io_link(nic_t * sp, int link)
+void s2io_link(nic_t * sp, int link)
{
struct net_device *dev = (struct net_device *) sp->dev;
@@ -4487,8 +5127,25 @@ static void s2io_link(nic_t * sp, int link)
}
/**
- * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
- * @sp : private member of the device structure, which is a pointer to the
+ * get_xena_rev_id - to identify revision ID of xena.
+ * @pdev : PCI Dev structure
+ * Description:
+ * Function to identify the Revision ID of xena.
+ * Return value:
+ * returns the revision ID of the device.
+ */
+
+int get_xena_rev_id(struct pci_dev *pdev)
+{
+ u8 id = 0;
+ int ret;
+ ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
+ return id;
+}
+
+/**
+ * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
+ * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* Description:
* This function initializes a few of the PCI and PCI-X configuration registers
@@ -4499,15 +5156,15 @@ static void s2io_link(nic_t * sp, int link)
static void s2io_init_pci(nic_t * sp)
{
- u16 pci_cmd = 0;
+ u16 pci_cmd = 0, pcix_cmd = 0;
/* Enable Data Parity Error Recovery in PCI-X command register. */
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(sp->pcix_cmd));
+ &(pcix_cmd));
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- (sp->pcix_cmd | 1));
+ (pcix_cmd | 1));
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(sp->pcix_cmd));
+ &(pcix_cmd));
/* Set the PErr Response bit in PCI command register. */
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
@@ -4515,53 +5172,43 @@ static void s2io_init_pci(nic_t * sp)
(pci_cmd | PCI_COMMAND_PARITY));
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
- /* Set MMRB count to 1024 in PCI-X Command register. */
- sp->pcix_cmd &= 0xFFF3;
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(sp->pcix_cmd));
-
- /* Setting Maximum outstanding splits based on system type. */
- sp->pcix_cmd &= 0xFF8F;
-
- sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
- pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- sp->pcix_cmd);
- pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(sp->pcix_cmd));
/* Forcibly disabling relaxed ordering capability of the card. */
- sp->pcix_cmd &= 0xfffd;
+ pcix_cmd &= 0xfffd;
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- sp->pcix_cmd);
+ pcix_cmd);
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
- &(sp->pcix_cmd));
+ &(pcix_cmd));
}
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
module_param(tx_fifo_num, int, 0);
-module_param_array(tx_fifo_len, int, NULL, 0);
module_param(rx_ring_num, int, 0);
-module_param_array(rx_ring_sz, int, NULL, 0);
-module_param(Stats_refresh_time, int, 0);
+module_param_array(tx_fifo_len, uint, NULL, 0);
+module_param_array(rx_ring_sz, uint, NULL, 0);
+module_param_array(rts_frm_len, uint, NULL, 0);
+module_param(use_continuous_tx_intrs, int, 1);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
+module_param(bimodal, bool, 0);
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
#endif
+module_param(rxsync_frequency, int, 0);
+
/**
- * s2io_init_nic - Initialization of the adapter .
+ * s2io_init_nic - Initialization of the adapter .
* @pdev : structure containing the PCI related information of the device.
* @pre: List of PCI devices supported by the driver listed in s2io_tbl.
* Description:
* The function initializes an adapter identified by the pci_dev structure.
- * All OS related initialization including memory and device structure and
- * initlaization of the device private variable is done. Also the swapper
- * control register is initialized to enable read and write into the I/O
+ * All OS related initialization including memory and device structure and
+ * initialization of the device private variable is done. Also the swapper
+ * control register is initialized to enable read and write into the I/O
* registers of the device.
* Return value:
* returns 0 on success and negative on failure.
@@ -4572,7 +5219,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
nic_t *sp;
struct net_device *dev;
- char *dev_name = "S2IO 10GE NIC";
int i, j, ret;
int dma_flag = FALSE;
u32 mac_up, mac_down;
@@ -4581,10 +5227,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
u16 subid;
mac_info_t *mac_control;
struct config_param *config;
+ int mode;
-
- DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
- s2io_driver_version);
+#ifdef CONFIG_S2IO_NAPI
+ DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
+#endif
if ((ret = pci_enable_device(pdev))) {
DBG_PRINT(ERR_DBG,
@@ -4595,7 +5242,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
dma_flag = TRUE;
-
if (pci_set_consistent_dma_mask
(pdev, DMA_64BIT_MASK)) {
DBG_PRINT(ERR_DBG,
@@ -4635,34 +5281,41 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
memset(sp, 0, sizeof(nic_t));
sp->dev = dev;
sp->pdev = pdev;
- sp->vendor_id = pdev->vendor;
- sp->device_id = pdev->device;
sp->high_dma_flag = dma_flag;
- sp->irq = pdev->irq;
sp->device_enabled_once = FALSE;
- strcpy(sp->name, dev_name);
+
+ if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
+ (pdev->device == PCI_DEVICE_ID_HERC_UNI))
+ sp->device_type = XFRAME_II_DEVICE;
+ else
+ sp->device_type = XFRAME_I_DEVICE;
/* Initialize some PCI/PCI-X fields of the NIC. */
s2io_init_pci(sp);
- /*
+ /*
* Setting the device configuration parameters.
- * Most of these parameters can be specified by the user during
- * module insertion as they are module loadable parameters. If
- * these parameters are not not specified during load time, they
+ * Most of these parameters can be specified by the user during
+ * module insertion as they are module loadable parameters. If
+ * these parameters are not specified during load time, they
* are initialized with default values.
*/
mac_control = &sp->mac_control;
config = &sp->config;
/* Tx side parameters. */
- tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
+ if (tx_fifo_len[0] == 0)
+ tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
config->tx_fifo_num = tx_fifo_num;
for (i = 0; i < MAX_TX_FIFOS; i++) {
config->tx_cfg[i].fifo_len = tx_fifo_len[i];
config->tx_cfg[i].fifo_priority = i;
}
+ /* mapping the QoS priority to the configured fifos */
+ for (i = 0; i < MAX_TX_FIFOS; i++)
+ config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
+
config->tx_intr_type = TXD_INT_TYPE_UTILZ;
for (i = 0; i < config->tx_fifo_num; i++) {
config->tx_cfg[i].f_no_snoop =
@@ -4675,7 +5328,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
config->max_txds = MAX_SKB_FRAGS;
/* Rx side parameters. */
- rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
+ if (rx_ring_sz[0] == 0)
+ rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
config->rx_ring_num = rx_ring_num;
for (i = 0; i < MAX_RX_RINGS; i++) {
config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -4699,10 +5353,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
for (i = 0; i < config->rx_ring_num; i++)
atomic_set(&sp->rx_bufs_left[i], 0);
+ /* Initialize the number of ISRs currently running */
+ atomic_set(&sp->isr_cnt, 0);
+
/* initialize the shared memory used by the NIC and the host */
if (init_shared_mem(sp)) {
DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
- dev->name);
+ __FUNCTION__);
ret = -ENOMEM;
goto mem_alloc_failed;
}
@@ -4743,13 +5400,17 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
dev->do_ioctl = &s2io_ioctl;
dev->change_mtu = &s2io_change_mtu;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_rx_register = s2io_vlan_rx_register;
+ dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
+
/*
* will use eth_mac_addr() for dev->set_mac_address
* mac address will be set every time dev->open() is called
*/
-#ifdef CONFIG_S2IO_NAPI
+#if defined(CONFIG_S2IO_NAPI)
dev->poll = s2io_poll;
- dev->weight = 90;
+ dev->weight = 32;
#endif
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
@@ -4776,22 +5437,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
goto set_swap_failed;
}
- /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
- fix_mac_address(sp);
- s2io_reset(sp);
+ /* Verify if the Herc works on the slot its placed into */
+ if (sp->device_type & XFRAME_II_DEVICE) {
+ mode = s2io_verify_pci_mode(sp);
+ if (mode < 0) {
+ DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
+ DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
+ ret = -EBADSLT;
+ goto set_swap_failed;
+ }
+ }
- /*
- * Setting swapper control on the NIC, so the MAC address can be read.
- */
- if (s2io_set_swapper(sp)) {
- DBG_PRINT(ERR_DBG,
- "%s: S2IO: swapper settings are wrong\n",
- dev->name);
- ret = -EAGAIN;
- goto set_swap_failed;
+ /* Not needed for Herc */
+ if (sp->device_type & XFRAME_I_DEVICE) {
+ /*
+ * Fix for all "FFs" MAC address problems observed on
+ * Alpha platforms
+ */
+ fix_mac_address(sp);
+ s2io_reset(sp);
}
- /*
+ /*
* MAC address initialization.
* For now only one mac address will be read and used.
*/
@@ -4814,37 +5481,28 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
- DBG_PRINT(INIT_DBG,
- "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
- sp->def_mac_addr[0].mac_addr[0],
- sp->def_mac_addr[0].mac_addr[1],
- sp->def_mac_addr[0].mac_addr[2],
- sp->def_mac_addr[0].mac_addr[3],
- sp->def_mac_addr[0].mac_addr[4],
- sp->def_mac_addr[0].mac_addr[5]);
-
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
/*
- * Initialize the tasklet status and link state flags
- * and the card statte parameter
+ * Initialize the tasklet status and link state flags
+ * and the card state parameter
*/
atomic_set(&(sp->card_state), 0);
sp->tasklet_status = 0;
sp->link_state = 0;
-
/* Initialize spinlocks */
spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
spin_lock_init(&sp->put_lock);
#endif
+ spin_lock_init(&sp->rx_lock);
- /*
- * SXE-002: Configure link and activity LED to init state
- * on driver load.
+ /*
+ * SXE-002: Configure link and activity LED to init state
+ * on driver load.
*/
subid = sp->pdev->subsystem_device;
if ((subid & 0xFF) >= 0x07) {
@@ -4864,13 +5522,61 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
goto register_failed;
}
- /*
- * Make Link state as off at this point, when the Link change
- * interrupt comes the state will be automatically changed to
+ if (sp->device_type & XFRAME_II_DEVICE) {
+ DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
+ get_xena_rev_id(sp->pdev),
+ s2io_driver_version);
+ DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ sp->def_mac_addr[0].mac_addr[0],
+ sp->def_mac_addr[0].mac_addr[1],
+ sp->def_mac_addr[0].mac_addr[2],
+ sp->def_mac_addr[0].mac_addr[3],
+ sp->def_mac_addr[0].mac_addr[4],
+ sp->def_mac_addr[0].mac_addr[5]);
+ mode = s2io_print_pci_mode(sp);
+ if (mode < 0) {
+ DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
+ ret = -EBADSLT;
+ goto set_swap_failed;
+ }
+ } else {
+ DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
+ dev->name);
+ DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
+ get_xena_rev_id(sp->pdev),
+ s2io_driver_version);
+ DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ sp->def_mac_addr[0].mac_addr[0],
+ sp->def_mac_addr[0].mac_addr[1],
+ sp->def_mac_addr[0].mac_addr[2],
+ sp->def_mac_addr[0].mac_addr[3],
+ sp->def_mac_addr[0].mac_addr[4],
+ sp->def_mac_addr[0].mac_addr[5]);
+ }
+
+ /* Initialize device name */
+ strcpy(sp->name, dev->name);
+ if (sp->device_type & XFRAME_II_DEVICE)
+ strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
+ else
+ strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
+
+ /* Initialize bimodal Interrupts */
+ sp->config.bimodal = bimodal;
+ if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
+ sp->config.bimodal = 0;
+ DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
+ dev->name);
+ }
+
+ /*
+ * Make Link state as off at this point, when the Link change
+ * interrupt comes the state will be automatically changed to
* the right state.
*/
netif_carrier_off(dev);
- sp->last_link_state = LINK_DOWN;
return 0;
@@ -4891,11 +5597,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
}
/**
- * s2io_rem_nic - Free the PCI device
+ * s2io_rem_nic - Free the PCI device
* @pdev: structure containing the PCI related information of the device.
- * Description: This function is called by the Pci subsystem to release a
+ * Description: This function is called by the PCI subsystem to release a
* PCI device and free up all resources held by the device. This could
- * be in response to a Hot plug event or when the driver is to be removed
+ * be in response to a Hot plug event or when the driver is to be removed
* from memory.
*/
@@ -4919,7 +5625,6 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
-
free_netdev(dev);
}
@@ -4935,11 +5640,11 @@ int __init s2io_starter(void)
}
/**
- * s2io_closer - Cleanup routine for the driver
+ * s2io_closer - Cleanup routine for the driver
* Description: This function is the cleanup routine for the driver. It unregisters the driver.
*/
-static void s2io_closer(void)
+void s2io_closer(void)
{
pci_unregister_driver(&s2io_driver);
DBG_PRINT(INIT_DBG, "cleanup done\n");
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 1711c8c3dc9..5d9270730ca 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,9 @@
#define SUCCESS 0
#define FAILURE -1
+/* Maximum time to flicker LED when asked to identify NIC using ethtool */
+#define MAX_FLICKER_TIME 60000 /* 60 Secs */
+
/* Maximum outstanding splits to be configured into xena. */
typedef enum xena_max_outstanding_splits {
XENA_ONE_SPLIT_TRANSACTION = 0,
@@ -45,10 +48,10 @@ typedef enum xena_max_outstanding_splits {
#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
/* OS concerned variables and constants */
-#define WATCH_DOG_TIMEOUT 5*HZ
-#define EFILL 0x1234
-#define ALIGN_SIZE 127
-#define PCIX_COMMAND_REGISTER 0x62
+#define WATCH_DOG_TIMEOUT 15*HZ
+#define EFILL 0x1234
+#define ALIGN_SIZE 127
+#define PCIX_COMMAND_REGISTER 0x62
/*
* Debug related variables.
@@ -61,7 +64,7 @@ typedef enum xena_max_outstanding_splits {
#define INTR_DBG 4
/* Global variable that defines the present debug level of the driver. */
-static int debug_level = ERR_DBG; /* Default level. */
+int debug_level = ERR_DBG; /* Default level. */
/* DEBUG message print. */
#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
@@ -71,6 +74,12 @@ static int debug_level = ERR_DBG; /* Default level. */
#define L4_CKSUM_OK 0xFFFF
#define S2IO_JUMBO_SIZE 9600
+/* Driver statistics maintained by driver */
+typedef struct {
+ unsigned long long single_ecc_errs;
+ unsigned long long double_ecc_errs;
+} swStat_t;
+
/* The statistics block of Xena */
typedef struct stat_block {
/* Tx MAC statistics counters. */
@@ -186,12 +195,90 @@ typedef struct stat_block {
u32 rxd_rd_cnt;
u32 rxf_wr_cnt;
u32 txf_rd_cnt;
+
+/* Tx MAC statistics overflow counters. */
+ u32 tmac_data_octets_oflow;
+ u32 tmac_frms_oflow;
+ u32 tmac_bcst_frms_oflow;
+ u32 tmac_mcst_frms_oflow;
+ u32 tmac_ucst_frms_oflow;
+ u32 tmac_ttl_octets_oflow;
+ u32 tmac_any_err_frms_oflow;
+ u32 tmac_nucst_frms_oflow;
+ u64 tmac_vlan_frms;
+ u32 tmac_drop_ip_oflow;
+ u32 tmac_vld_ip_oflow;
+ u32 tmac_rst_tcp_oflow;
+ u32 tmac_icmp_oflow;
+ u32 tpa_unknown_protocol;
+ u32 tmac_udp_oflow;
+ u32 reserved_10;
+ u32 tpa_parse_failure;
+
+/* Rx MAC Statistics overflow counters. */
+ u32 rmac_data_octets_oflow;
+ u32 rmac_vld_frms_oflow;
+ u32 rmac_vld_bcst_frms_oflow;
+ u32 rmac_vld_mcst_frms_oflow;
+ u32 rmac_accepted_ucst_frms_oflow;
+ u32 rmac_ttl_octets_oflow;
+ u32 rmac_discarded_frms_oflow;
+ u32 rmac_accepted_nucst_frms_oflow;
+ u32 rmac_usized_frms_oflow;
+ u32 rmac_drop_events_oflow;
+ u32 rmac_frag_frms_oflow;
+ u32 rmac_osized_frms_oflow;
+ u32 rmac_ip_oflow;
+ u32 rmac_jabber_frms_oflow;
+ u32 rmac_icmp_oflow;
+ u32 rmac_drop_ip_oflow;
+ u32 rmac_err_drp_udp_oflow;
+ u32 rmac_udp_oflow;
+ u32 reserved_11;
+ u32 rmac_pause_cnt_oflow;
+ u64 rmac_ttl_1519_4095_frms;
+ u64 rmac_ttl_4096_8191_frms;
+ u64 rmac_ttl_8192_max_frms;
+ u64 rmac_ttl_gt_max_frms;
+ u64 rmac_osized_alt_frms;
+ u64 rmac_jabber_alt_frms;
+ u64 rmac_gt_max_alt_frms;
+ u64 rmac_vlan_frms;
+ u32 rmac_len_discard;
+ u32 rmac_fcs_discard;
+ u32 rmac_pf_discard;
+ u32 rmac_da_discard;
+ u32 rmac_red_discard;
+ u32 rmac_rts_discard;
+ u32 reserved_12;
+ u32 rmac_ingm_full_discard;
+ u32 reserved_13;
+ u32 rmac_accepted_ip_oflow;
+ u32 reserved_14;
+ u32 link_fault_cnt;
+ swStat_t sw_stat;
} StatInfo_t;
-/* Structures representing different init time configuration
+/*
+ * Structures representing different init time configuration
* parameters of the NIC.
*/
+#define MAX_TX_FIFOS 8
+#define MAX_RX_RINGS 8
+
+/* FIFO mappings for all possible number of fifos configured */
+int fifo_map[][MAX_TX_FIFOS] = {
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 1, 1, 1, 1},
+ {0, 0, 0, 1, 1, 1, 2, 2},
+ {0, 0, 1, 1, 2, 2, 3, 3},
+ {0, 0, 1, 1, 2, 2, 3, 4},
+ {0, 0, 1, 1, 2, 3, 4, 5},
+ {0, 0, 1, 2, 3, 4, 5, 6},
+ {0, 1, 2, 3, 4, 5, 6, 7},
+};
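/*
 * A minimal sketch of how this table can be consumed (hypothetical helper,
 * not taken from this patch; it mirrors the fifo_mapping copy done in
 * s2io_init_nic()).  Each row distributes the eight 3-bit QoS priorities
 * over the configured number of Tx FIFOs; for example the row
 * {0, 0, 0, 1, 1, 1, 2, 2} spreads them over three FIFOs.
 */
static inline int s2io_prio_to_fifo(int tx_fifo_num, u8 prio)
{
	/* pick the row for the configured FIFO count, then index by priority */
	return fifo_map[tx_fifo_num][prio & 0x7];
}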
+
/* Maintains Per FIFO related information. */
typedef struct tx_fifo_config {
#define MAX_AVAILABLE_TXDS 8192
@@ -237,14 +324,14 @@ typedef struct rx_ring_config {
#define NO_SNOOP_RXD_BUFFER 0x02
} rx_ring_config_t;
-/* This structure provides contains values of the tunable parameters
- * of the H/W
+/* This structure contains the values of the tunable parameters
+ * of the H/W
*/
struct config_param {
/* Tx Side */
u32 tx_fifo_num; /*Number of Tx FIFOs */
-#define MAX_TX_FIFOS 8
+ u8 fifo_mapping[MAX_TX_FIFOS];
tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
u64 tx_intr_type;
@@ -252,10 +339,10 @@ struct config_param {
/* Rx Side */
u32 rx_ring_num; /*Number of receive rings */
-#define MAX_RX_RINGS 8
#define MAX_RX_BLOCKS_PER_RING 150
rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
+ u8 bimodal; /*Flag for setting bimodal interrupts*/
#define HEADER_ETHERNET_II_802_3_SIZE 14
#define HEADER_802_2_SIZE 3
@@ -269,6 +356,7 @@ struct config_param {
#define MAX_PYLD_JUMBO 9600
#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18)
#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22)
+ u16 bus_speed;
};
/* Structure representing MAC Addrs */
@@ -277,7 +365,7 @@ typedef struct mac_addr {
} macaddr_t;
/* Structure that represent every FIFO element in the BAR1
- * Address location.
+ * Address location.
*/
typedef struct _TxFIFO_element {
u64 TxDL_Pointer;
@@ -339,6 +427,7 @@ typedef struct _RxD_t {
#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
#define RXD_FRAME_PROTO_IPV4 BIT(27)
#define RXD_FRAME_PROTO_IPV6 BIT(28)
+#define RXD_FRAME_IP_FRAG BIT(29)
#define RXD_FRAME_PROTO_TCP BIT(30)
#define RXD_FRAME_PROTO_UDP BIT(31)
#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
@@ -346,11 +435,15 @@ typedef struct _RxD_t {
#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF)
u64 Control_2;
+#define THE_RXD_MARK 0x3
+#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
+#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
+
#ifndef CONFIG_2BUFF_MODE
-#define MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
-#define SET_BUFFER0_SIZE(val) vBIT(val,0,16)
+#define MASK_BUFFER0_SIZE vBIT(0x3FFF,2,14)
+#define SET_BUFFER0_SIZE(val) vBIT(val,2,14)
#else
-#define MASK_BUFFER0_SIZE vBIT(0xFF,0,16)
+#define MASK_BUFFER0_SIZE vBIT(0xFF,2,14)
#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
@@ -363,7 +456,7 @@ typedef struct _RxD_t {
#define SET_NUM_TAG(val) vBIT(val,16,32)
#ifndef CONFIG_2BUFF_MODE
-#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0xFFFF,0,16)))
+#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14)))
#else
#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
>> 48)
@@ -382,7 +475,7 @@ typedef struct _RxD_t {
#endif
} RxD_t;
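/*
 * A minimal sketch of how the RxD marker macros above could be used
 * (hypothetical helper, an assumption rather than part of this patch):
 * a descriptor stamped with SET_RXD_MARKER when it is set up can be
 * checked later, before the rest of Control_2 is trusted.
 */
static inline int s2io_rxd_marker_intact(const RxD_t *rxdp)
{
	/* GET_RXD_MARKER() recovers the two bits planted by SET_RXD_MARKER */
	return GET_RXD_MARKER(rxdp->Control_2) == THE_RXD_MARK;
}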
-/* Structure that represents the Rx descriptor block which contains
+/* Structure that represents the Rx descriptor block which contains
* 128 Rx descriptors.
*/
#ifndef CONFIG_2BUFF_MODE
@@ -392,11 +485,11 @@ typedef struct _RxD_block {
u64 reserved_0;
#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
- u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
+ u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last
* Rxd in this blk */
u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */
u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
- * the upper 32 bits should
+ * the upper 32 bits should
* be 0 */
} RxD_block_t;
#else
@@ -405,13 +498,13 @@ typedef struct _RxD_block {
RxD_t rxd[MAX_RXDS_PER_BLOCK];
#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
- u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
+ u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
* in this blk */
u64 pNext_RxD_Blk_physical; /* Phy pointer to next blk. */
} RxD_block_t;
#define SIZE_OF_BLOCK 4096
-/* Structure to hold virtual addresses of Buf0 and Buf1 in
+/* Structure to hold virtual addresses of Buf0 and Buf1 in
* 2buf mode. */
typedef struct bufAdd {
void *ba_0_org;
@@ -423,8 +516,8 @@ typedef struct bufAdd {
/* Structure which stores all the MAC control parameters */
-/* This structure stores the offset of the RxD in the ring
- * from which the Rx Interrupt processor can start picking
+/* This structure stores the offset of the RxD in the ring
+ * from which the Rx Interrupt processor can start picking
* up the RxDs for processing.
*/
typedef struct _rx_curr_get_info_t {
@@ -436,7 +529,7 @@ typedef struct _rx_curr_get_info_t {
typedef rx_curr_get_info_t rx_curr_put_info_t;
/* This structure stores the offset of the TxDl in the FIFO
- * from which the Tx Interrupt processor can start picking
+ * from which the Tx Interrupt processor can start picking
* up the TxDLs for send complete interrupt processing.
*/
typedef struct {
@@ -446,32 +539,96 @@ typedef struct {
typedef tx_curr_get_info_t tx_curr_put_info_t;
-/* Infomation related to the Tx and Rx FIFOs and Rings of Xena
- * is maintained in this structure.
- */
-typedef struct mac_info {
-/* rx side stuff */
- /* Put pointer info which indictes which RxD has to be replenished
+/* Structure that holds the Phy and virt addresses of the Blocks */
+typedef struct rx_block_info {
+ RxD_t *block_virt_addr;
+ dma_addr_t block_dma_addr;
+} rx_block_info_t;
+
+/* pre declaration of the nic structure */
+typedef struct s2io_nic nic_t;
+
+/* Ring specific structure */
+typedef struct ring_info {
+ /* The ring number */
+ int ring_no;
+
+ /*
+ * Place holders for the virtual and physical addresses of
+ * all the Rx Blocks
+ */
+ rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING];
+ int block_count;
+ int pkt_cnt;
+
+ /*
+ * Put pointer info which indicates which RxD has to be replenished
* with a new buffer.
*/
- rx_curr_put_info_t rx_curr_put_info[MAX_RX_RINGS];
+ rx_curr_put_info_t rx_curr_put_info;
- /* Get pointer info which indictes which is the last RxD that was
+ /*
+ * Get pointer info which indicates which is the last RxD that was
* processed by the driver.
*/
- rx_curr_get_info_t rx_curr_get_info[MAX_RX_RINGS];
+ rx_curr_get_info_t rx_curr_get_info;
- u16 rmac_pause_time;
- u16 mc_pause_threshold_q0q3;
- u16 mc_pause_threshold_q4q7;
+#ifndef CONFIG_S2IO_NAPI
+ /* Index to the absolute position of the put pointer of Rx ring */
+ int put_pos;
+#endif
+
+#ifdef CONFIG_2BUFF_MODE
+ /* Buffer Address store. */
+ buffAdd_t **ba;
+#endif
+ nic_t *nic;
+} ring_info_t;
+/* Fifo specific structure */
+typedef struct fifo_info {
+ /* FIFO number */
+ int fifo_no;
+
+ /* Maximum TxDs per TxDL */
+ int max_txds;
+
+ /* Place holder of all the TX List's Phy and Virt addresses. */
+ list_info_hold_t *list_info;
+
+ /*
+ * Current offset within the tx FIFO where driver would write
+ * new Tx frame
+ */
+ tx_curr_put_info_t tx_curr_put_info;
+
+ /*
+ * Current offset within tx FIFO from where the driver would start freeing
+ * the buffers
+ */
+ tx_curr_get_info_t tx_curr_get_info;
+
+ nic_t *nic;
+} fifo_info_t;
+
+/* Information related to the Tx and Rx FIFOs and Rings of Xena
+ * is maintained in this structure.
+ */
+typedef struct mac_info {
/* tx side stuff */
/* logical pointer of start of each Tx FIFO */
TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS];
-/* Current offset within tx_FIFO_start, where driver would write new Tx frame*/
- tx_curr_put_info_t tx_curr_put_info[MAX_TX_FIFOS];
- tx_curr_get_info_t tx_curr_get_info[MAX_TX_FIFOS];
+ /* Fifo specific structure */
+ fifo_info_t fifos[MAX_TX_FIFOS];
+
+/* rx side stuff */
+ /* Ring specific structure */
+ ring_info_t rings[MAX_RX_RINGS];
+
+ u16 rmac_pause_time;
+ u16 mc_pause_threshold_q0q3;
+ u16 mc_pause_threshold_q4q7;
void *stats_mem; /* original pointer to allocated mem */
dma_addr_t stats_mem_phy; /* Physical address of the stat block */
@@ -485,12 +642,6 @@ typedef struct {
int usage_cnt;
} usr_addr_t;
-/* Structure that holds the Phy and virt addresses of the Blocks */
-typedef struct rx_block_info {
- RxD_t *block_virt_addr;
- dma_addr_t block_dma_addr;
-} rx_block_info_t;
-
/* Default Tunable parameters of the NIC. */
#define DEFAULT_FIFO_LEN 4096
#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
@@ -499,7 +650,20 @@ typedef struct rx_block_info {
#define LARGE_BLK_CNT 100
/* Structure representing one instance of the NIC */
-typedef struct s2io_nic {
+struct s2io_nic {
+#ifdef CONFIG_S2IO_NAPI
+ /*
+ * Count of packets to be processed in a given iteration; it is indicated
+ * by the quota field of the device structure when NAPI is enabled.
+ */
+ int pkts_to_process;
+#endif
+ struct net_device *dev;
+ mac_info_t mac_control;
+ struct config_param config;
+ struct pci_dev *pdev;
+ void __iomem *bar0;
+ void __iomem *bar1;
#define MAX_MAC_SUPPORTED 16
#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
@@ -507,33 +671,20 @@ typedef struct s2io_nic {
macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
struct net_device_stats stats;
- void __iomem *bar0;
- void __iomem *bar1;
- struct config_param config;
- mac_info_t mac_control;
int high_dma_flag;
int device_close_flag;
int device_enabled_once;
- char name[32];
+ char name[50];
struct tasklet_struct task;
volatile unsigned long tasklet_status;
- struct timer_list timer;
- struct net_device *dev;
- struct pci_dev *pdev;
- u16 vendor_id;
- u16 device_id;
- u16 ccmd;
- u32 cbar0_1;
- u32 cbar0_2;
- u32 cbar1_1;
- u32 cbar1_2;
- u32 cirq;
- u8 cache_line;
- u32 rom_expansion;
- u16 pcix_cmd;
- u32 irq;
+ /* Timer that handles I/O errors/exceptions */
+ struct timer_list alarm_timer;
+
+ /* Space to back up the PCI config space */
+ u32 config_space[256 / sizeof(u32)];
+
atomic_t rx_bufs_left[MAX_RX_RINGS];
spinlock_t tx_lock;
@@ -558,27 +709,11 @@ typedef struct s2io_nic {
u16 tx_err_count;
u16 rx_err_count;
-#ifndef CONFIG_S2IO_NAPI
- /* Index to the absolute position of the put pointer of Rx ring. */
- int put_pos[MAX_RX_RINGS];
-#endif
-
- /*
- * Place holders for the virtual and physical addresses of
- * all the Rx Blocks
- */
- rx_block_info_t rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
- int block_count[MAX_RX_RINGS];
- int pkt_cnt[MAX_RX_RINGS];
-
- /* Place holder of all the TX List's Phy and Virt addresses. */
- list_info_hold_t *list_info[MAX_TX_FIFOS];
-
/* Id timer, used to blink NIC to physically identify NIC. */
struct timer_list id_timer;
/* Restart timer, used to restart NIC if the device is stuck and
- * a schedule task that will set the correct Link state once the
+ * a scheduled task that will set the correct Link state once the
* NIC's PHY has stabilized after a state change.
*/
#ifdef INIT_TQUEUE
@@ -589,12 +724,12 @@ typedef struct s2io_nic {
struct work_struct set_link_task;
#endif
- /* Flag that can be used to turn on or turn off the Rx checksum
+ /* Flag that can be used to turn on or turn off the Rx checksum
* offload feature.
*/
int rx_csum;
- /* after blink, the adapter must be restored with original
+ /* after blink, the adapter must be restored with original
* values.
*/
u64 adapt_ctrl_org;
@@ -604,16 +739,19 @@ typedef struct s2io_nic {
#define LINK_DOWN 1
#define LINK_UP 2
-#ifdef CONFIG_2BUFF_MODE
- /* Buffer Address store. */
- buffAdd_t **ba[MAX_RX_RINGS];
-#endif
int task_flag;
#define CARD_DOWN 1
#define CARD_UP 2
atomic_t card_state;
volatile unsigned long link_state;
-} nic_t;
+ struct vlan_group *vlgrp;
+#define XFRAME_I_DEVICE 1
+#define XFRAME_II_DEVICE 2
+ u8 device_type;
+
+ spinlock_t rx_lock;
+ atomic_t isr_cnt;
+};
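/*
 * A minimal sketch of what the config_space[] member above could back,
 * assuming it shadows the full 256-byte PCI configuration space
 * (hypothetical helper name, not taken from this patch).
 */
static inline void s2io_save_config_space(nic_t *sp)
{
	int i;

	/* walk the 256-byte config space one dword at a time */
	for (i = 0; i < 256; i += 4)
		pci_read_config_dword(sp->pdev, i, &sp->config_space[i / 4]);
}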
#define RESET_ERROR 1;
#define CMD_ERROR 2;
@@ -622,9 +760,10 @@ typedef struct s2io_nic {
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
- u64 ret = readl(addr + 4);
- ret <<= 32;
- ret |= readl(addr);
+ u64 ret = 0;
+ ret = readl(addr + 4);
+ ret = readl(addr + 4);
+ ret <<= 32;
+ ret |= readl(addr);
return ret;
}
@@ -637,10 +776,10 @@ static inline void writeq(u64 val, void __iomem *addr)
writel((u32) (val >> 32), (addr + 4));
}
-/* In 32 bit modes, some registers have to be written in a
+/* In 32 bit modes, some registers have to be written in a
* particular order to expect correct hardware operation. The
- * macro SPECIAL_REG_WRITE is used to perform such ordered
- * writes. Defines UF (Upper First) and LF (Lower First) will
+ * macro SPECIAL_REG_WRITE is used to perform such ordered
+ * writes. Defines UF (Upper First) and LF (Lower First) will
* be used to specify the required write order.
*/
#define UF 1
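/*
 * A minimal sketch of the ordered write described above, assuming the same
 * split used by writeq() (lower 32 bits at addr, upper 32 bits at addr + 4);
 * the driver's actual SPECIAL_REG_WRITE() below may differ in detail.
 */
static inline void special_reg_write_sketch(u64 val, void __iomem *addr, int order)
{
	if (order == UF) {			/* Upper First */
		writel((u32) (val >> 32), (addr + 4));
		writel((u32) val, addr);
	} else {				/* Lower First */
		writel((u32) val, addr);
		writel((u32) (val >> 32), (addr + 4));
	}
}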
@@ -716,6 +855,7 @@ static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate
PCC_FB_ECC Error. */
+#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
/*
* Prototype declaration.
*/
@@ -725,36 +865,30 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
static int init_shared_mem(struct s2io_nic *sp);
static void free_shared_mem(struct s2io_nic *sp);
static int init_nic(struct s2io_nic *nic);
-#ifndef CONFIG_S2IO_NAPI
-static void rx_intr_handler(struct s2io_nic *sp);
-#endif
-static void tx_intr_handler(struct s2io_nic *sp);
+static void rx_intr_handler(ring_info_t *ring_data);
+static void tx_intr_handler(fifo_info_t *fifo_data);
static void alarm_intr_handler(struct s2io_nic *sp);
static int s2io_starter(void);
-static void s2io_closer(void);
+void s2io_closer(void);
static void s2io_tx_watchdog(struct net_device *dev);
static void s2io_tasklet(unsigned long dev_addr);
static void s2io_set_multicast(struct net_device *dev);
-#ifndef CONFIG_2BUFF_MODE
-static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no);
-#else
-static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
- buffAdd_t * ba);
-#endif
-static void s2io_link(nic_t * sp, int link);
-static void s2io_reset(nic_t * sp);
-#ifdef CONFIG_S2IO_NAPI
+static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
+void s2io_link(nic_t * sp, int link);
+void s2io_reset(nic_t * sp);
+#if defined(CONFIG_S2IO_NAPI)
static int s2io_poll(struct net_device *dev, int *budget);
#endif
static void s2io_init_pci(nic_t * sp);
-static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
+int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
+static void s2io_alarm_handle(unsigned long data);
static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
-static int verify_xena_quiescence(u64 val64, int flag);
+static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
static struct ethtool_ops netdev_ethtool_ops;
static void s2io_set_link(unsigned long data);
-static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t * nic);
-static int s2io_card_up(nic_t * nic);
-
+int s2io_set_swapper(nic_t * sp);
+static void s2io_card_down(nic_t *nic);
+static int s2io_card_up(nic_t *nic);
+int get_xena_rev_id(struct pci_dev *pdev);
#endif /* _S2IO_H */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f15739481d6..d7c98515fdf 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "0.8"
+#define DRV_VERSION "0.9"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
@@ -79,8 +79,8 @@ static const struct pci_device_id skge_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
- { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) },
{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -189,7 +189,7 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
{
u32 supported;
- if (iscopper(hw)) {
+ if (hw->copper) {
supported = SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
| SUPPORTED_100baseT_Half
@@ -222,7 +222,7 @@ static int skge_get_settings(struct net_device *dev,
ecmd->transceiver = XCVR_INTERNAL;
ecmd->supported = skge_supported_modes(hw);
- if (iscopper(hw)) {
+ if (hw->copper) {
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy_addr;
} else
@@ -876,6 +876,9 @@ static int skge_rx_fill(struct skge_port *skge)
static void skge_link_up(struct skge_port *skge)
{
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
+ LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
+
netif_carrier_on(skge->netdev);
if (skge->tx_avail > MAX_SKB_FRAGS + 1)
netif_wake_queue(skge->netdev);
@@ -894,6 +897,7 @@ static void skge_link_up(struct skge_port *skge)
static void skge_link_down(struct skge_port *skge)
{
+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
netif_carrier_off(skge->netdev);
netif_stop_queue(skge->netdev);
@@ -1599,7 +1603,7 @@ static void yukon_init(struct skge_hw *hw, int port)
adv = PHY_AN_CSMA;
if (skge->autoneg == AUTONEG_ENABLE) {
- if (iscopper(hw)) {
+ if (hw->copper) {
if (skge->advertising & ADVERTISED_1000baseT_Full)
ct1000 |= PHY_M_1000C_AFD;
if (skge->advertising & ADVERTISED_1000baseT_Half)
@@ -1691,7 +1695,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
/* Set hardware config mode */
reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
- reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
+ reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
/* Clear GMC reset */
skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
@@ -1780,7 +1784,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
reg &= ~GMF_RX_F_FL_ON;
skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
- skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
+ /*
+ * because Pause Packet Truncation in GMAC is not working
+ * we have to increase the Flush Threshold to 64 bytes
+ * in order to flush pause packets in Rx FIFO on Yukon-1
+ */
+ skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
/* Configure Tx MAC FIFO */
skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
@@ -2670,18 +2679,6 @@ static void skge_error_irq(struct skge_hw *hw)
/* Timestamp (unused) overflow */
if (hwstatus & IS_IRQ_TIST_OV)
skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
-
- if (hwstatus & IS_IRQ_SENSOR) {
- /* no sensors on 32-bit Yukon */
- if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
- printk(KERN_ERR PFX "ignoring bogus sensor interrups\n");
- skge_write32(hw, B0_HWE_IMSK,
- IS_ERR_MSK & ~IS_IRQ_SENSOR);
- } else
- printk(KERN_WARNING PFX "sensor interrupt\n");
- }
-
-
}
if (hwstatus & IS_RAM_RD_PAR) {
@@ -2712,9 +2709,10 @@ static void skge_error_irq(struct skge_hw *hw)
skge_pci_clear(hw);
+ /* if error still set then just ignore it */
hwstatus = skge_read32(hw, B0_HWE_ISRC);
if (hwstatus & IS_IRQ_STAT) {
- printk(KERN_WARNING PFX "IRQ status %x: still set ignoring hardware errors\n",
+ pr_debug("IRQ status %x: still set ignoring hardware errors\n",
hwstatus);
hw->intr_mask &= ~IS_HW_ERR;
}
@@ -2876,7 +2874,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
static int skge_reset(struct skge_hw *hw)
{
u16 ctst;
- u8 t8, mac_cfg;
+ u8 t8, mac_cfg, pmd_type, phy_type;
int i;
ctst = skge_read16(hw, B0_CTST);
@@ -2895,18 +2893,19 @@ static int skge_reset(struct skge_hw *hw)
ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
hw->chip_id = skge_read8(hw, B2_CHIP_ID);
- hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
- hw->pmd_type = skge_read8(hw, B2_PMD_TYP);
+ phy_type = skge_read8(hw, B2_E_1) & 0xf;
+ pmd_type = skge_read8(hw, B2_PMD_TYP);
+ hw->copper = (pmd_type == 'T' || pmd_type == '1');
switch (hw->chip_id) {
case CHIP_ID_GENESIS:
- switch (hw->phy_type) {
+ switch (phy_type) {
case SK_PHY_BCOM:
hw->phy_addr = PHY_ADDR_BCOM;
break;
default:
printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
- pci_name(hw->pdev), hw->phy_type);
+ pci_name(hw->pdev), phy_type);
return -EOPNOTSUPP;
}
break;
@@ -2914,13 +2913,10 @@ static int skge_reset(struct skge_hw *hw)
case CHIP_ID_YUKON:
case CHIP_ID_YUKON_LITE:
case CHIP_ID_YUKON_LP:
- if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S')
- hw->phy_type = SK_PHY_MARV_COPPER;
+ if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
+ hw->copper = 1;
hw->phy_addr = PHY_ADDR_MARV;
- if (!iscopper(hw))
- hw->phy_type = SK_PHY_MARV_FIBER;
-
break;
default:
@@ -2948,12 +2944,20 @@ static int skge_reset(struct skge_hw *hw)
else
hw->ram_size = t8 * 4096;
+ hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_init(hw);
else {
/* switch power to VCC (WA for VAUX problem) */
skge_write8(hw, B0_POWER_CTRL,
PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+ /* avoid boards with stuck Hardware error bits */
+ if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
+ (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
+ printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
+ hw->intr_mask &= ~IS_HW_ERR;
+ }
+
for (i = 0; i < hw->ports; i++) {
skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
@@ -2994,7 +2998,6 @@ static int skge_reset(struct skge_hw *hw)
skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
skge_write32(hw, B2_IRQM_CTRL, TIM_START);
- hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
if (hw->chip_id != CHIP_ID_GENESIS)
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index b432f1bb816..f1680beb8e6 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -214,8 +214,6 @@ enum {
/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
enum {
- IS_ERR_MSK = 0x00003fff,/* All Error bits */
-
IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
@@ -230,6 +228,12 @@ enum {
IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
+
+ IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
+ | IS_NO_STAT_M1 | IS_NO_STAT_M2
+ | IS_RAM_RD_PAR | IS_RAM_WR_PAR
+ | IS_M1_PAR_ERR | IS_M2_PAR_ERR
+ | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
};
/* B2_TST_CTRL1 8 bit Test Control Register 1 */
@@ -2456,24 +2460,17 @@ struct skge_hw {
u8 chip_id;
u8 chip_rev;
- u8 phy_type;
- u8 pmd_type;
- u16 phy_addr;
+ u8 copper;
u8 ports;
u32 ram_size;
u32 ram_offset;
+ u16 phy_addr;
struct tasklet_struct ext_tasklet;
spinlock_t phy_lock;
};
-
-static inline int iscopper(const struct skge_hw *hw)
-{
- return (hw->pmd_type == 'T');
-}
-
enum {
FLOW_MODE_NONE = 0, /* No Flow-Control */
FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 6d9dae60a69..ba8593ac3f8 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -68,6 +68,7 @@ static const char version[] =
#include <linux/etherdevice.h>
#include <asm/io.h>
+#include <asm/irq.h>
#include <asm/system.h>
#include "8390.h"
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index cdc9cc873e0..90b818a8de6 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -1,6 +1,11 @@
/*
* sonic.c
*
+ * (C) 2005 Finn Thain
+ *
+ * Converted to DMA API, added zero-copy buffer handling, and
+ * (from the mac68k project) introduced dhd's support for 16-bit cards.
+ *
* (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*
* This driver is based on work from Andreas Busse, but most of
@@ -9,12 +14,23 @@
* (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
*
* Core code included by system sonic drivers
+ *
+ * And... partially rewritten again by David Huggins-Daines in order
+ * to cope with screwed up Macintosh NICs that may or may not use
+ * 16-bit DMA.
+ *
+ * (C) 1999 David Huggins-Daines <dhd@debian.org>
+ *
*/
/*
* Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
* National Semiconductors data sheet for the DP83932B Sonic Ethernet
* controller, and the files "8390.c" and "skeleton.c" in this directory.
+ *
+ * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
+ * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
+ * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
*/
@@ -28,6 +44,9 @@
*/
static int sonic_open(struct net_device *dev)
{
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
+
if (sonic_debug > 2)
printk("sonic_open: initializing sonic driver.\n");
@@ -40,14 +59,59 @@ static int sonic_open(struct net_device *dev)
* This means that during execution of the handler interrupt are disabled
* covering another bug otherwise corrupting data. This doesn't mean
* this glue works ok under all situations.
+ *
+ * Note (dhd): this also appears to prevent lockups on the Macintrash
+ * when more than one Ethernet card is installed (knock on wood)
+ *
+ * Note (fthain): whether the above is still true is anyone's guess. Certainly
+ * the buffer handling algorithms will not tolerate re-entrance without some
+ * mutual exclusion added. Anyway, the memcpy has now been eliminated from the
+ * rx code to make this a faster "fast interrupt".
*/
-// if (sonic_request_irq(dev->irq, &sonic_interrupt, 0, "sonic", dev)) {
- if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT,
- "sonic", dev)) {
- printk("\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
+ if (request_irq(dev->irq, &sonic_interrupt, SONIC_IRQ_FLAG, "sonic", dev)) {
+ printk(KERN_ERR "\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
return -EAGAIN;
}
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ if (skb == NULL) {
+ while(i > 0) { /* free any that were allocated successfully */
+ i--;
+ dev_kfree_skb(lp->rx_skb[i]);
+ lp->rx_skb[i] = NULL;
+ }
+ printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ skb->dev = dev;
+ /* align IP header unless DMA requires otherwise */
+ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(skb, 2);
+ lp->rx_skb[i] = skb;
+ }
+
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!laddr) {
+ while(i > 0) { /* free any that were mapped successfully */
+ i--;
+ dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
+ lp->rx_laddr[i] = (dma_addr_t)0;
+ }
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ dev_kfree_skb(lp->rx_skb[i]);
+ lp->rx_skb[i] = NULL;
+ }
+ printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ lp->rx_laddr[i] = laddr;
+ }
+
/*
* Initialize the SONIC
*/
@@ -67,7 +131,8 @@ static int sonic_open(struct net_device *dev)
*/
static int sonic_close(struct net_device *dev)
{
- unsigned int base_addr = dev->base_addr;
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
if (sonic_debug > 2)
printk("sonic_close\n");
@@ -77,20 +142,56 @@ static int sonic_close(struct net_device *dev)
/*
* stop the SONIC, disable interrupts
*/
- SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
- sonic_free_irq(dev->irq, dev); /* release the IRQ */
+ /* unmap and free skbs that haven't been transmitted */
+ for (i = 0; i < SONIC_NUM_TDS; i++) {
+ if(lp->tx_laddr[i]) {
+ dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
+ lp->tx_laddr[i] = (dma_addr_t)0;
+ }
+ if(lp->tx_skb[i]) {
+ dev_kfree_skb(lp->tx_skb[i]);
+ lp->tx_skb[i] = NULL;
+ }
+ }
+
+ /* unmap and free the receive buffers */
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ if(lp->rx_laddr[i]) {
+ dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
+ lp->rx_laddr[i] = (dma_addr_t)0;
+ }
+ if(lp->rx_skb[i]) {
+ dev_kfree_skb(lp->rx_skb[i]);
+ lp->rx_skb[i] = NULL;
+ }
+ }
+
+ free_irq(dev->irq, dev); /* release the IRQ */
return 0;
}
static void sonic_tx_timeout(struct net_device *dev)
{
- struct sonic_local *lp = (struct sonic_local *) dev->priv;
- printk("%s: transmit timed out.\n", dev->name);
-
+ struct sonic_local *lp = netdev_priv(dev);
+ int i;
+ /* Stop the interrupts for this */
+ SONIC_WRITE(SONIC_IMR, 0);
+ /* We could resend the original skbs. Easier to re-initialise. */
+ for (i = 0; i < SONIC_NUM_TDS; i++) {
+ if(lp->tx_laddr[i]) {
+ dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
+ lp->tx_laddr[i] = (dma_addr_t)0;
+ }
+ if(lp->tx_skb[i]) {
+ dev_kfree_skb(lp->tx_skb[i]);
+ lp->tx_skb[i] = NULL;
+ }
+ }
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
@@ -100,60 +201,92 @@ static void sonic_tx_timeout(struct net_device *dev)
/*
* transmit packet
+ *
+ * Appends new TD during transmission thus avoiding any TX interrupts
+ * until we run out of TDs.
+ * This routine interacts closely with the ISR in that it may,
+ * set tx_skb[i]
+ * reset the status flags of the new TD
+ * set and reset EOL flags
+ * stop the tx queue
+ * The ISR interacts with this routine in various ways. It may,
+ * reset tx_skb[i]
+ * test the EOL and status flags of the TDs
+ * wake the tx queue
+ * Concurrently with all of this, the SONIC is potentially writing to
+ * the status flags of the TDs.
+ * Until some mutual exclusion is added, this code will not work with SMP. However,
+ * MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
+
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
- struct sonic_local *lp = (struct sonic_local *) dev->priv;
- unsigned int base_addr = dev->base_addr;
- unsigned int laddr;
- int entry, length;
-
- netif_stop_queue(dev);
+ struct sonic_local *lp = netdev_priv(dev);
+ dma_addr_t laddr;
+ int length;
+ int entry = lp->next_tx;
if (sonic_debug > 2)
printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
+ length = skb->len;
+ if (length < ETH_ZLEN) {
+ skb = skb_padto(skb, ETH_ZLEN);
+ if (skb == NULL)
+ return 0;
+ length = ETH_ZLEN;
+ }
+
/*
* Map the packet data into the logical DMA address space
*/
- if ((laddr = vdma_alloc(CPHYSADDR(skb->data), skb->len)) == ~0UL) {
- printk("%s: no VDMA entry for transmit available.\n",
- dev->name);
+
+ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
+ if (!laddr) {
+ printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb(skb);
- netif_start_queue(dev);
return 1;
}
- entry = lp->cur_tx & SONIC_TDS_MASK;
+
+ sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
+ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
+ sonic_tda_put(dev, entry, SONIC_TD_LINK,
+ sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
+
+ /*
+ * Must set tx_skb[entry] only after clearing status, and
+ * before clearing EOL and before stopping queue
+ */
+ wmb();
+ lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
lp->tx_skb[entry] = skb;
- length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
- flush_cache_all();
+ wmb();
+ sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
+ sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
+ lp->eol_tx = entry;
- /*
- * Setup the transmit descriptor and issue the transmit command.
- */
- lp->tda[entry].tx_status = 0; /* clear status */
- lp->tda[entry].tx_frag_count = 1; /* single fragment */
- lp->tda[entry].tx_pktsize = length; /* length of packet */
- lp->tda[entry].tx_frag_ptr_l = laddr & 0xffff;
- lp->tda[entry].tx_frag_ptr_h = laddr >> 16;
- lp->tda[entry].tx_frag_size = length;
- lp->cur_tx++;
- lp->stats.tx_bytes += length;
+ lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
+ if (lp->tx_skb[lp->next_tx] != NULL) {
+ /* The ring is full, the ISR has yet to process the next TD. */
+ if (sonic_debug > 3)
+ printk("%s: stopping queue\n", dev->name);
+ netif_stop_queue(dev);
+ /* after this packet, wait for ISR to free up some TDAs */
+ } else netif_start_queue(dev);
if (sonic_debug > 2)
- printk("sonic_send_packet: issueing Tx command\n");
+ printk("sonic_send_packet: issuing Tx command\n");
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
dev->trans_start = jiffies;
- if (lp->cur_tx < lp->dirty_tx + SONIC_NUM_TDS)
- netif_start_queue(dev);
- else
- lp->tx_full = 1;
-
return 0;
}
@@ -164,175 +297,199 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
static irqreturn_t sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
- unsigned int base_addr = dev->base_addr;
- struct sonic_local *lp;
+ struct sonic_local *lp = netdev_priv(dev);
int status;
if (dev == NULL) {
- printk("sonic_interrupt: irq %d for unknown device.\n", irq);
+ printk(KERN_ERR "sonic_interrupt: irq %d for unknown device.\n", irq);
return IRQ_NONE;
}
- lp = (struct sonic_local *) dev->priv;
-
- status = SONIC_READ(SONIC_ISR);
- SONIC_WRITE(SONIC_ISR, 0x7fff); /* clear all bits */
-
- if (sonic_debug > 2)
- printk("sonic_interrupt: ISR=%x\n", status);
-
- if (status & SONIC_INT_PKTRX) {
- sonic_rx(dev); /* got packet(s) */
- }
-
- if (status & SONIC_INT_TXDN) {
- int dirty_tx = lp->dirty_tx;
-
- while (dirty_tx < lp->cur_tx) {
- int entry = dirty_tx & SONIC_TDS_MASK;
- int status = lp->tda[entry].tx_status;
+ if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
+ return IRQ_NONE;
- if (sonic_debug > 3)
- printk
- ("sonic_interrupt: status %d, cur_tx %d, dirty_tx %d\n",
- status, lp->cur_tx, lp->dirty_tx);
+ do {
+ if (status & SONIC_INT_PKTRX) {
+ if (sonic_debug > 2)
+ printk("%s: packet rx\n", dev->name);
+ sonic_rx(dev); /* got packet(s) */
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
+ }
- if (status == 0) {
- /* It still hasn't been Txed, kick the sonic again */
- SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
- break;
- }
+ if (status & SONIC_INT_TXDN) {
+ int entry = lp->cur_tx;
+ int td_status;
+ int freed_some = 0;
- /* put back EOL and free descriptor */
- lp->tda[entry].tx_frag_count = 0;
- lp->tda[entry].tx_status = 0;
-
- if (status & 0x0001)
- lp->stats.tx_packets++;
- else {
- lp->stats.tx_errors++;
- if (status & 0x0642)
- lp->stats.tx_aborted_errors++;
- if (status & 0x0180)
- lp->stats.tx_carrier_errors++;
- if (status & 0x0020)
- lp->stats.tx_window_errors++;
- if (status & 0x0004)
- lp->stats.tx_fifo_errors++;
- }
+ /* At this point, cur_tx is the index of a TD that is one of:
+ * unallocated/freed (status set & tx_skb[entry] clear)
+ * allocated and sent (status set & tx_skb[entry] set )
+ * allocated and not yet sent (status clear & tx_skb[entry] set )
+ * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ */
- /* We must free the original skb */
- if (lp->tx_skb[entry]) {
+ if (sonic_debug > 2)
+ printk("%s: tx done\n", dev->name);
+
+ while (lp->tx_skb[entry] != NULL) {
+ if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
+ break;
+
+ if (td_status & 0x0001) {
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
+ } else {
+ lp->stats.tx_errors++;
+ if (td_status & 0x0642)
+ lp->stats.tx_aborted_errors++;
+ if (td_status & 0x0180)
+ lp->stats.tx_carrier_errors++;
+ if (td_status & 0x0020)
+ lp->stats.tx_window_errors++;
+ if (td_status & 0x0004)
+ lp->stats.tx_fifo_errors++;
+ }
+
+ /* We must free the original skb */
dev_kfree_skb_irq(lp->tx_skb[entry]);
- lp->tx_skb[entry] = 0;
+ lp->tx_skb[entry] = NULL;
+ /* and unmap DMA buffer */
+ dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
+ lp->tx_laddr[entry] = (dma_addr_t)0;
+ freed_some = 1;
+
+ if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
+ entry = (entry + 1) & SONIC_TDS_MASK;
+ break;
+ }
+ entry = (entry + 1) & SONIC_TDS_MASK;
}
- /* and the VDMA address */
- vdma_free(lp->tx_laddr[entry]);
- dirty_tx++;
- }
- if (lp->tx_full
- && dirty_tx + SONIC_NUM_TDS > lp->cur_tx + 2) {
- /* The ring is no longer full, clear tbusy. */
- lp->tx_full = 0;
- netif_wake_queue(dev);
+ if (freed_some || lp->tx_skb[entry] == NULL)
+ netif_wake_queue(dev); /* The ring is no longer full */
+ lp->cur_tx = entry;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
}
- lp->dirty_tx = dirty_tx;
- }
+ /*
+ * check error conditions
+ */
+ if (status & SONIC_INT_RFO) {
+ if (sonic_debug > 1)
+ printk("%s: rx fifo overrun\n", dev->name);
+ lp->stats.rx_fifo_errors++;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_RDE) {
+ if (sonic_debug > 1)
+ printk("%s: rx descriptors exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_RBAE) {
+ if (sonic_debug > 1)
+ printk("%s: rx buffer area exceeded\n", dev->name);
+ lp->stats.rx_dropped++;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
+ }
- /*
- * check error conditions
- */
- if (status & SONIC_INT_RFO) {
- printk("%s: receive fifo underrun\n", dev->name);
- lp->stats.rx_fifo_errors++;
- }
- if (status & SONIC_INT_RDE) {
- printk("%s: receive descriptors exhausted\n", dev->name);
- lp->stats.rx_dropped++;
- }
- if (status & SONIC_INT_RBE) {
- printk("%s: receive buffer exhausted\n", dev->name);
- lp->stats.rx_dropped++;
- }
- if (status & SONIC_INT_RBAE) {
- printk("%s: receive buffer area exhausted\n", dev->name);
- lp->stats.rx_dropped++;
- }
+ /* counter overruns; all counters are 16bit wide */
+ if (status & SONIC_INT_FAE) {
+ lp->stats.rx_frame_errors += 65536;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_CRC) {
+ lp->stats.rx_crc_errors += 65536;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
+ }
+ if (status & SONIC_INT_MP) {
+ lp->stats.rx_missed_errors += 65536;
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
+ }
- /* counter overruns; all counters are 16bit wide */
- if (status & SONIC_INT_FAE)
- lp->stats.rx_frame_errors += 65536;
- if (status & SONIC_INT_CRC)
- lp->stats.rx_crc_errors += 65536;
- if (status & SONIC_INT_MP)
- lp->stats.rx_missed_errors += 65536;
+ /* transmit error */
+ if (status & SONIC_INT_TXER) {
+ if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
+ printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+ }
- /* transmit error */
- if (status & SONIC_INT_TXER)
- lp->stats.tx_errors++;
+ /* bus retry */
+ if (status & SONIC_INT_BR) {
+ printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
+ dev->name);
+ /* ... to help debug DMA problems causing endless interrupts. */
+ /* Bounce the eth interface to turn on the interrupt again. */
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
+ }
- /*
- * clear interrupt bits and return
- */
- SONIC_WRITE(SONIC_ISR, status);
+ /* load CAM done */
+ if (status & SONIC_INT_LCD)
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
+ } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
return IRQ_HANDLED;
}
/*
- * We have a good packet(s), get it/them out of the buffers.
+ * We have a good packet(s), pass it/them up the network stack.
*/
static void sonic_rx(struct net_device *dev)
{
- unsigned int base_addr = dev->base_addr;
- struct sonic_local *lp = (struct sonic_local *) dev->priv;
- sonic_rd_t *rd = &lp->rda[lp->cur_rx & SONIC_RDS_MASK];
+ struct sonic_local *lp = netdev_priv(dev);
int status;
-
- while (rd->in_use == 0) {
- struct sk_buff *skb;
+ int entry = lp->cur_rx;
+
+ while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
+ struct sk_buff *used_skb;
+ struct sk_buff *new_skb;
+ dma_addr_t new_laddr;
+ u16 bufadr_l;
+ u16 bufadr_h;
int pkt_len;
- unsigned char *pkt_ptr;
- status = rd->rx_status;
- if (sonic_debug > 3)
- printk("status %x, cur_rx %d, cur_rra %x\n",
- status, lp->cur_rx, lp->cur_rra);
+ status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
if (status & SONIC_RCR_PRX) {
- pkt_len = rd->rx_pktlen;
- pkt_ptr =
- (char *)
- sonic_chiptomem((rd->rx_pktptr_h << 16) +
- rd->rx_pktptr_l);
-
- if (sonic_debug > 3)
- printk
- ("pktptr %p (rba %p) h:%x l:%x, bsize h:%x l:%x\n",
- pkt_ptr, lp->rba, rd->rx_pktptr_h,
- rd->rx_pktptr_l,
- SONIC_READ(SONIC_RBWC1),
- SONIC_READ(SONIC_RBWC0));
-
/* Malloc up new buffer. */
- skb = dev_alloc_skb(pkt_len + 2);
- if (skb == NULL) {
- printk
- ("%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
+ if (new_skb == NULL) {
+ printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ new_skb->dev = dev;
+ /* provide 16 byte IP header alignment unless DMA requires otherwise */
+ if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+ skb_reserve(new_skb, 2);
+
+ new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
+ SONIC_RBSIZE, DMA_FROM_DEVICE);
+ if (!new_laddr) {
+ dev_kfree_skb(new_skb);
+ printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
break;
}
- skb->dev = dev;
- skb_reserve(skb, 2); /* 16 byte align */
- skb_put(skb, pkt_len); /* Make room */
- eth_copy_and_sum(skb, pkt_ptr, pkt_len, 0);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb); /* pass the packet to upper layers */
+
+		/* now that we have a new skb to replace it, pass the used one up the stack */
+ dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
+ used_skb = lp->rx_skb[entry];
+ pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
+ skb_trim(used_skb, pkt_len);
+ used_skb->protocol = eth_type_trans(used_skb, dev);
+ netif_rx(used_skb);
dev->last_rx = jiffies;
lp->stats.rx_packets++;
lp->stats.rx_bytes += pkt_len;
+ /* and insert the new skb */
+ lp->rx_laddr[entry] = new_laddr;
+ lp->rx_skb[entry] = new_skb;
+
+ bufadr_l = (unsigned long)new_laddr & 0xffff;
+ bufadr_h = (unsigned long)new_laddr >> 16;
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
+ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
} else {
/* This should only happen, if we enable accepting broken packets. */
lp->stats.rx_errors++;
@@ -341,29 +498,35 @@ static void sonic_rx(struct net_device *dev)
if (status & SONIC_RCR_CRCR)
lp->stats.rx_crc_errors++;
}
-
- rd->in_use = 1;
- rd = &lp->rda[(++lp->cur_rx) & SONIC_RDS_MASK];
- /* now give back the buffer to the receive buffer area */
if (status & SONIC_RCR_LPKT) {
/*
- * this was the last packet out of the current receice buffer
+ * this was the last packet out of the current receive buffer
* give the buffer back to the SONIC
*/
- lp->cur_rra += sizeof(sonic_rr_t);
- if (lp->cur_rra >
- (lp->rra_laddr +
- (SONIC_NUM_RRS -
- 1) * sizeof(sonic_rr_t))) lp->cur_rra =
- lp->rra_laddr;
- SONIC_WRITE(SONIC_RWP, lp->cur_rra & 0xffff);
+ lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+ if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
+ SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+ if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
+ if (sonic_debug > 2)
+ printk("%s: rx buffer exhausted\n", dev->name);
+ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
+ }
} else
- printk
- ("%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
+ printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
dev->name);
+ /*
+ * give back the descriptor
+ */
+ sonic_rda_put(dev, entry, SONIC_RD_LINK,
+ sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+ sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
+ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
+ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
+ lp->eol_rx = entry;
+ lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
}
/*
- * If any worth-while packets have been received, dev_rint()
+ * If any worth-while packets have been received, netif_rx()
* has done a mark_bh(NET_BH) for us and will work on them
* when we get to the bottom-half routine.
*/
@@ -376,8 +539,7 @@ static void sonic_rx(struct net_device *dev)
*/
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
- struct sonic_local *lp = (struct sonic_local *) dev->priv;
- unsigned int base_addr = dev->base_addr;
+ struct sonic_local *lp = netdev_priv(dev);
/* read the tally counter from the SONIC and reset them */
lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
@@ -396,8 +558,7 @@ static struct net_device_stats *sonic_get_stats(struct net_device *dev)
*/
static void sonic_multicast_list(struct net_device *dev)
{
- struct sonic_local *lp = (struct sonic_local *) dev->priv;
- unsigned int base_addr = dev->base_addr;
+ struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
struct dev_mc_list *dmi = dev->mc_list;
unsigned char *addr;
@@ -413,20 +574,15 @@ static void sonic_multicast_list(struct net_device *dev)
rcr |= SONIC_RCR_AMC;
} else {
if (sonic_debug > 2)
- printk
- ("sonic_multicast_list: mc_count %d\n",
- dev->mc_count);
- lp->cda.cam_enable = 1; /* always enable our own address */
+ printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
+ sonic_set_cam_enable(dev, 1); /* always enable our own address */
for (i = 1; i <= dev->mc_count; i++) {
addr = dmi->dmi_addr;
dmi = dmi->next;
- lp->cda.cam_desc[i].cam_cap0 =
- addr[1] << 8 | addr[0];
- lp->cda.cam_desc[i].cam_cap1 =
- addr[3] << 8 | addr[2];
- lp->cda.cam_desc[i].cam_cap2 =
- addr[5] << 8 | addr[4];
- lp->cda.cam_enable |= (1 << i);
+ sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
+ sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
+ sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
+ sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
}
SONIC_WRITE(SONIC_CDC, 16);
/* issue Load CAM command */
@@ -447,19 +603,16 @@ static void sonic_multicast_list(struct net_device *dev)
*/
static int sonic_init(struct net_device *dev)
{
- unsigned int base_addr = dev->base_addr;
unsigned int cmd;
- struct sonic_local *lp = (struct sonic_local *) dev->priv;
- unsigned int rra_start;
- unsigned int rra_end;
+ struct sonic_local *lp = netdev_priv(dev);
int i;
/*
* put the Sonic into software-reset mode and
* disable all interrupts
*/
- SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
/*
@@ -475,34 +628,32 @@ static int sonic_init(struct net_device *dev)
if (sonic_debug > 2)
printk("sonic_init: initialize receive resource area\n");
- rra_start = lp->rra_laddr & 0xffff;
- rra_end =
- (rra_start + (SONIC_NUM_RRS * sizeof(sonic_rr_t))) & 0xffff;
-
for (i = 0; i < SONIC_NUM_RRS; i++) {
- lp->rra[i].rx_bufadr_l =
- (lp->rba_laddr + i * SONIC_RBSIZE) & 0xffff;
- lp->rra[i].rx_bufadr_h =
- (lp->rba_laddr + i * SONIC_RBSIZE) >> 16;
- lp->rra[i].rx_bufsize_l = SONIC_RBSIZE >> 1;
- lp->rra[i].rx_bufsize_h = 0;
+ u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
+ u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
+ sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
+ sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
}
/* initialize all RRA registers */
- SONIC_WRITE(SONIC_RSA, rra_start);
- SONIC_WRITE(SONIC_REA, rra_end);
- SONIC_WRITE(SONIC_RRP, rra_start);
- SONIC_WRITE(SONIC_RWP, rra_end);
+ lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
+ lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
+ SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
+
+ SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
+ SONIC_WRITE(SONIC_REA, lp->rra_end);
+ SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
+ SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
- SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE - 2) >> 1);
-
- lp->cur_rra =
- lp->rra_laddr + (SONIC_NUM_RRS - 1) * sizeof(sonic_rr_t);
+ SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
/* load the resource pointers */
if (sonic_debug > 3)
- printk("sonic_init: issueing RRRA command\n");
-
+ printk("sonic_init: issuing RRRA command\n");
+
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
i = 0;
while (i++ < 100) {
@@ -511,27 +662,30 @@ static int sonic_init(struct net_device *dev)
}
if (sonic_debug > 2)
- printk("sonic_init: status=%x\n", SONIC_READ(SONIC_CMD));
-
+ printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i);
+
/*
* Initialize the receive descriptors so that they
* become a circular linked list, ie. let the last
* descriptor point to the first again.
*/
if (sonic_debug > 2)
- printk("sonic_init: initialize receive descriptors\n");
- for (i = 0; i < SONIC_NUM_RDS; i++) {
- lp->rda[i].rx_status = 0;
- lp->rda[i].rx_pktlen = 0;
- lp->rda[i].rx_pktptr_l = 0;
- lp->rda[i].rx_pktptr_h = 0;
- lp->rda[i].rx_seqno = 0;
- lp->rda[i].in_use = 1;
- lp->rda[i].link =
- lp->rda_laddr + (i + 1) * sizeof(sonic_rd_t);
+ printk("sonic_init: initialize receive descriptors\n");
+ for (i=0; i<SONIC_NUM_RDS; i++) {
+ sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
+ sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
+ sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
+ sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
+ sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
+ sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
+ sonic_rda_put(dev, i, SONIC_RD_LINK,
+ lp->rda_laddr +
+ ((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
}
/* fix last descriptor */
- lp->rda[SONIC_NUM_RDS - 1].link = lp->rda_laddr;
+ sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
+ (lp->rda_laddr & 0xffff) | SONIC_EOL);
+ lp->eol_rx = SONIC_NUM_RDS - 1;
lp->cur_rx = 0;
SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
@@ -542,34 +696,34 @@ static int sonic_init(struct net_device *dev)
if (sonic_debug > 2)
printk("sonic_init: initialize transmit descriptors\n");
for (i = 0; i < SONIC_NUM_TDS; i++) {
- lp->tda[i].tx_status = 0;
- lp->tda[i].tx_config = 0;
- lp->tda[i].tx_pktsize = 0;
- lp->tda[i].tx_frag_count = 0;
- lp->tda[i].link =
- (lp->tda_laddr +
- (i + 1) * sizeof(sonic_td_t)) | SONIC_END_OF_LINKS;
+ sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
+ sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
+ sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
+ sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
+ sonic_tda_put(dev, i, SONIC_TD_LINK,
+ (lp->tda_laddr & 0xffff) +
+ (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->tx_skb[i] = NULL;
}
- lp->tda[SONIC_NUM_TDS - 1].link =
- (lp->tda_laddr & 0xffff) | SONIC_END_OF_LINKS;
+ /* fix last descriptor */
+ sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
+ (lp->tda_laddr & 0xffff));
SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
- lp->cur_tx = lp->dirty_tx = 0;
-
+ lp->cur_tx = lp->next_tx = 0;
+ lp->eol_tx = SONIC_NUM_TDS - 1;
+
/*
* put our own address to CAM desc[0]
*/
- lp->cda.cam_desc[0].cam_cap0 =
- dev->dev_addr[1] << 8 | dev->dev_addr[0];
- lp->cda.cam_desc[0].cam_cap1 =
- dev->dev_addr[3] << 8 | dev->dev_addr[2];
- lp->cda.cam_desc[0].cam_cap2 =
- dev->dev_addr[5] << 8 | dev->dev_addr[4];
- lp->cda.cam_enable = 1;
+ sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
+ sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
+ sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
+ sonic_set_cam_enable(dev, 1);
for (i = 0; i < 16; i++)
- lp->cda.cam_desc[i].cam_entry_pointer = i;
+ sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
/*
* initialize CAM registers
@@ -588,8 +742,8 @@ static int sonic_init(struct net_device *dev)
break;
}
if (sonic_debug > 2) {
- printk("sonic_init: CMD=%x, ISR=%x\n",
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR));
+		printk("sonic_init: CMD=%x, ISR=%x, i=%d\n",
+ SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
}
/*
@@ -604,7 +758,7 @@ static int sonic_init(struct net_device *dev)
cmd = SONIC_READ(SONIC_CMD);
if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
- printk("sonic_init: failed, status=%x\n", cmd);
+ printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
if (sonic_debug > 2)
printk("sonic_init: new status=%x\n",
diff --git a/drivers/net/sonic.h b/drivers/net/sonic.h
index c4a6d58e4af..cede969a8ba 100644
--- a/drivers/net/sonic.h
+++ b/drivers/net/sonic.h
@@ -1,5 +1,5 @@
/*
- * Helpfile for sonic.c
+ * Header file for sonic.c
*
* (C) Waldorf Electronics, Germany
* Written by Andreas Busse
@@ -9,10 +9,16 @@
* and pad structure members must be exchanged. Also, the structures
* need to be changed accordingly to the bus size.
*
- * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian),
- * see CONFIG_MACSONIC branch below.
+ * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian)
*
+ * 990611 David Huggins-Daines <dhd@debian.org>: This machine abstraction
+ * does not cope with 16-bit bus sizes very well. Therefore I have
+ * rewritten it with ugly macros and evil inlines.
+ *
+ * 050625 Finn Thain: introduced more 32-bit cards and dhd's support
+ * for 16-bit cards (from the mac68k project).
*/
+
#ifndef SONIC_H
#define SONIC_H
@@ -83,6 +89,7 @@
/*
* Error counters
*/
+
#define SONIC_CRCT 0x2c
#define SONIC_FAET 0x2d
#define SONIC_MPT 0x2e
@@ -182,14 +189,14 @@
#define SONIC_INT_BR 0x4000
#define SONIC_INT_HBL 0x2000
-#define SONIC_INT_LCD 0x1000
-#define SONIC_INT_PINT 0x0800
-#define SONIC_INT_PKTRX 0x0400
-#define SONIC_INT_TXDN 0x0200
-#define SONIC_INT_TXER 0x0100
-#define SONIC_INT_TC 0x0080
-#define SONIC_INT_RDE 0x0040
-#define SONIC_INT_RBE 0x0020
+#define SONIC_INT_LCD 0x1000
+#define SONIC_INT_PINT 0x0800
+#define SONIC_INT_PKTRX 0x0400
+#define SONIC_INT_TXDN 0x0200
+#define SONIC_INT_TXER 0x0100
+#define SONIC_INT_TC 0x0080
+#define SONIC_INT_RDE 0x0040
+#define SONIC_INT_RBE 0x0020
#define SONIC_INT_RBAE 0x0010
#define SONIC_INT_CRC 0x0008
#define SONIC_INT_FAE 0x0004
@@ -201,224 +208,61 @@
* The interrupts we allow.
*/
-#define SONIC_IMR_DEFAULT (SONIC_INT_BR | \
- SONIC_INT_LCD | \
- SONIC_INT_PINT | \
+#define SONIC_IMR_DEFAULT ( SONIC_INT_BR | \
+ SONIC_INT_LCD | \
+ SONIC_INT_RFO | \
SONIC_INT_PKTRX | \
SONIC_INT_TXDN | \
SONIC_INT_TXER | \
SONIC_INT_RDE | \
- SONIC_INT_RBE | \
SONIC_INT_RBAE | \
SONIC_INT_CRC | \
SONIC_INT_FAE | \
SONIC_INT_MP)
-#define SONIC_END_OF_LINKS 0x0001
-
-
-#ifdef CONFIG_MACSONIC
-/*
- * Big endian like structures on 680x0 Macs
- */
-
-typedef struct {
- u32 rx_bufadr_l; /* receive buffer ptr */
- u32 rx_bufadr_h;
-
- u32 rx_bufsize_l; /* no. of words in the receive buffer */
- u32 rx_bufsize_h;
-} sonic_rr_t;
-
-/*
- * Sonic receive descriptor. Receive descriptors are
- * kept in a linked list of these structures.
- */
-
-typedef struct {
- SREGS_PAD(pad0);
- u16 rx_status; /* status after reception of a packet */
- SREGS_PAD(pad1);
- u16 rx_pktlen; /* length of the packet incl. CRC */
-
- /*
- * Pointers to the location in the receive buffer area (RBA)
- * where the packet resides. A packet is always received into
- * a contiguous piece of memory.
- */
- SREGS_PAD(pad2);
- u16 rx_pktptr_l;
- SREGS_PAD(pad3);
- u16 rx_pktptr_h;
-
- SREGS_PAD(pad4);
- u16 rx_seqno; /* sequence no. */
-
- SREGS_PAD(pad5);
- u16 link; /* link to next RDD (end if EOL bit set) */
-
- /*
- * Owner of this descriptor, 0= driver, 1=sonic
- */
-
- SREGS_PAD(pad6);
- u16 in_use;
-
- caddr_t rda_next; /* pointer to next RD */
-} sonic_rd_t;
-
-
-/*
- * Describes a Transmit Descriptor
- */
-typedef struct {
- SREGS_PAD(pad0);
- u16 tx_status; /* status after transmission of a packet */
- SREGS_PAD(pad1);
- u16 tx_config; /* transmit configuration for this packet */
- SREGS_PAD(pad2);
- u16 tx_pktsize; /* size of the packet to be transmitted */
- SREGS_PAD(pad3);
- u16 tx_frag_count; /* no. of fragments */
-
- SREGS_PAD(pad4);
- u16 tx_frag_ptr_l;
- SREGS_PAD(pad5);
- u16 tx_frag_ptr_h;
- SREGS_PAD(pad6);
- u16 tx_frag_size;
-
- SREGS_PAD(pad7);
- u16 link; /* ptr to next descriptor */
-} sonic_td_t;
-
-
-/*
- * Describes an entry in the CAM Descriptor Area.
- */
-
-typedef struct {
- SREGS_PAD(pad0);
- u16 cam_entry_pointer;
- SREGS_PAD(pad1);
- u16 cam_cap0;
- SREGS_PAD(pad2);
- u16 cam_cap1;
- SREGS_PAD(pad3);
- u16 cam_cap2;
-} sonic_cd_t;
-
+#define SONIC_EOL 0x0001
#define CAM_DESCRIPTORS 16
-
-typedef struct {
- sonic_cd_t cam_desc[CAM_DESCRIPTORS];
- SREGS_PAD(pad);
- u16 cam_enable;
-} sonic_cda_t;
-
-#else /* original declarations, little endian 32 bit */
-
-/*
- * structure definitions
- */
-
-typedef struct {
- u32 rx_bufadr_l; /* receive buffer ptr */
- u32 rx_bufadr_h;
-
- u32 rx_bufsize_l; /* no. of words in the receive buffer */
- u32 rx_bufsize_h;
-} sonic_rr_t;
-
-/*
- * Sonic receive descriptor. Receive descriptors are
- * kept in a linked list of these structures.
- */
-
-typedef struct {
- u16 rx_status; /* status after reception of a packet */
- SREGS_PAD(pad0);
- u16 rx_pktlen; /* length of the packet incl. CRC */
- SREGS_PAD(pad1);
-
- /*
- * Pointers to the location in the receive buffer area (RBA)
- * where the packet resides. A packet is always received into
- * a contiguous piece of memory.
- */
- u16 rx_pktptr_l;
- SREGS_PAD(pad2);
- u16 rx_pktptr_h;
- SREGS_PAD(pad3);
-
- u16 rx_seqno; /* sequence no. */
- SREGS_PAD(pad4);
-
- u16 link; /* link to next RDD (end if EOL bit set) */
- SREGS_PAD(pad5);
-
- /*
- * Owner of this descriptor, 0= driver, 1=sonic
- */
-
- u16 in_use;
- SREGS_PAD(pad6);
-
- caddr_t rda_next; /* pointer to next RD */
-} sonic_rd_t;
-
-
-/*
- * Describes a Transmit Descriptor
- */
-typedef struct {
- u16 tx_status; /* status after transmission of a packet */
- SREGS_PAD(pad0);
- u16 tx_config; /* transmit configuration for this packet */
- SREGS_PAD(pad1);
- u16 tx_pktsize; /* size of the packet to be transmitted */
- SREGS_PAD(pad2);
- u16 tx_frag_count; /* no. of fragments */
- SREGS_PAD(pad3);
-
- u16 tx_frag_ptr_l;
- SREGS_PAD(pad4);
- u16 tx_frag_ptr_h;
- SREGS_PAD(pad5);
- u16 tx_frag_size;
- SREGS_PAD(pad6);
-
- u16 link; /* ptr to next descriptor */
- SREGS_PAD(pad7);
-} sonic_td_t;
-
-
-/*
- * Describes an entry in the CAM Descriptor Area.
- */
-
-typedef struct {
- u16 cam_entry_pointer;
- SREGS_PAD(pad0);
- u16 cam_cap0;
- SREGS_PAD(pad1);
- u16 cam_cap1;
- SREGS_PAD(pad2);
- u16 cam_cap2;
- SREGS_PAD(pad3);
-} sonic_cd_t;
-
-#define CAM_DESCRIPTORS 16
-
-
-typedef struct {
- sonic_cd_t cam_desc[CAM_DESCRIPTORS];
- u16 cam_enable;
- SREGS_PAD(pad);
-} sonic_cda_t;
-#endif /* endianness */
+/* Offsets in the various DMA buffers accessed by the SONIC */
+
+#define SONIC_BITMODE16 0
+#define SONIC_BITMODE32 1
+#define SONIC_BUS_SCALE(bitmode) ((bitmode) ? 4 : 2)
+/* Note! These are all measured in bus-size units, so use SONIC_BUS_SCALE */
+#define SIZEOF_SONIC_RR 4
+#define SONIC_RR_BUFADR_L 0
+#define SONIC_RR_BUFADR_H 1
+#define SONIC_RR_BUFSIZE_L 2
+#define SONIC_RR_BUFSIZE_H 3
+
+#define SIZEOF_SONIC_RD 7
+#define SONIC_RD_STATUS 0
+#define SONIC_RD_PKTLEN 1
+#define SONIC_RD_PKTPTR_L 2
+#define SONIC_RD_PKTPTR_H 3
+#define SONIC_RD_SEQNO 4
+#define SONIC_RD_LINK 5
+#define SONIC_RD_IN_USE 6
+
+#define SIZEOF_SONIC_TD 8
+#define SONIC_TD_STATUS 0
+#define SONIC_TD_CONFIG 1
+#define SONIC_TD_PKTSIZE 2
+#define SONIC_TD_FRAG_COUNT 3
+#define SONIC_TD_FRAG_PTR_L 4
+#define SONIC_TD_FRAG_PTR_H 5
+#define SONIC_TD_FRAG_SIZE 6
+#define SONIC_TD_LINK 7
+
+#define SIZEOF_SONIC_CD 4
+#define SONIC_CD_ENTRY_POINTER 0
+#define SONIC_CD_CAP0 1
+#define SONIC_CD_CAP1 2
+#define SONIC_CD_CAP2 3
+
+#define SIZEOF_SONIC_CDA ((CAM_DESCRIPTORS * SIZEOF_SONIC_CD) + 1)
+#define SONIC_CDA_CAM_ENABLE (CAM_DESCRIPTORS * SIZEOF_SONIC_CD)
/*
* Some tunables for the buffer areas. Power of 2 is required
@@ -426,44 +270,60 @@ typedef struct {
*
* MSch: use more buffer space for the slow m68k Macs!
*/
-#ifdef CONFIG_MACSONIC
-#define SONIC_NUM_RRS 32 /* number of receive resources */
-#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
-#define SONIC_NUM_TDS 32 /* number of transmit descriptors */
-#else
-#define SONIC_NUM_RRS 16 /* number of receive resources */
-#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
-#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
-#endif
-#define SONIC_RBSIZE 1520 /* size of one resource buffer */
+#define SONIC_NUM_RRS 16 /* number of receive resources */
+#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
+#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+#define SONIC_RBSIZE 1520 /* size of one resource buffer */
+
+/* Again, measured in bus size units! */
+#define SIZEOF_SONIC_DESC (SIZEOF_SONIC_CDA \
+ + (SIZEOF_SONIC_TD * SONIC_NUM_TDS) \
+ + (SIZEOF_SONIC_RD * SONIC_NUM_RDS) \
+ + (SIZEOF_SONIC_RR * SONIC_NUM_RRS))
/* Information that need to be kept for each board. */
struct sonic_local {
- sonic_cda_t cda; /* virtual CPU address of CDA */
- sonic_td_t tda[SONIC_NUM_TDS]; /* transmit descriptor area */
- sonic_rr_t rra[SONIC_NUM_RRS]; /* receive resource area */
- sonic_rd_t rda[SONIC_NUM_RDS]; /* receive descriptor area */
- struct sk_buff *tx_skb[SONIC_NUM_TDS]; /* skbuffs for packets to transmit */
- unsigned int tx_laddr[SONIC_NUM_TDS]; /* logical DMA address fro skbuffs */
- unsigned char *rba; /* start of receive buffer areas */
- unsigned int cda_laddr; /* logical DMA address of CDA */
- unsigned int tda_laddr; /* logical DMA address of TDA */
- unsigned int rra_laddr; /* logical DMA address of RRA */
- unsigned int rda_laddr; /* logical DMA address of RDA */
- unsigned int rba_laddr; /* logical DMA address of RBA */
- unsigned int cur_rra; /* current indexes to resource areas */
+ /* Bus size. 0 == 16 bits, 1 == 32 bits. */
+ int dma_bitmode;
+ /* Register offset within the longword (independent of endianness,
+ and varies from one type of Macintosh SONIC to another
+ (Aarrgh)) */
+ int reg_offset;
+ void *descriptors;
+ /* Crud. These areas have to be within the same 64K. Therefore
+	   we allocate a descriptors page, and point these to places within it. */
+ void *cda; /* CAM descriptor area */
+ void *tda; /* Transmit descriptor area */
+ void *rra; /* Receive resource area */
+ void *rda; /* Receive descriptor area */
+ struct sk_buff* volatile rx_skb[SONIC_NUM_RRS]; /* packets to be received */
+ struct sk_buff* volatile tx_skb[SONIC_NUM_TDS]; /* packets to be transmitted */
+ unsigned int tx_len[SONIC_NUM_TDS]; /* lengths of tx DMA mappings */
+ /* Logical DMA addresses on MIPS, bus addresses on m68k
+ * (so "laddr" is a bit misleading) */
+ dma_addr_t descriptors_laddr;
+ u32 cda_laddr; /* logical DMA address of CDA */
+ u32 tda_laddr; /* logical DMA address of TDA */
+ u32 rra_laddr; /* logical DMA address of RRA */
+ u32 rda_laddr; /* logical DMA address of RDA */
+ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
+ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
+ unsigned int rra_end;
+ unsigned int cur_rwp;
unsigned int cur_rx;
- unsigned int cur_tx;
- unsigned int dirty_tx; /* last unacked transmit packet */
- char tx_full;
+ unsigned int cur_tx; /* first unacked transmit packet */
+ unsigned int eol_rx;
+ unsigned int eol_tx; /* last unacked transmit packet */
+ unsigned int next_tx; /* next free TD */
+ struct device *device; /* generic device */
struct net_device_stats stats;
};
-#define TX_TIMEOUT 6
+#define TX_TIMEOUT (3 * HZ)
/* Index to functions, as function prototypes. */
@@ -477,6 +337,114 @@ static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
static void sonic_tx_timeout(struct net_device *dev);
+/* Internal inlines for reading/writing DMA buffers. Note that bus
+ size and endianness matter here, whereas they don't for registers,
+ as far as we can tell. */
+/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
+ is a much better name. */
+static inline void sonic_buf_put(void* base, int bitmode,
+ int offset, __u16 val)
+{
+ if (bitmode)
+#ifdef __BIG_ENDIAN
+ ((__u16 *) base + (offset*2))[1] = val;
+#else
+ ((__u16 *) base + (offset*2))[0] = val;
+#endif
+ else
+ ((__u16 *) base)[offset] = val;
+}
+
+static inline __u16 sonic_buf_get(void* base, int bitmode,
+ int offset)
+{
+ if (bitmode)
+#ifdef __BIG_ENDIAN
+ return ((volatile __u16 *) base + (offset*2))[1];
+#else
+ return ((volatile __u16 *) base + (offset*2))[0];
+#endif
+ else
+ return ((volatile __u16 *) base)[offset];
+}
+
+/* Inlines that you should actually use for reading/writing DMA buffers */
+static inline void sonic_cda_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ sonic_buf_put(lp->cda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_CD) + offset, val);
+}
+
+static inline __u16 sonic_cda_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ return sonic_buf_get(lp->cda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_CD) + offset);
+}
+
+static inline void sonic_set_cam_enable(struct net_device* dev, __u16 val)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ sonic_buf_put(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE, val);
+}
+
+static inline __u16 sonic_get_cam_enable(struct net_device* dev)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ return sonic_buf_get(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE);
+}
+
+static inline void sonic_tda_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ sonic_buf_put(lp->tda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_TD) + offset, val);
+}
+
+static inline __u16 sonic_tda_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ return sonic_buf_get(lp->tda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_TD) + offset);
+}
+
+static inline void sonic_rda_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ sonic_buf_put(lp->rda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RD) + offset, val);
+}
+
+static inline __u16 sonic_rda_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ return sonic_buf_get(lp->rda, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RD) + offset);
+}
+
+static inline void sonic_rra_put(struct net_device* dev, int entry,
+ int offset, __u16 val)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ sonic_buf_put(lp->rra, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RR) + offset, val);
+}
+
+static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
+ int offset)
+{
+ struct sonic_local* lp = (struct sonic_local *) dev->priv;
+ return sonic_buf_get(lp->rra, lp->dma_bitmode,
+ (entry * SIZEOF_SONIC_RR) + offset);
+}
+
static const char *version =
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index 7e99e9f8045..e4cfc80b283 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -84,7 +84,7 @@ config 3C359
config TMS380TR
tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
- depends on TR && (PCI || ISA && ISA_DMA_API)
+ depends on TR && (PCI || ISA && ISA_DMA_API || MCA)
select FW_LOADER
---help---
This driver provides generic support for token ring adapters
@@ -158,7 +158,7 @@ config ABYSS
config MADGEMC
tristate "Madge Smart 16/4 Ringnode MicroChannel"
- depends on TR && TMS380TR && MCA_LEGACY
+ depends on TR && TMS380TR && MCA
help
This tms380 module supports the Madge Smart 16/4 MC16 and MC32
MicroChannel adapters.
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 87103c40099..9345e68c451 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -139,7 +139,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
*/
dev->base_addr += 0x10;
- ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
+ ret = tmsdev_init(dev, &pdev->dev);
if (ret) {
printk("%s: unable to get memory for dev->priv.\n",
dev->name);
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 659cbdbef7f..3a25d191ea4 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -20,7 +20,7 @@
static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
#include <linux/module.h>
-#include <linux/mca-legacy.h>
+#include <linux/mca.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
@@ -38,9 +38,7 @@ static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
#define MADGEMC_IO_EXTENT 32
#define MADGEMC_SIF_OFFSET 0x08
-struct madgemc_card {
- struct net_device *dev;
-
+struct card_info {
/*
* These are read from the BIA ROM.
*/
@@ -57,16 +55,12 @@ struct madgemc_card {
unsigned int arblevel:4;
unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */
unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */
-
- struct madgemc_card *next;
};
-static struct madgemc_card *madgemc_card_list;
-
static int madgemc_open(struct net_device *dev);
static int madgemc_close(struct net_device *dev);
static int madgemc_chipset_init(struct net_device *dev);
-static void madgemc_read_rom(struct madgemc_card *card);
+static void madgemc_read_rom(struct net_device *dev, struct card_info *card);
static unsigned short madgemc_setnselout_pins(struct net_device *dev);
static void madgemc_setcabletype(struct net_device *dev, int type);
@@ -151,261 +145,237 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
-static int __init madgemc_probe(void)
+static int __devinit madgemc_probe(struct device *device)
{
static int versionprinted;
struct net_device *dev;
struct net_local *tp;
- struct madgemc_card *card;
- int i,slot = 0;
- __u8 posreg[4];
-
- if (!MCA_bus)
- return -1;
-
- while (slot != MCA_NOTFOUND) {
- /*
- * Currently we only support the MC16/32 (MCA ID 002d)
- */
- slot = mca_find_unused_adapter(0x002d, slot);
- if (slot == MCA_NOTFOUND)
- break;
-
- /*
- * If we get here, we have an adapter.
- */
- if (versionprinted++ == 0)
- printk("%s", version);
-
- dev = alloc_trdev(sizeof(struct net_local));
- if (dev == NULL) {
- printk("madgemc: unable to allocate dev space\n");
- if (madgemc_card_list)
- return 0;
- return -1;
- }
+ struct card_info *card;
+ struct mca_device *mdev = to_mca_device(device);
+ int ret = 0, i = 0;
+
+ if (versionprinted++ == 0)
+ printk("%s", version);
+
+ if(mca_device_claimed(mdev))
+ return -EBUSY;
+ mca_device_set_claim(mdev, 1);
+
+ dev = alloc_trdev(sizeof(struct net_local));
+ if (!dev) {
+ printk("madgemc: unable to allocate dev space\n");
+ mca_device_set_claim(mdev, 0);
+ ret = -ENOMEM;
+ goto getout;
+ }
- SET_MODULE_OWNER(dev);
- dev->dma = 0;
+ SET_MODULE_OWNER(dev);
+ dev->dma = 0;
- /*
- * Fetch MCA config registers
- */
- for(i=0;i<4;i++)
- posreg[i] = mca_read_stored_pos(slot, i+2);
-
- card = kmalloc(sizeof(struct madgemc_card), GFP_KERNEL);
- if (card==NULL) {
- printk("madgemc: unable to allocate card struct\n");
- free_netdev(dev);
- if (madgemc_card_list)
- return 0;
- return -1;
- }
- card->dev = dev;
-
- /*
- * Parse configuration information. This all comes
- * directly from the publicly available @002d.ADF.
- * Get it from Madge or your local ADF library.
- */
-
- /*
- * Base address
- */
- dev->base_addr = 0x0a20 +
- ((posreg[2] & MC16_POS2_ADDR2)?0x0400:0) +
- ((posreg[0] & MC16_POS0_ADDR1)?0x1000:0) +
- ((posreg[3] & MC16_POS3_ADDR3)?0x2000:0);
-
- /*
- * Interrupt line
- */
- switch(posreg[0] >> 6) { /* upper two bits */
+ card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
+ if (card==NULL) {
+ printk("madgemc: unable to allocate card struct\n");
+ ret = -ENOMEM;
+ goto getout1;
+ }
+
+ /*
+ * Parse configuration information. This all comes
+ * directly from the publicly available @002d.ADF.
+ * Get it from Madge or your local ADF library.
+ */
+
+ /*
+ * Base address
+ */
+ dev->base_addr = 0x0a20 +
+ ((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) +
+ ((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) +
+ ((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0);
+
+ /*
+ * Interrupt line
+ */
+ switch(mdev->pos[0] >> 6) { /* upper two bits */
case 0x1: dev->irq = 3; break;
case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */
case 0x3: dev->irq = 10; break;
default: dev->irq = 0; break;
- }
+ }
- if (dev->irq == 0) {
- printk("%s: invalid IRQ\n", dev->name);
- goto getout1;
- }
+ if (dev->irq == 0) {
+ printk("%s: invalid IRQ\n", dev->name);
+ ret = -EBUSY;
+ goto getout2;
+ }
- if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT,
- "madgemc")) {
- printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", slot, dev->base_addr);
- dev->base_addr += MADGEMC_SIF_OFFSET;
- goto getout1;
- }
+ if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT,
+ "madgemc")) {
+ printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr);
dev->base_addr += MADGEMC_SIF_OFFSET;
+ ret = -EBUSY;
+ goto getout2;
+ }
+ dev->base_addr += MADGEMC_SIF_OFFSET;
+
+ /*
+ * Arbitration Level
+ */
+ card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8;
+
+ /*
+ * Burst mode and Fairness
+ */
+ card->burstmode = ((mdev->pos[2] >> 6) & 0x3);
+ card->fairness = ((mdev->pos[2] >> 4) & 0x1);
+
+ /*
+ * Ring Speed
+ */
+ if ((mdev->pos[1] >> 2)&0x1)
+ card->ringspeed = 2; /* not selected */
+ else if ((mdev->pos[2] >> 5) & 0x1)
+ card->ringspeed = 1; /* 16Mb */
+ else
+ card->ringspeed = 0; /* 4Mb */
+
+ /*
+ * Cable type
+ */
+ if ((mdev->pos[1] >> 6)&0x1)
+ card->cabletype = 1; /* STP/DB9 */
+ else
+ card->cabletype = 0; /* UTP/RJ-45 */
+
+
+ /*
+ * ROM Info. This requires us to actually twiddle
+ * bits on the card, so we must ensure above that
+ * the base address is free of conflict (request_region above).
+ */
+ madgemc_read_rom(dev, card);
- /*
- * Arbitration Level
- */
- card->arblevel = ((posreg[0] >> 1) & 0x7) + 8;
-
- /*
- * Burst mode and Fairness
- */
- card->burstmode = ((posreg[2] >> 6) & 0x3);
- card->fairness = ((posreg[2] >> 4) & 0x1);
-
- /*
- * Ring Speed
- */
- if ((posreg[1] >> 2)&0x1)
- card->ringspeed = 2; /* not selected */
- else if ((posreg[2] >> 5) & 0x1)
- card->ringspeed = 1; /* 16Mb */
- else
- card->ringspeed = 0; /* 4Mb */
-
- /*
- * Cable type
- */
- if ((posreg[1] >> 6)&0x1)
- card->cabletype = 1; /* STP/DB9 */
- else
- card->cabletype = 0; /* UTP/RJ-45 */
-
-
- /*
- * ROM Info. This requires us to actually twiddle
- * bits on the card, so we must ensure above that
- * the base address is free of conflict (request_region above).
- */
- madgemc_read_rom(card);
-
- if (card->manid != 0x4d) { /* something went wrong */
- printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
- goto getout;
- }
+ if (card->manid != 0x4d) { /* something went wrong */
+ printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
+ goto getout3;
+ }
- if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
- printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
- goto getout;
- }
+ if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
+ printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
+ ret = -EIO;
+ goto getout3;
+ }
- /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
- if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
- card->ramsize = 128;
- else
- card->ramsize = 256;
-
- printk("%s: %s Rev %d at 0x%04lx IRQ %d\n",
- dev->name,
- (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
- MADGEMC32_CARDNAME, card->cardrev,
- dev->base_addr, dev->irq);
-
- if (card->cardtype == 0x0d)
- printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name);
-
- if (card->ringspeed==2) { /* Unknown */
- printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
- card->ringspeed = 1; /* default to 16mb */
- }
+ /* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
+ if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
+ card->ramsize = 128;
+ else
+ card->ramsize = 256;
+
+ printk("%s: %s Rev %d at 0x%04lx IRQ %d\n",
+ dev->name,
+ (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
+ MADGEMC32_CARDNAME, card->cardrev,
+ dev->base_addr, dev->irq);
+
+ if (card->cardtype == 0x0d)
+ printk("%s: Warning: MC32 support is experimental and highly untested\n", dev->name);
+
+ if (card->ringspeed==2) { /* Unknown */
+ printk("%s: Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
+ card->ringspeed = 1; /* default to 16mb */
+ }
- printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize);
+ printk("%s: RAM Size: %dKB\n", dev->name, card->ramsize);
- printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name,
- (card->ringspeed)?16:4,
- card->cabletype?"STP/DB9":"UTP/RJ-45");
- printk("%s: Arbitration Level: %d\n", dev->name,
- card->arblevel);
+ printk("%s: Ring Speed: %dMb/sec on %s\n", dev->name,
+ (card->ringspeed)?16:4,
+ card->cabletype?"STP/DB9":"UTP/RJ-45");
+ printk("%s: Arbitration Level: %d\n", dev->name,
+ card->arblevel);
- printk("%s: Burst Mode: ", dev->name);
- switch(card->burstmode) {
+ printk("%s: Burst Mode: ", dev->name);
+ switch(card->burstmode) {
case 0: printk("Cycle steal"); break;
case 1: printk("Limited burst"); break;
case 2: printk("Delayed release"); break;
case 3: printk("Immediate release"); break;
- }
- printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
-
-
- /*
- * Enable SIF before we assign the interrupt handler,
- * just in case we get spurious interrupts that need
- * handling.
- */
- outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
- madgemc_setsifsel(dev, 1);
- if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
- "madgemc", dev))
- goto getout;
-
- madgemc_chipset_init(dev); /* enables interrupts! */
- madgemc_setcabletype(dev, card->cabletype);
+ }
+ printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
- /* Setup MCA structures */
- mca_set_adapter_name(slot, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
- mca_set_adapter_procfn(slot, madgemc_mcaproc, dev);
- mca_mark_as_used(slot);
- printk("%s: Ring Station Address: ", dev->name);
- printk("%2.2x", dev->dev_addr[0]);
- for (i = 1; i < 6; i++)
- printk(":%2.2x", dev->dev_addr[i]);
- printk("\n");
-
- /* XXX is ISA_MAX_ADDRESS correct here? */
- if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL)) {
- printk("%s: unable to get memory for dev->priv.\n",
- dev->name);
- release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
- MADGEMC_IO_EXTENT);
-
- kfree(card);
- tmsdev_term(dev);
- free_netdev(dev);
- if (madgemc_card_list)
- return 0;
- return -1;
- }
- tp = netdev_priv(dev);
-
- /*
- * The MC16 is physically a 32bit card. However, Madge
- * insists on calling it 16bit, so I'll assume here that
- * they know what they're talking about. Cut off DMA
- * at 16mb.
- */
- tp->setnselout = madgemc_setnselout_pins;
- tp->sifwriteb = madgemc_sifwriteb;
- tp->sifreadb = madgemc_sifreadb;
- tp->sifwritew = madgemc_sifwritew;
- tp->sifreadw = madgemc_sifreadw;
- tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
-
- memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
-
- dev->open = madgemc_open;
- dev->stop = madgemc_close;
-
- if (register_netdev(dev) == 0) {
- /* Enlist in the card list */
- card->next = madgemc_card_list;
- madgemc_card_list = card;
- slot++;
- continue; /* successful, try to find another */
- }
-
- free_irq(dev->irq, dev);
- getout:
- release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
- MADGEMC_IO_EXTENT);
- getout1:
- kfree(card);
- free_netdev(dev);
- slot++;
+ /*
+ * Enable SIF before we assign the interrupt handler,
+ * just in case we get spurious interrupts that need
+ * handling.
+ */
+ outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
+ madgemc_setsifsel(dev, 1);
+ if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
+ "madgemc", dev)) {
+ ret = -EBUSY;
+ goto getout3;
}
- if (madgemc_card_list)
+ madgemc_chipset_init(dev); /* enables interrupts! */
+ madgemc_setcabletype(dev, card->cabletype);
+
+ /* Setup MCA structures */
+ mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
+ mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev);
+
+ printk("%s: Ring Station Address: ", dev->name);
+ printk("%2.2x", dev->dev_addr[0]);
+ for (i = 1; i < 6; i++)
+ printk(":%2.2x", dev->dev_addr[i]);
+ printk("\n");
+
+ if (tmsdev_init(dev, device)) {
+ printk("%s: unable to get memory for dev->priv.\n",
+ dev->name);
+ ret = -ENOMEM;
+ goto getout4;
+ }
+ tp = netdev_priv(dev);
+
+ /*
+ * The MC16 is physically a 32bit card. However, Madge
+ * insists on calling it 16bit, so I'll assume here that
+ * they know what they're talking about. Cut off DMA
+ * at 16mb.
+ */
+ tp->setnselout = madgemc_setnselout_pins;
+ tp->sifwriteb = madgemc_sifwriteb;
+ tp->sifreadb = madgemc_sifreadb;
+ tp->sifwritew = madgemc_sifwritew;
+ tp->sifreadw = madgemc_sifreadw;
+ tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
+
+ memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
+
+ dev->open = madgemc_open;
+ dev->stop = madgemc_close;
+
+ tp->tmspriv = card;
+ dev_set_drvdata(device, dev);
+
+ if (register_netdev(dev) == 0)
return 0;
- return -1;
+
+ dev_set_drvdata(device, NULL);
+ ret = -ENOMEM;
+getout4:
+ free_irq(dev->irq, dev);
+getout3:
+ release_region(dev->base_addr-MADGEMC_SIF_OFFSET,
+ MADGEMC_IO_EXTENT);
+getout2:
+ kfree(card);
+getout1:
+ free_netdev(dev);
+getout:
+ mca_device_set_claim(mdev, 0);
+ return ret;
}
/*
@@ -664,12 +634,12 @@ static void madgemc_chipset_close(struct net_device *dev)
* is complete.
*
*/
-static void madgemc_read_rom(struct madgemc_card *card)
+static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
{
unsigned long ioaddr;
unsigned char reg0, reg1, tmpreg0, i;
- ioaddr = card->dev->base_addr;
+ ioaddr = dev->base_addr;
reg0 = inb(ioaddr + MC_CONTROL_REG0);
reg1 = inb(ioaddr + MC_CONTROL_REG1);
@@ -686,9 +656,9 @@ static void madgemc_read_rom(struct madgemc_card *card)
outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);
/* Read BIA */
- card->dev->addr_len = 6;
+ dev->addr_len = 6;
for (i = 0; i < 6; i++)
- card->dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);
+ dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);
/* Restore original register values */
outb(reg0, ioaddr + MC_CONTROL_REG0);
@@ -721,14 +691,10 @@ static int madgemc_close(struct net_device *dev)
static int madgemc_mcaproc(char *buf, int slot, void *d)
{
struct net_device *dev = (struct net_device *)d;
- struct madgemc_card *curcard = madgemc_card_list;
+ struct net_local *tp = dev->priv;
+ struct card_info *curcard = tp->tmspriv;
int len = 0;
- while (curcard) { /* search for card struct */
- if (curcard->dev == dev)
- break;
- curcard = curcard->next;
- }
len += sprintf(buf+len, "-------\n");
if (curcard) {
struct net_local *tp = netdev_priv(dev);
@@ -763,25 +729,56 @@ static int madgemc_mcaproc(char *buf, int slot, void *d)
return len;
}
-static void __exit madgemc_exit(void)
+static int __devexit madgemc_remove(struct device *device)
{
- struct net_device *dev;
- struct madgemc_card *this_card;
-
- while (madgemc_card_list) {
- dev = madgemc_card_list->dev;
- unregister_netdev(dev);
- release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
- free_irq(dev->irq, dev);
- tmsdev_term(dev);
- free_netdev(dev);
- this_card = madgemc_card_list;
- madgemc_card_list = this_card->next;
- kfree(this_card);
- }
+ struct net_device *dev = dev_get_drvdata(device);
+ struct net_local *tp;
+ struct card_info *card;
+
+ if (!dev)
+ BUG();
+
+ tp = dev->priv;
+ card = tp->tmspriv;
+ kfree(card);
+ tp->tmspriv = NULL;
+
+ unregister_netdev(dev);
+ release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
+ free_irq(dev->irq, dev);
+ tmsdev_term(dev);
+ free_netdev(dev);
+ dev_set_drvdata(device, NULL);
+
+ return 0;
+}
+
+static short madgemc_adapter_ids[] __initdata = {
+ 0x002d,
+ 0x0000
+};
+
+static struct mca_driver madgemc_driver = {
+ .id_table = madgemc_adapter_ids,
+ .driver = {
+ .name = "madgemc",
+ .bus = &mca_bus_type,
+ .probe = madgemc_probe,
+ .remove = __devexit_p(madgemc_remove),
+ },
+};
+
+static int __init madgemc_init (void)
+{
+ return mca_register_driver (&madgemc_driver);
+}
+
+static void __exit madgemc_exit (void)
+{
+ mca_unregister_driver (&madgemc_driver);
}
-module_init(madgemc_probe);
+module_init(madgemc_init);
module_exit(madgemc_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 40ad0fde28a..eb1423ede75 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -62,8 +62,7 @@ static int dmalist[] __initdata = {
};
static char cardname[] = "Proteon 1392\0";
-
-struct net_device *proteon_probe(int unit);
+static u64 dma_mask = ISA_MAX_ADDRESS;
static int proteon_open(struct net_device *dev);
static void proteon_read_eeprom(struct net_device *dev);
static unsigned short proteon_setnselout_pins(struct net_device *dev);
@@ -116,7 +115,7 @@ nodev:
return -ENODEV;
}
-static int __init setup_card(struct net_device *dev)
+static int __init setup_card(struct net_device *dev, struct device *pdev)
{
struct net_local *tp;
static int versionprinted;
@@ -137,7 +136,7 @@ static int __init setup_card(struct net_device *dev)
}
}
if (err)
- goto out4;
+ goto out5;
/* At this point we have found a valid card. */
@@ -145,14 +144,15 @@ static int __init setup_card(struct net_device *dev)
printk(KERN_DEBUG "%s", version);
err = -EIO;
- if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL))
+ pdev->dma_mask = &dma_mask;
+ if (tmsdev_init(dev, pdev))
goto out4;
dev->base_addr &= ~3;
proteon_read_eeprom(dev);
- printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name);
+ printk(KERN_DEBUG "proteon.c: Ring Station Address: ");
printk("%2.2x", dev->dev_addr[0]);
for (j = 1; j < 6; j++)
printk(":%2.2x", dev->dev_addr[j]);
@@ -185,7 +185,7 @@ static int __init setup_card(struct net_device *dev)
if(irqlist[j] == 0)
{
- printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name);
+ printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
goto out3;
}
}
@@ -196,15 +196,15 @@ static int __init setup_card(struct net_device *dev)
break;
if (irqlist[j] == 0)
{
- printk(KERN_INFO "%s: Illegal IRQ %d specified\n",
- dev->name, dev->irq);
+ printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
+ dev->irq);
goto out3;
}
if (request_irq(dev->irq, tms380tr_interrupt, 0,
cardname, dev))
{
- printk(KERN_INFO "%s: Selected IRQ %d not available\n",
- dev->name, dev->irq);
+ printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
+ dev->irq);
goto out3;
}
}
@@ -220,7 +220,7 @@ static int __init setup_card(struct net_device *dev)
if(dmalist[j] == 0)
{
- printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name);
+ printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
goto out2;
}
}
@@ -231,25 +231,25 @@ static int __init setup_card(struct net_device *dev)
break;
if (dmalist[j] == 0)
{
- printk(KERN_INFO "%s: Illegal DMA %d specified\n",
- dev->name, dev->dma);
+ printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
+ dev->dma);
goto out2;
}
if (request_dma(dev->dma, cardname))
{
- printk(KERN_INFO "%s: Selected DMA %d not available\n",
- dev->name, dev->dma);
+ printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
+ dev->dma);
goto out2;
}
}
- printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
- dev->name, dev->base_addr, dev->irq, dev->dma);
-
err = register_netdev(dev);
if (err)
goto out;
+ printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
+ dev->name, dev->base_addr, dev->irq, dev->dma);
+
return 0;
out:
free_dma(dev->dma);
@@ -258,34 +258,11 @@ out2:
out3:
tmsdev_term(dev);
out4:
- release_region(dev->base_addr, PROTEON_IO_EXTENT);
+ release_region(dev->base_addr, PROTEON_IO_EXTENT);
+out5:
return err;
}
-struct net_device * __init proteon_probe(int unit)
-{
- struct net_device *dev = alloc_trdev(sizeof(struct net_local));
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "tr%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- err = setup_card(dev);
- if (err)
- goto out;
-
- return dev;
-
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
/*
* Reads MAC address from adapter RAM, which should've read it from
* the onboard ROM.
@@ -352,8 +329,6 @@ static int proteon_open(struct net_device *dev)
return tms380tr_open(dev);
}
-#ifdef MODULE
-
#define ISATR_MAX_ADAPTERS 3
static int io[ISATR_MAX_ADAPTERS];
@@ -366,13 +341,23 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
-static struct net_device *proteon_dev[ISATR_MAX_ADAPTERS];
+static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
+
+static struct device_driver proteon_driver = {
+ .name = "proteon",
+ .bus = &platform_bus_type,
+};
-int init_module(void)
+static int __init proteon_init(void)
{
struct net_device *dev;
+ struct platform_device *pdev;
int i, num = 0, err = 0;
+ err = driver_register(&proteon_driver);
+ if (err)
+ return err;
+
for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
dev = alloc_trdev(sizeof(struct net_local));
if (!dev)
@@ -381,11 +366,15 @@ int init_module(void)
dev->base_addr = io[i];
dev->irq = irq[i];
dev->dma = dma[i];
- err = setup_card(dev);
+ pdev = platform_device_register_simple("proteon",
+ i, NULL, 0);
+ err = setup_card(dev, &pdev->dev);
if (!err) {
- proteon_dev[i] = dev;
+ proteon_dev[i] = pdev;
+ dev_set_drvdata(&pdev->dev, dev);
++num;
} else {
+ platform_device_unregister(pdev);
free_netdev(dev);
}
}
@@ -399,23 +388,28 @@ int init_module(void)
return (0);
}
-void cleanup_module(void)
+static void __exit proteon_cleanup(void)
{
+ struct net_device *dev;
int i;
for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
- struct net_device *dev = proteon_dev[i];
+ struct platform_device *pdev = proteon_dev[i];
- if (!dev)
+ if (!pdev)
continue;
-
+ dev = dev_get_drvdata(&pdev->dev);
unregister_netdev(dev);
release_region(dev->base_addr, PROTEON_IO_EXTENT);
free_irq(dev->irq, dev);
free_dma(dev->dma);
tmsdev_term(dev);
free_netdev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+ platform_device_unregister(pdev);
}
+ driver_unregister(&proteon_driver);
}
-#endif /* MODULE */
+module_init(proteon_init);
+module_exit(proteon_cleanup);
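Both ISA token ring drivers now manufacture one platform device per probed board so that tmsdev_init() has a struct device to do DMA mapping against, with that device's dma_mask pointed at an ISA-sized limit. The condensed sketch below shows the idea under the same assumptions; the driver name and the 24-bit mask constant are illustrative, not taken from proteon.c or skisa.c.

/* Illustrative: give an ISA-style board a platform device whose dma_mask
 * reflects the 16MB ISA limit before handing it to DMA-mapping helpers. */
#include <linux/err.h>
#include <linux/device.h>	/* platform_device lived here in this era;
				 * later kernels use <linux/platform_device.h> */

static u64 example_isa_dma_mask = 0x00ffffffULL;	/* 24-bit ISA limit */

static int example_attach_board(int index, struct platform_device **out)
{
	struct platform_device *pdev;

	/* One platform device per board gives the DMA API something to
	 * hang the mapping state and address mask off. */
	pdev = platform_device_register_simple("example_isa", index, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/* dma_map_single() and friends will now see that this board
	 * cannot address memory past 16MB. */
	pdev->dev.dma_mask = &example_isa_dma_mask;

	*out = pdev;	/* keep it: the cleanup path must unregister it */
	return 0;
}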
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index f26796e2d0e..3c7c66204f7 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -68,8 +68,7 @@ static int dmalist[] __initdata = {
};
static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
-
-struct net_device *sk_isa_probe(int unit);
+static u64 dma_mask = ISA_MAX_ADDRESS;
static int sk_isa_open(struct net_device *dev);
static void sk_isa_read_eeprom(struct net_device *dev);
static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
@@ -133,7 +132,7 @@ static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
return 0;
}
-static int __init setup_card(struct net_device *dev)
+static int __init setup_card(struct net_device *dev, struct device *pdev)
{
struct net_local *tp;
static int versionprinted;
@@ -154,7 +153,7 @@ static int __init setup_card(struct net_device *dev)
}
}
if (err)
- goto out4;
+ goto out5;
/* At this point we have found a valid card. */
@@ -162,14 +161,15 @@ static int __init setup_card(struct net_device *dev)
printk(KERN_DEBUG "%s", version);
err = -EIO;
- if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL))
+ pdev->dma_mask = &dma_mask;
+ if (tmsdev_init(dev, pdev))
goto out4;
dev->base_addr &= ~3;
sk_isa_read_eeprom(dev);
- printk(KERN_DEBUG "%s: Ring Station Address: ", dev->name);
+ printk(KERN_DEBUG "skisa.c: Ring Station Address: ");
printk("%2.2x", dev->dev_addr[0]);
for (j = 1; j < 6; j++)
printk(":%2.2x", dev->dev_addr[j]);
@@ -202,7 +202,7 @@ static int __init setup_card(struct net_device *dev)
if(irqlist[j] == 0)
{
- printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name);
+ printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
goto out3;
}
}
@@ -213,15 +213,15 @@ static int __init setup_card(struct net_device *dev)
break;
if (irqlist[j] == 0)
{
- printk(KERN_INFO "%s: Illegal IRQ %d specified\n",
- dev->name, dev->irq);
+ printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
+ dev->irq);
goto out3;
}
if (request_irq(dev->irq, tms380tr_interrupt, 0,
isa_cardname, dev))
{
- printk(KERN_INFO "%s: Selected IRQ %d not available\n",
- dev->name, dev->irq);
+ printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
+ dev->irq);
goto out3;
}
}
@@ -237,7 +237,7 @@ static int __init setup_card(struct net_device *dev)
if(dmalist[j] == 0)
{
- printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name);
+ printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
goto out2;
}
}
@@ -248,25 +248,25 @@ static int __init setup_card(struct net_device *dev)
break;
if (dmalist[j] == 0)
{
- printk(KERN_INFO "%s: Illegal DMA %d specified\n",
- dev->name, dev->dma);
+ printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
+ dev->dma);
goto out2;
}
if (request_dma(dev->dma, isa_cardname))
{
- printk(KERN_INFO "%s: Selected DMA %d not available\n",
- dev->name, dev->dma);
+ printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
+ dev->dma);
goto out2;
}
}
- printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
- dev->name, dev->base_addr, dev->irq, dev->dma);
-
err = register_netdev(dev);
if (err)
goto out;
+ printk(KERN_DEBUG "%s: IO: %#4lx IRQ: %d DMA: %d\n",
+ dev->name, dev->base_addr, dev->irq, dev->dma);
+
return 0;
out:
free_dma(dev->dma);
@@ -275,33 +275,11 @@ out2:
out3:
tmsdev_term(dev);
out4:
- release_region(dev->base_addr, SK_ISA_IO_EXTENT);
+ release_region(dev->base_addr, SK_ISA_IO_EXTENT);
+out5:
return err;
}
-struct net_device * __init sk_isa_probe(int unit)
-{
- struct net_device *dev = alloc_trdev(sizeof(struct net_local));
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "tr%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- err = setup_card(dev);
- if (err)
- goto out;
-
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
/*
* Reads MAC address from adapter RAM, which should've read it from
* the onboard ROM.
@@ -361,8 +339,6 @@ static int sk_isa_open(struct net_device *dev)
return tms380tr_open(dev);
}
-#ifdef MODULE
-
#define ISATR_MAX_ADAPTERS 3
static int io[ISATR_MAX_ADAPTERS];
@@ -375,13 +351,23 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
-static struct net_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
+static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
-int init_module(void)
+static struct device_driver sk_isa_driver = {
+ .name = "skisa",
+ .bus = &platform_bus_type,
+};
+
+static int __init sk_isa_init(void)
{
struct net_device *dev;
+ struct platform_device *pdev;
int i, num = 0, err = 0;
+ err = driver_register(&sk_isa_driver);
+ if (err)
+ return err;
+
for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
dev = alloc_trdev(sizeof(struct net_local));
if (!dev)
@@ -390,12 +376,15 @@ int init_module(void)
dev->base_addr = io[i];
dev->irq = irq[i];
dev->dma = dma[i];
- err = setup_card(dev);
-
+ pdev = platform_device_register_simple("skisa",
+ i, NULL, 0);
+ err = setup_card(dev, &pdev->dev);
if (!err) {
- sk_isa_dev[i] = dev;
+ sk_isa_dev[i] = pdev;
+ dev_set_drvdata(&sk_isa_dev[i]->dev, dev);
++num;
} else {
+ platform_device_unregister(pdev);
free_netdev(dev);
}
}
@@ -409,23 +398,28 @@ int init_module(void)
return (0);
}
-void cleanup_module(void)
+static void __exit sk_isa_cleanup(void)
{
+ struct net_device *dev;
int i;
for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
- struct net_device *dev = sk_isa_dev[i];
+ struct platform_device *pdev = sk_isa_dev[i];
- if (!dev)
+ if (!pdev)
continue;
-
+ dev = dev_get_drvdata(&pdev->dev);
unregister_netdev(dev);
release_region(dev->base_addr, SK_ISA_IO_EXTENT);
free_irq(dev->irq, dev);
free_dma(dev->dma);
tmsdev_term(dev);
free_netdev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+ platform_device_unregister(pdev);
}
+ driver_unregister(&sk_isa_driver);
}
-#endif /* MODULE */
+module_init(sk_isa_init);
+module_exit(sk_isa_cleanup);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 5e0b0ce98ed..2e39bf1f746 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -62,6 +62,7 @@
* normal operation.
* 30-Dec-02 JF Removed incorrect __init from
* tms380tr_init_card.
+ * 22-Jul-05 JF Converted to dma-mapping.
*
* To do:
* 1. Multi/Broadcast packet handling (this may have fixed itself)
@@ -89,7 +90,7 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -114,8 +115,6 @@ static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, A
#endif
static unsigned int tms380tr_debug = TMS380TR_DEBUG;
-static struct device tms_device;
-
/* Index to functions, as function prototypes.
* Alphabetical by function name.
*/
@@ -434,7 +433,7 @@ static void tms380tr_init_net_local(struct net_device *dev)
skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
/* data unreachable for DMA ? then use local buffer */
- dmabuf = pci_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE);
+ dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
{
tp->Rpl[i].SkbStat = SKB_DATA_COPY;
@@ -638,10 +637,10 @@ static int tms380tr_hardware_send_packet(struct sk_buff *skb, struct net_device
/* Is buffer reachable for Busmaster-DMA? */
length = skb->len;
- dmabuf = pci_map_single(tp->pdev, skb->data, length, PCI_DMA_TODEVICE);
+ dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
/* Copy frame to local buffer */
- pci_unmap_single(tp->pdev, dmabuf, length, PCI_DMA_TODEVICE);
+ dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
dmabuf = 0;
i = tp->TplFree->TPLIndex;
buf = tp->LocalTxBuffers[i];
@@ -1284,9 +1283,7 @@ static int tms380tr_reset_adapter(struct net_device *dev)
unsigned short count, c, count2;
const struct firmware *fw_entry = NULL;
- strncpy(tms_device.bus_id,dev->name, BUS_ID_SIZE);
-
- if (request_firmware(&fw_entry, "tms380tr.bin", &tms_device) != 0) {
+ if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
dev->name, "tms380tr.bin");
return (-1);
@@ -2021,7 +2018,7 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
if (tpl->DMABuff)
- pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(tpl->Skb);
}
@@ -2090,7 +2087,7 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
tp->MacStat.tx_packets++;
if (tpl->DMABuff)
- pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq(tpl->Skb);
tpl->BusyFlag = 0; /* "free" TPL */
}
@@ -2209,7 +2206,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
tp->MacStat.rx_errors++;
}
if (rpl->DMABuff)
- pci_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, PCI_DMA_TODEVICE);
+ dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
rpl->DMABuff = 0;
/* Allocate new skb for rpl */
@@ -2227,7 +2224,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
skb_put(rpl->Skb, tp->MaxPacketSize);
/* Data unreachable for DMA ? then use local buffer */
- dmabuf = pci_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE);
+ dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
{
rpl->SkbStat = SKB_DATA_COPY;
@@ -2332,23 +2329,26 @@ void tmsdev_term(struct net_device *dev)
struct net_local *tp;
tp = netdev_priv(dev);
- pci_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
+ DMA_BIDIRECTIONAL);
}
-int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
- struct pci_dev *pdev)
+int tmsdev_init(struct net_device *dev, struct device *pdev)
{
struct net_local *tms_local;
memset(dev->priv, 0, sizeof(struct net_local));
tms_local = netdev_priv(dev);
init_waitqueue_head(&tms_local->wait_for_tok_int);
- tms_local->dmalimit = dmalimit;
+ if (pdev->dma_mask)
+ tms_local->dmalimit = *pdev->dma_mask;
+ else
+ return -ENOMEM;
tms_local->pdev = pdev;
- tms_local->dmabuffer = pci_map_single(pdev, (void *)tms_local,
- sizeof(struct net_local), PCI_DMA_BIDIRECTIONAL);
- if (tms_local->dmabuffer + sizeof(struct net_local) > dmalimit)
+ tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
+ sizeof(struct net_local), DMA_BIDIRECTIONAL);
+ if (tms_local->dmabuffer + sizeof(struct net_local) >
+ tms_local->dmalimit)
{
printk(KERN_INFO "%s: Memory not accessible for DMA\n",
dev->name);
@@ -2370,8 +2370,6 @@ int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
return 0;
}
-#ifdef MODULE
-
EXPORT_SYMBOL(tms380tr_open);
EXPORT_SYMBOL(tms380tr_close);
EXPORT_SYMBOL(tms380tr_interrupt);
@@ -2379,6 +2377,8 @@ EXPORT_SYMBOL(tmsdev_init);
EXPORT_SYMBOL(tmsdev_term);
EXPORT_SYMBOL(tms380tr_wait);
+#ifdef MODULE
+
static struct module *TMS380_module = NULL;
int init_module(void)
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index f2c5ba0f37a..30452c67bb6 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -17,8 +17,7 @@
int tms380tr_open(struct net_device *dev);
int tms380tr_close(struct net_device *dev);
irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
- struct pci_dev *pdev);
+int tmsdev_init(struct net_device *dev, struct device *pdev);
void tmsdev_term(struct net_device *dev);
void tms380tr_wait(unsigned long time);
@@ -719,7 +718,7 @@ struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */
struct sk_buff *Skb;
unsigned char TPLIndex;
volatile unsigned char BusyFlag;/* Flag: TPL busy? */
- dma_addr_t DMABuff; /* DMA IO bus address from pci_map */
+ dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
};
/* ---------------------Receive Functions-------------------------------*
@@ -1060,7 +1059,7 @@ struct s_RPL { /* Receive Parameter List */
struct sk_buff *Skb;
SKB_STAT SkbStat;
int RPLIndex;
- dma_addr_t DMABuff; /* DMA IO bus address from pci_map */
+ dma_addr_t DMABuff; /* DMA IO bus address from dma_map */
};
/* Information that need to be kept for each board. */
@@ -1091,7 +1090,7 @@ typedef struct net_local {
RPL *RplTail;
unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
- struct pci_dev *pdev;
+ struct device *pdev;
int DataRate;
unsigned char ScbInUse;
unsigned short CMDqueue;
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 2e18c0a4648..ab47c0547a3 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -100,7 +100,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
unsigned int pci_irq_line;
unsigned long pci_ioaddr;
struct card_info *cardinfo = &card_info_table[ent->driver_data];
-
+
if (versionprinted++ == 0)
printk("%s", version);
@@ -143,7 +143,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
printk(":%2.2x", dev->dev_addr[i]);
printk("\n");
- ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
+ ret = tmsdev_init(dev, &pdev->dev);
if (ret) {
printk("%s: unable to get memory for dev->priv.\n", dev->name);
goto err_out_irq;
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index 6e74af62ca0..9e56fc346ba 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -56,7 +56,7 @@
#include <linux/sched.h> /* for jiffies, HZ, etc. */
#include <linux/cycx_drv.h> /* API definitions */
#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */
-#include <linux/delay.h> /* udelay */
+#include <linux/delay.h> /* udelay, msleep_interruptible */
#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
#define MOD_VERSION 0
@@ -74,7 +74,6 @@ static int reset_cyc2x(void __iomem *addr);
static int detect_cyc2x(void __iomem *addr);
/* Miscellaneous functions */
-static void delay_cycx(int sec);
static int get_option_index(long *optlist, long optval);
static u16 checksum(u8 *buf, u32 len);
@@ -259,7 +258,7 @@ static int memory_exists(void __iomem *addr)
if (readw(addr + 0x10) == TEST_PATTERN)
return 1;
- delay_cycx(1);
+ msleep_interruptible(1 * 1000);
}
return 0;
@@ -316,7 +315,7 @@ static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
/* 80186 was in hold, go */
writeb(0, addr + START_CPU);
- delay_cycx(1);
+ msleep_interruptible(1 * 1000);
}
/* Load data.bin file through boot (reset) interface. */
@@ -462,13 +461,13 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
/* reset is waiting for boot */
writew(GEN_POWER_ON, pt_cycld);
- delay_cycx(1);
+ msleep_interruptible(1 * 1000);
for (j = 0 ; j < 3 ; j++)
if (!readw(pt_cycld))
goto reset_loaded;
else
- delay_cycx(1);
+ msleep_interruptible(1 * 1000);
}
printk(KERN_ERR "%s: reset not started.\n", modname);
@@ -495,7 +494,7 @@ reset_loaded:
/* Arthur Ganzert's tip: wait a while after the firmware loading...
seg abr 26 17:17:12 EST 1999 - acme */
- delay_cycx(7);
+ msleep_interruptible(7 * 1000);
printk(KERN_INFO "%s: firmware loaded!\n", modname);
/* enable interrupts */
@@ -547,20 +546,13 @@ static int get_option_index(long *optlist, long optval)
static int reset_cyc2x(void __iomem *addr)
{
writeb(0, addr + RST_ENABLE);
- delay_cycx(2);
+ msleep_interruptible(2 * 1000);
writeb(0, addr + RST_DISABLE);
- delay_cycx(2);
+ msleep_interruptible(2 * 1000);
return memory_exists(addr);
}
-/* Delay */
-static void delay_cycx(int sec)
-{
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(sec * HZ);
-}
-
/* Calculate 16-bit CRC using CCITT polynomial. */
static u16 checksum(u8 *buf, u32 len)
{
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index aabcdc2be05..9c2d07cde01 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -4322,36 +4322,36 @@ static const struct iw_priv_args orinoco_privtab[] = {
*/
static const iw_handler orinoco_handler[] = {
- [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) orinoco_ioctl_commit,
- [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getname,
- [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfreq,
- [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfreq,
- [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setmode,
- [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getmode,
- [SIOCSIWSENS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setsens,
- [SIOCGIWSENS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getsens,
- [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwrange,
- [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setspy,
- [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getspy,
- [SIOCSIWAP -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setwap,
- [SIOCGIWAP -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getwap,
- [SIOCSIWSCAN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setscan,
- [SIOCGIWSCAN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getscan,
- [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setessid,
- [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getessid,
- [SIOCSIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setnick,
- [SIOCGIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getnick,
- [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrate,
- [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrate,
- [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrts,
- [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrts,
- [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfrag,
- [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfrag,
- [SIOCGIWRETRY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getretry,
- [SIOCSIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_setiwencode,
- [SIOCGIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwencode,
- [SIOCSIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setpower,
- [SIOCGIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getpower,
+ [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit,
+ [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname,
+ [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq,
+ [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq,
+ [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode,
+ [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode,
+ [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
+ [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
+ [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
+ [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy,
+ [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy,
+ [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
+ [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
+ [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
+ [SIOCGIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan,
+ [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid,
+ [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid,
+ [SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick,
+ [SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick,
+ [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate,
+ [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate,
+ [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts,
+ [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts,
+ [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag,
+ [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag,
+ [SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry,
+ [SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode,
+ [SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode,
+ [SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower,
+ [SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower,
};
@@ -4359,15 +4359,15 @@ static const iw_handler orinoco_handler[] = {
Added typecasting since we no longer use iwreq_data -- Moustafa
*/
static const iw_handler orinoco_private_handler[] = {
- [0] (iw_handler) orinoco_ioctl_reset,
- [1] (iw_handler) orinoco_ioctl_reset,
- [2] (iw_handler) orinoco_ioctl_setport3,
- [3] (iw_handler) orinoco_ioctl_getport3,
- [4] (iw_handler) orinoco_ioctl_setpreamble,
- [5] (iw_handler) orinoco_ioctl_getpreamble,
- [6] (iw_handler) orinoco_ioctl_setibssport,
- [7] (iw_handler) orinoco_ioctl_getibssport,
- [9] (iw_handler) orinoco_ioctl_getrid,
+ [0] = (iw_handler) orinoco_ioctl_reset,
+ [1] = (iw_handler) orinoco_ioctl_reset,
+ [2] = (iw_handler) orinoco_ioctl_setport3,
+ [3] = (iw_handler) orinoco_ioctl_getport3,
+ [4] = (iw_handler) orinoco_ioctl_setpreamble,
+ [5] = (iw_handler) orinoco_ioctl_getpreamble,
+ [6] = (iw_handler) orinoco_ioctl_setibssport,
+ [7] = (iw_handler) orinoco_ioctl_getibssport,
+ [9] = (iw_handler) orinoco_ioctl_getrid,
};
static const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 00498e2f120..d3dad0aac7c 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -23,13 +23,8 @@
#include <linux/pci.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
-#include <linux/serial.h>
-#include <linux/serialP.h>
-#include <linux/list.h>
#include <linux/8250_pci.h>
-#include <asm/serial.h>
-
enum parport_pc_pci_cards {
titan_110l = 0,
titan_210l,
@@ -168,182 +163,147 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl);
-struct pci_board_no_ids {
- int flags;
- int num_ports;
- int base_baud;
- int uart_offset;
- int reg_shift;
- int (*init_fn)(struct pci_dev *dev, struct pci_board_no_ids *board,
- int enable);
- int first_uart_offset;
-};
-
-static int __devinit siig10x_init_fn(struct pci_dev *dev, struct pci_board_no_ids *board, int enable)
-{
- return pci_siig10x_fn(dev, enable);
-}
-
-static int __devinit siig20x_init_fn(struct pci_dev *dev, struct pci_board_no_ids *board, int enable)
-{
- return pci_siig20x_fn(dev, enable);
-}
-
-static int __devinit netmos_serial_init(struct pci_dev *dev, struct pci_board_no_ids *board, int enable)
-{
- board->num_ports = dev->subsystem_device & 0xf;
- return 0;
-}
-
-static struct pci_board_no_ids pci_boards[] __devinitdata = {
- /*
- * PCI Flags, Number of Ports, Base (Maximum) Baud Rate,
- * Offset to get to next UART's registers,
- * Register shift to use for memory-mapped I/O,
- * Initialization function, first UART offset
- */
-
-// Cards not tested are marked n/t
-// If you have one of these cards and it works for you, please tell me..
-
-/* titan_110l */ { SPCI_FL_BASE1 | SPCI_FL_BASE_TABLE, 1, 921600 },
-/* titan_210l */ { SPCI_FL_BASE1 | SPCI_FL_BASE_TABLE, 2, 921600 },
-/* netmos_9xx5_combo */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200, 0, 0, netmos_serial_init },
-/* netmos_9855 */ { SPCI_FL_BASE2 | SPCI_FL_BASE_TABLE, 1, 115200, 0, 0, netmos_serial_init },
-/* avlab_1s1p (n/t) */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s1p_650 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s1p_850 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s2p (n/t) */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s2p_650 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_1s2p_850 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 1, 115200 },
-/* avlab_2s1p (n/t) */ { SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 },
-/* avlab_2s1p_650 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 },
-/* avlab_2s1p_850 (nt)*/{ SPCI_FL_BASE0 | SPCI_FL_BASE_TABLE, 2, 115200 },
-/* siig_1s1p_10x */ { SPCI_FL_BASE2, 1, 460800, 0, 0, siig10x_init_fn },
-/* siig_2s1p_10x */ { SPCI_FL_BASE2, 1, 921600, 0, 0, siig10x_init_fn },
-/* siig_2p1s_20x */ { SPCI_FL_BASE0, 1, 921600, 0, 0, siig20x_init_fn },
-/* siig_1s1p_20x */ { SPCI_FL_BASE0, 1, 921600, 0, 0, siig20x_init_fn },
-/* siig_2s1p_20x */ { SPCI_FL_BASE0, 1, 921600, 0, 0, siig20x_init_fn },
+/*
+ * This table describes the serial "geometry" of these boards. Any
+ * quirks for these can be found in drivers/serial/8250_pci.c
+ *
+ * Cards not tested are marked n/t
+ * If you have one of these cards and it works for you, please tell me.
+ */
+static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
+ [titan_110l] = {
+ .flags = FL_BASE1 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [titan_210l] = {
+ .flags = FL_BASE1 | FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [netmos_9xx5_combo] = {
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [netmos_9855] = {
+ .flags = FL_BASE2 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_1s1p] = { /* n/t */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_1s1p_650] = { /* nt */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_1s1p_850] = { /* nt */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_1s2p] = { /* n/t */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_1s2p_650] = { /* nt */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_1s2p_850] = { /* nt */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_2s1p] = { /* n/t */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_2s1p_650] = { /* nt */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [avlab_2s1p_850] = { /* nt */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [siig_1s1p_10x] = {
+ .flags = FL_BASE2,
+ .num_ports = 1,
+ .base_baud = 460800,
+ .uart_offset = 8,
+ },
+ [siig_2s1p_10x] = {
+ .flags = FL_BASE2,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [siig_2p1s_20x] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [siig_1s1p_20x] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [siig_2s1p_20x] = {
+ .flags = FL_BASE0,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
};
struct parport_serial_private {
- int num_ser;
- int line[20];
- struct pci_board_no_ids ser;
+ struct serial_private *serial;
int num_par;
struct parport *port[PARPORT_MAX];
struct parport_pc_pci par;
};
-static int __devinit get_pci_port (struct pci_dev *dev,
- struct pci_board_no_ids *board,
- struct serial_struct *req,
- int idx)
-{
- unsigned long port;
- int base_idx;
- int max_port;
- int offset;
-
- base_idx = SPCI_FL_GET_BASE(board->flags);
- if (board->flags & SPCI_FL_BASE_TABLE)
- base_idx += idx;
-
- if (board->flags & SPCI_FL_REGION_SZ_CAP) {
- max_port = pci_resource_len(dev, base_idx) / 8;
- if (idx >= max_port)
- return 1;
- }
-
- offset = board->first_uart_offset;
-
- /* Timedia/SUNIX uses a mixture of BARs and offsets */
- /* Ugh, this is ugly as all hell --- TYT */
- if(dev->vendor == PCI_VENDOR_ID_TIMEDIA ) /* 0x1409 */
- switch(idx) {
- case 0: base_idx=0;
- break;
- case 1: base_idx=0; offset=8;
- break;
- case 2: base_idx=1;
- break;
- case 3: base_idx=1; offset=8;
- break;
- case 4: /* BAR 2*/
- case 5: /* BAR 3 */
- case 6: /* BAR 4*/
- case 7: base_idx=idx-2; /* BAR 5*/
- }
-
- port = pci_resource_start(dev, base_idx) + offset;
-
- if ((board->flags & SPCI_FL_BASE_TABLE) == 0)
- port += idx * (board->uart_offset ? board->uart_offset : 8);
-
- if (pci_resource_flags (dev, base_idx) & IORESOURCE_IO) {
- int high_bits_offset = ((sizeof(long)-sizeof(int))*8);
- req->port = port;
- if (high_bits_offset)
- req->port_high = port >> high_bits_offset;
- else
- req->port_high = 0;
- return 0;
- }
- req->io_type = SERIAL_IO_MEM;
- req->iomem_base = ioremap(port, board->uart_offset);
- req->iomem_reg_shift = board->reg_shift;
- req->port = 0;
- return req->iomem_base ? 0 : 1;
-}
-
/* Register the serial port(s) of a PCI card. */
static int __devinit serial_register (struct pci_dev *dev,
const struct pci_device_id *id)
{
- struct pci_board_no_ids *board;
struct parport_serial_private *priv = pci_get_drvdata (dev);
- struct serial_struct serial_req;
- int base_baud;
- int k;
- int success = 0;
-
- priv->ser = pci_boards[id->driver_data];
- board = &priv->ser;
- if (board->init_fn && ((board->init_fn) (dev, board, 1) != 0))
- return 1;
-
- base_baud = board->base_baud;
- if (!base_baud)
- base_baud = BASE_BAUD;
- memset (&serial_req, 0, sizeof (serial_req));
-
- for (k = 0; k < board->num_ports; k++) {
- int line;
+ struct pciserial_board *board;
+ struct serial_private *serial;
- if (priv->num_ser == ARRAY_SIZE (priv->line)) {
- printk (KERN_WARNING
- "parport_serial: %s: only %u serial lines "
- "supported (%d reported)\n", pci_name (dev),
- ARRAY_SIZE (priv->line), board->num_ports);
- break;
- }
+ board = &pci_parport_serial_boards[id->driver_data];
+ serial = pciserial_init_ports(dev, board);
- serial_req.irq = dev->irq;
- if (get_pci_port (dev, board, &serial_req, k))
- break;
- serial_req.flags = ASYNC_SKIP_TEST | ASYNC_AUTOPROBE;
- serial_req.baud_base = base_baud;
- line = register_serial (&serial_req);
- if (line < 0) {
- printk (KERN_DEBUG
- "parport_serial: register_serial failed\n");
- continue;
- }
- priv->line[priv->num_ser++] = line;
- success = 1;
- }
+ if (IS_ERR(serial))
+ return PTR_ERR(serial);
- return success ? 0 : 1;
+ priv->serial = serial;
+ return 0;
}
/* Register the parallel port(s) of a PCI card. */
@@ -411,7 +371,7 @@ static int __devinit parport_serial_pci_probe (struct pci_dev *dev,
priv = kmalloc (sizeof *priv, GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->num_ser = priv->num_par = 0;
+ memset(priv, 0, sizeof(struct parport_serial_private));
pci_set_drvdata (dev, priv);
err = pci_enable_device (dev);
@@ -444,15 +404,12 @@ static void __devexit parport_serial_pci_remove (struct pci_dev *dev)
struct parport_serial_private *priv = pci_get_drvdata (dev);
int i;
+ pci_set_drvdata(dev, NULL);
+
// Serial ports
- for (i = 0; i < priv->num_ser; i++) {
- unregister_serial (priv->line[i]);
+ if (priv->serial)
+ pciserial_remove_ports(priv->serial);
- if (priv->ser.init_fn)
- (priv->ser.init_fn) (dev, &priv->ser, 0);
- }
- pci_set_drvdata (dev, NULL);
-
// Parallel ports
for (i = 0; i < priv->num_par; i++)
parport_pc_unregister_port (priv->port[i]);
@@ -461,11 +418,47 @@ static void __devexit parport_serial_pci_remove (struct pci_dev *dev)
return;
}
+static int parport_serial_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ struct parport_serial_private *priv = pci_get_drvdata(dev);
+
+ if (priv->serial)
+ pciserial_suspend_ports(priv->serial);
+
+ /* FIXME: What about parport? */
+
+ pci_save_state(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+ return 0;
+}
+
+static int parport_serial_pci_resume(struct pci_dev *dev)
+{
+ struct parport_serial_private *priv = pci_get_drvdata(dev);
+
+ pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
+
+ /*
+ * The device may have been disabled. Re-enable it.
+ */
+ pci_enable_device(dev);
+
+ if (priv->serial)
+ pciserial_resume_ports(priv->serial);
+
+ /* FIXME: What about parport? */
+
+ return 0;
+}
+
static struct pci_driver parport_serial_pci_driver = {
.name = "parport_serial",
.id_table = parport_serial_pci_tbl,
.probe = parport_serial_pci_probe,
.remove = __devexit_p(parport_serial_pci_remove),
+ .suspend = parport_serial_pci_suspend,
+ .resume = parport_serial_pci_resume,
};
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index e3b9692b968..179c95c878a 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -1,26 +1,34 @@
/*
* ahci.c - AHCI SATA support
*
- * Copyright 2004 Red Hat, Inc.
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
+ * Copyright 2004-2005 Red Hat, Inc.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
*
- * Version 1.0 of the AHCI specification:
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
* http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
*
*/
@@ -269,6 +277,8 @@ static struct pci_device_id ahci_pci_tbl[] = {
board_ahci }, /* ESB2 */
{ PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* ESB2 */
+ { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_ahci }, /* ICH7-M DH */
{ } /* terminate list */
};
@@ -584,12 +594,16 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
static void ahci_eng_timeout(struct ata_port *ap)
{
- void *mmio = ap->host_set->mmio_base;
+ struct ata_host_set *host_set = ap->host_set;
+ void *mmio = host_set->mmio_base;
void *port_mmio = ahci_port_base(mmio, ap->port_no);
struct ata_queued_cmd *qc;
+ unsigned long flags;
DPRINTK("ENTER\n");
+ spin_lock_irqsave(&host_set->lock, flags);
+
ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT));
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -607,6 +621,7 @@ static void ahci_eng_timeout(struct ata_port *ap)
ata_qc_complete(qc, ATA_ERR);
}
+ spin_unlock_irqrestore(&host_set->lock, flags);
}
static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
@@ -696,9 +711,6 @@ static int ahci_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
void *port_mmio = (void *) ap->ioaddr.cmd_addr;
- writel(1, port_mmio + PORT_SCR_ACT);
- readl(port_mmio + PORT_SCR_ACT); /* flush */
-
writel(1, port_mmio + PORT_CMD_ISSUE);
readl(port_mmio + PORT_CMD_ISSUE); /* flush */
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index d96ebf9d222..fb28c126184 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -1,24 +1,42 @@
/*
-
- ata_piix.c - Intel PATA/SATA controllers
-
- Maintained by: Jeff Garzik <jgarzik@pobox.com>
- Please ALWAYS copy linux-ide@vger.kernel.org
- on emails.
-
-
- Copyright 2003-2004 Red Hat Inc
- Copyright 2003-2004 Jeff Garzik
-
-
- Copyright header from piix.c:
-
- Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
- Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
-
- May be copied or modified under the terms of the GNU General Public License
-
+ * ata_piix.c - Intel PATA/SATA controllers
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ *
+ * Copyright 2003-2005 Red Hat Inc
+ * Copyright 2003-2005 Jeff Garzik
+ *
+ *
+ * Copyright header from piix.c:
+ *
+ * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat Inc <alan@redhat.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available at http://developer.intel.com/
+ *
*/
#include <linux/kernel.h>
@@ -629,13 +647,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
port_info[1] = NULL;
if (port_info[0]->host_flags & PIIX_FLAG_AHCI) {
- u8 tmp;
- pci_read_config_byte(pdev, PIIX_SCC, &tmp);
- if (tmp == PIIX_AHCI_DEVICE) {
- int rc = piix_disable_ahci(pdev);
- if (rc)
- return rc;
- }
+ u8 tmp;
+ pci_read_config_byte(pdev, PIIX_SCC, &tmp);
+ if (tmp == PIIX_AHCI_DEVICE) {
+ int rc = piix_disable_ahci(pdev);
+ if (rc)
+ return rc;
+ }
}
if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) {
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index f4e7dcb6492..dee4b12b034 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1,25 +1,35 @@
/*
- libata-core.c - helper library for ATA
-
- Copyright 2003-2004 Red Hat, Inc. All rights reserved.
- Copyright 2003-2004 Jeff Garzik
-
- The contents of this file are subject to the Open
- Software License version 1.1 that can be found at
- http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- by reference.
-
- Alternatively, the contents of this file may be used under the terms
- of the GNU General Public License version 2 (the "GPL") as distributed
- in the kernel source COPYING file, in which case the provisions of
- the GPL are applicable instead of the above. If you wish to allow
- the use of your version of this file only under the terms of the
- GPL and not to allow others to use your version of this file under
- the OSL, indicate your decision by deleting the provisions above and
- replace them with the notice and other provisions required by the GPL.
- If you do not delete the provisions above, a recipient may use your
- version of this file under either the OSL or the GPL.
-
+ * libata-core.c - helper library for ATA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/ and
+ * http://www.sata-io.org/
+ *
*/
#include <linux/config.h>
@@ -1304,12 +1314,12 @@ static inline u8 ata_dev_knobble(struct ata_port *ap)
/**
* ata_dev_config - Run device specific handlers and check for
* SATA->PATA bridges
- * @ap: Bus
+ * @ap: Bus
* @i: Device
*
* LOCKING:
*/
-
+
void ata_dev_config(struct ata_port *ap, unsigned int i)
{
/* limit bridge transfers to udma5, 200 sectors */
@@ -2377,6 +2387,27 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
}
/**
+ * ata_poll_qc_complete - turn irq back on and finish qc
+ * @qc: Command to complete
+ * @drv_stat: ATA status register content
+ *
+ * LOCKING:
+ * None. (grabs host lock)
+ */
+
+void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->flags &= ~ATA_FLAG_NOINTR;
+ ata_irq_on(ap);
+ ata_qc_complete(qc, drv_stat);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
* ata_pio_poll -
* @ap:
*
@@ -2438,11 +2469,10 @@ static void ata_pio_complete (struct ata_port *ap)
u8 drv_stat;
/*
- * This is purely hueristic. This is a fast path.
- * Sometimes when we enter, BSY will be cleared in
- * a chk-status or two. If not, the drive is probably seeking
- * or something. Snooze for a couple msecs, then
- * chk-status again. If still busy, fall back to
+ * This is purely heuristic. This is a fast path. Sometimes when
+ * we enter, BSY will be cleared in a chk-status or two. If not,
+ * the drive is probably seeking or something. Snooze for a couple
+ * msecs, then chk-status again. If still busy, fall back to
* PIO_ST_POLL state.
*/
drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
@@ -2467,9 +2497,7 @@ static void ata_pio_complete (struct ata_port *ap)
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
-
- ata_qc_complete(qc, drv_stat);
+ ata_poll_qc_complete(qc, drv_stat);
}
@@ -2494,6 +2522,20 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
#endif /* __BIG_ENDIAN */
}
+/**
+ * ata_mmio_data_xfer - Transfer data by MMIO
+ * @ap: port to read/write
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @do_write: read/write
+ *
+ * Transfer data from/to the device data register by MMIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int write_data)
{
@@ -2502,6 +2544,7 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
u16 *buf16 = (u16 *) buf;
void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
+ /* Transfer multiple of 2 bytes */
if (write_data) {
for (i = 0; i < words; i++)
writew(le16_to_cpu(buf16[i]), mmio);
@@ -2509,19 +2552,76 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
for (i = 0; i < words; i++)
buf16[i] = cpu_to_le16(readw(mmio));
}
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ u16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (write_data) {
+ memcpy(align_buf, trailing_buf, 1);
+ writew(le16_to_cpu(align_buf[0]), mmio);
+ } else {
+ align_buf[0] = cpu_to_le16(readw(mmio));
+ memcpy(trailing_buf, align_buf, 1);
+ }
+ }
}
+/**
+ * ata_pio_data_xfer - Transfer data by PIO
+ * @ap: port to read/write
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @do_write: read/write
+ *
+ * Transfer data from/to the device data register by PIO.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int write_data)
{
- unsigned int dwords = buflen >> 1;
+ unsigned int words = buflen >> 1;
+ /* Transfer multiple of 2 bytes */
if (write_data)
- outsw(ap->ioaddr.data_addr, buf, dwords);
+ outsw(ap->ioaddr.data_addr, buf, words);
else
- insw(ap->ioaddr.data_addr, buf, dwords);
+ insw(ap->ioaddr.data_addr, buf, words);
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ u16 align_buf[1] = { 0 };
+ unsigned char *trailing_buf = buf + buflen - 1;
+
+ if (write_data) {
+ memcpy(align_buf, trailing_buf, 1);
+ outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
+ } else {
+ align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
+ memcpy(trailing_buf, align_buf, 1);
+ }
+ }
}
+/**
+ * ata_data_xfer - Transfer data from/to the data register.
+ * @ap: port to read/write
+ * @buf: data buffer
+ * @buflen: buffer length
+ * @do_write: read/write
+ *
+ * Transfer data from/to the device data register.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
unsigned int buflen, int do_write)
{
@@ -2531,6 +2631,16 @@ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
ata_pio_data_xfer(ap, buf, buflen, do_write);
}
+/**
+ * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ * @qc: Command on going
+ *
+ * Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -2569,6 +2679,18 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
kunmap(page);
}
+/**
+ * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * @qc: Command on going
+ * @bytes: number of bytes
+ *
+ * Transfer data from/to the ATAPI device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -2578,10 +2700,33 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
unsigned char *buf;
unsigned int offset, count;
- if (qc->curbytes == qc->nbytes - bytes)
+ if (qc->curbytes + bytes >= qc->nbytes)
ap->pio_task_state = PIO_ST_LAST;
next_sg:
+ if (unlikely(qc->cursg >= qc->n_elem)) {
+ /*
+ * The end of qc->sg is reached and the device expects
+ * more data to transfer. In order not to overrun qc->sg
+ * and to fulfill the length specified in the byte count register,
+ * - for the read case, discard trailing data from the device
+ * - for the write case, pad the transfer with zero data
+ */
+ u16 pad_buf[1] = { 0 };
+ unsigned int words = bytes >> 1;
+ unsigned int i;
+
+ if (words) /* warning if bytes > 1 */
+ printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
+ ap->id, bytes);
+
+ for (i = 0; i < words; i++)
+ ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
+
+ ap->pio_task_state = PIO_ST_LAST;
+ return;
+ }
+
sg = &qc->sg[qc->cursg];
page = sg->page;
@@ -2615,11 +2760,21 @@ next_sg:
kunmap(page);
- if (bytes) {
+ if (bytes)
goto next_sg;
- }
}
+/**
+ * atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ * @qc: Command on going
+ *
+ * Transfer data from/to the ATAPI device.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ */
+
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -2692,9 +2847,7 @@ static void ata_pio_block(struct ata_port *ap)
if ((status & ATA_DRQ) == 0) {
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
-
- ata_qc_complete(qc, status);
+ ata_poll_qc_complete(qc, status);
return;
}
@@ -2724,9 +2877,7 @@ static void ata_pio_error(struct ata_port *ap)
ap->pio_task_state = PIO_ST_IDLE;
- ata_irq_on(ap);
-
- ata_qc_complete(qc, drv_stat | ATA_ERR);
+ ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
}
static void ata_pio_task(void *_data)
@@ -2832,8 +2983,10 @@ static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_host_set *host_set = ap->host_set;
struct ata_device *dev = qc->dev;
u8 host_stat = 0, drv_stat;
+ unsigned long flags;
DPRINTK("ENTER\n");
@@ -2844,7 +2997,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) {
/* finish completing original command */
+ spin_lock_irqsave(&host_set->lock, flags);
__ata_qc_complete(qc);
+ spin_unlock_irqrestore(&host_set->lock, flags);
atapi_request_sense(ap, dev, cmd);
@@ -2855,6 +3010,8 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
}
}
+ spin_lock_irqsave(&host_set->lock, flags);
+
/* hack alert! We cannot use the supplied completion
* function from inside the ->eh_strategy_handler() thread.
* libata is the only user of ->eh_strategy_handler() in
@@ -2870,7 +3027,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
host_stat = ap->ops->bmdma_status(ap);
/* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(ap);
+ ap->ops->bmdma_stop(qc);
/* fall through */
@@ -2888,6 +3045,9 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc)
ata_qc_complete(qc, drv_stat);
break;
}
+
+ spin_unlock_irqrestore(&host_set->lock, flags);
+
out:
DPRINTK("EXIT\n");
}
@@ -3061,9 +3221,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
ata_sg_clean(qc);
+ /* atapi: mark qc as inactive to prevent the interrupt handler
+ * from completing the command twice later, before the error handler
+ * is called. (when rc != 0 and atapi request sense is needed)
+ */
+ qc->flags &= ~ATA_QCFLAG_ACTIVE;
+
/* call completion callback */
rc = qc->complete_fn(qc, drv_stat);
- qc->flags &= ~ATA_QCFLAG_ACTIVE;
/* if callback indicates not to complete command (non-zero),
* return immediately
@@ -3193,11 +3358,13 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
break;
case ATA_PROT_ATAPI_NODATA:
+ ap->flags |= ATA_FLAG_NOINTR;
ata_tf_to_host_nolock(ap, &qc->tf);
queue_work(ata_wq, &ap->packet_task);
break;
case ATA_PROT_ATAPI_DMA:
+ ap->flags |= ATA_FLAG_NOINTR;
ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
ap->ops->bmdma_setup(qc); /* set up bmdma */
queue_work(ata_wq, &ap->packet_task);
@@ -3242,7 +3409,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
}
/**
- * ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
@@ -3413,7 +3580,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
/**
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
- * @ap: Port associated with this ATA transaction.
+ * @qc: Command we are ending DMA for
*
* Clears the ATA_DMA_START flag in the dma control register
*
@@ -3423,8 +3590,9 @@ u8 ata_bmdma_status(struct ata_port *ap)
* spin_lock_irqsave(host_set lock)
*/
-void ata_bmdma_stop(struct ata_port *ap)
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
+ struct ata_port *ap = qc->ap;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
@@ -3476,7 +3644,7 @@ inline unsigned int ata_host_intr (struct ata_port *ap,
goto idle_irq;
/* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(ap);
+ ap->ops->bmdma_stop(qc);
/* fall through */
@@ -3551,7 +3719,8 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
struct ata_port *ap;
ap = host_set->ports[i];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -3603,19 +3772,27 @@ static void atapi_packet_task(void *_data)
/* send SCSI cdb */
DPRINTK("send cdb\n");
assert(ap->cdb_len >= 12);
- ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
- /* if we are DMA'ing, irq handler takes over from here */
- if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
- ap->ops->bmdma_start(qc); /* initiate bmdma */
+ if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
+ qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
+ unsigned long flags;
- /* non-data commands are also handled via irq */
- else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
- /* do nothing */
- }
+ /* Once we're done issuing the command and kicking off bmdma,
+ * the irq handler takes over. To avoid losing the irq, we need
+ * to clear the NOINTR flag before sending the cdb, but the
+ * interrupt handler must not be invoked before we're
+ * finished. Hence, the following locking.
+ */
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ ap->flags &= ~ATA_FLAG_NOINTR;
+ ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+ if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
+ ap->ops->bmdma_start(qc); /* initiate bmdma */
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+ } else {
+ ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
- /* PIO commands are handled by polling */
- else {
+ /* PIO commands are handled by polling */
ap->pio_task_state = PIO_ST;
queue_work(ata_wq, &ap->pio_task);
}
@@ -3623,7 +3800,7 @@ static void atapi_packet_task(void *_data)
return;
err_out:
- ata_qc_complete(qc, ATA_ERR);
+ ata_poll_qc_complete(qc, ATA_ERR);
}
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 6a75ec2187f..346eb36b1e3 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1,25 +1,36 @@
/*
- libata-scsi.c - helper library for ATA
-
- Copyright 2003-2004 Red Hat, Inc. All rights reserved.
- Copyright 2003-2004 Jeff Garzik
-
- The contents of this file are subject to the Open
- Software License version 1.1 that can be found at
- http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- by reference.
-
- Alternatively, the contents of this file may be used under the terms
- of the GNU General Public License version 2 (the "GPL") as distributed
- in the kernel source COPYING file, in which case the provisions of
- the GPL are applicable instead of the above. If you wish to allow
- the use of your version of this file only under the terms of the
- GPL and not to allow others to use your version of this file under
- the OSL, indicate your decision by deleting the provisions above and
- replace them with the notice and other provisions required by the GPL.
- If you do not delete the provisions above, a recipient may use your
- version of this file under either the OSL or the GPL.
-
+ * libata-scsi.c - helper library for ATA
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from
+ * - http://www.t10.org/
+ * - http://www.t13.org/
+ *
*/
#include <linux/kernel.h>
@@ -392,6 +403,60 @@ int ata_scsi_error(struct Scsi_Host *host)
}
/**
+ * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
+ * @qc: Storage for translated ATA taskfile
+ * @scsicmd: SCSI command to translate
+ *
+ * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
+ * (to start). Perhaps these commands should be preceded by
+ * CHECK POWER MODE to see what power mode the device is already in.
+ * [See SAT revision 5 at www.t10.org]
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
+ */
+
+static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
+ u8 *scsicmd)
+{
+ struct ata_taskfile *tf = &qc->tf;
+
+ tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf->protocol = ATA_PROT_NODATA;
+ if (scsicmd[1] & 0x1) {
+ ; /* ignore IMMED bit, violates sat-r05 */
+ }
+ if (scsicmd[4] & 0x2)
+ return 1; /* LOEJ bit set not supported */
+ if (((scsicmd[4] >> 4) & 0xf) != 0)
+ return 1; /* power conditions not supported */
+ if (scsicmd[4] & 0x1) {
+ tf->nsect = 1; /* 1 sector, lba=0 */
+ tf->lbah = 0x0;
+ tf->lbam = 0x0;
+ tf->lbal = 0x0;
+ tf->device |= ATA_LBA;
+ tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
+ } else {
+ tf->nsect = 0; /* time period value (0 implies now) */
+ tf->command = ATA_CMD_STANDBY;
+ /* Consider: ATA STANDBY IMMEDIATE command */
+ }
+ /*
+ * Standby and Idle condition timers could be implemented but that
+ * would require libata to implement the Power condition mode page
+ * and allow the user to change it. Changing mode pages requires
+ * MODE SELECT to be implemented.
+ */
+
+ return 0;
+}
+
+
+/**
* ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
* @qc: Storage for translated ATA taskfile
* @scsicmd: SCSI command to translate (ignored)
@@ -576,11 +641,19 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
tf->lbah = scsicmd[3];
VPRINTK("ten-byte command\n");
+ if (qc->nsect == 0) /* we don't support length==0 cmds */
+ return 1;
return 0;
}
if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) {
qc->nsect = tf->nsect = scsicmd[4];
+ if (!qc->nsect) {
+ qc->nsect = 256;
+ if (lba48)
+ tf->hob_nsect = 1;
+ }
+
tf->lbal = scsicmd[3];
tf->lbam = scsicmd[2];
tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
@@ -620,6 +693,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
tf->lbah = scsicmd[7];
VPRINTK("sixteen-byte command\n");
+ if (qc->nsect == 0) /* we don't support length==0 cmds */
+ return 1;
return 0;
}
@@ -1435,6 +1510,8 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case VERIFY:
case VERIFY_16:
return ata_scsi_verify_xlat;
+ case START_STOP:
+ return ata_scsi_start_stop_xlat;
}
return NULL;
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 3e7f4843020..809c634afbc 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -1,25 +1,28 @@
/*
- libata.h - helper library for ATA
-
- Copyright 2003-2004 Red Hat, Inc. All rights reserved.
- Copyright 2003-2004 Jeff Garzik
-
- The contents of this file are subject to the Open
- Software License version 1.1 that can be found at
- http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- by reference.
-
- Alternatively, the contents of this file may be used under the terms
- of the GNU General Public License version 2 (the "GPL") as distributed
- in the kernel source COPYING file, in which case the provisions of
- the GPL are applicable instead of the above. If you wish to allow
- the use of your version of this file only under the terms of the
- GPL and not to allow others to use your version of this file under
- the OSL, indicate your decision by deleting the provisions above and
- replace them with the notice and other provisions required by the GPL.
- If you do not delete the provisions above, a recipient may use your
- version of this file under either the OSL or the GPL.
-
+ * libata.h - helper library for ATA
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
*/
#ifndef __LIBATA_H__
@@ -72,7 +75,7 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *),
u8 asc, u8 ascq);
-extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
unsigned int (*actor) (struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen));
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index b0403ccd8a2..03d9bc6e69d 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -4,21 +4,37 @@
* Copyright 2004 NVIDIA Corp. All rights reserved.
* Copyright 2004 Andrew Chew
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * No hardware documentation available outside of NVIDIA.
+ * This driver programs the NVIDIA SATA controller in a similar
+ * fashion to other PCI IDE BMDMA controllers, with a few
+ * NV-specific details such as register offsets, SATA phy location,
+ * hotplug info, etc.
+ *
+ *
+ * 0.08
+ * - Added support for MCP51 and MCP55.
+ *
+ * 0.07
+ * - Added support for RAID class code.
*
* 0.06
* - Added generic SATA support by using a pci_device_id that filters on
@@ -48,7 +64,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
-#define DRV_VERSION "0.6"
+#define DRV_VERSION "0.8"
#define NV_PORTS 2
#define NV_PIO_MASK 0x1f
@@ -116,7 +132,9 @@ enum nv_host_type
GENERIC,
NFORCE2,
NFORCE3,
- CK804
+ CK804,
+ MCP51,
+ MCP55
};
static struct pci_device_id nv_pci_tbl[] = {
@@ -134,9 +152,18 @@ static struct pci_device_id nv_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
{ 0, } /* terminate list */
};
@@ -274,7 +301,8 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
struct ata_port *ap;
ap = host_set->ports[i];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index ad31b8afec6..7c4f6ecc1cc 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -7,21 +7,26 @@
*
* Copyright 2003-2004 Red Hat, Inc.
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware information only available under NDA.
*
*/
@@ -206,6 +211,10 @@ static struct pci_device_id pdc_ata_pci_tbl[] = {
board_20319 },
{ PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
+ { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_20319 },
+ { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ board_20319 },
{ PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20319 },
@@ -357,11 +366,15 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
static void pdc_eng_timeout(struct ata_port *ap)
{
+ struct ata_host_set *host_set = ap->host_set;
u8 drv_stat;
struct ata_queued_cmd *qc;
+ unsigned long flags;
DPRINTK("ENTER\n");
+ spin_lock_irqsave(&host_set->lock, flags);
+
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!qc) {
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
@@ -395,6 +408,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
}
out:
+ spin_unlock_irqrestore(&host_set->lock, flags);
DPRINTK("EXIT\n");
}
@@ -477,7 +491,8 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
VPRINTK("port %u\n", i);
ap = host_set->ports[i];
tmp = mask & (1 << (i + 1));
- if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (tmp && ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_promise.h b/drivers/scsi/sata_promise.h
index 6e7e96b9ee1..6ee5e190262 100644
--- a/drivers/scsi/sata_promise.h
+++ b/drivers/scsi/sata_promise.h
@@ -3,21 +3,24 @@
*
* Copyright 2003-2004 Red Hat, Inc.
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
*
*/
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 1383e8a28d7..9c99ab433bd 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -6,21 +6,24 @@
* Copyright 2005 Pacific Digital Corporation.
* (OSL/GPL code release authorized by Jalil Fadavi).
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
*
*/
@@ -117,7 +120,7 @@ static void qs_phy_reset(struct ata_port *ap);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
-static void qs_bmdma_stop(struct ata_port *ap);
+static void qs_bmdma_stop(struct ata_queued_cmd *qc);
static u8 qs_bmdma_status(struct ata_port *ap);
static void qs_irq_clear(struct ata_port *ap);
static void qs_eng_timeout(struct ata_port *ap);
@@ -198,7 +201,7 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
return 1; /* ATAPI DMA not supported */
}
-static void qs_bmdma_stop(struct ata_port *ap)
+static void qs_bmdma_stop(struct ata_queued_cmd *qc)
{
/* nothing */
}
@@ -386,7 +389,8 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
handled = 1;
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap && !(ap->flags &
+ (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_pkt)
@@ -417,7 +421,8 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
struct qs_port_priv *pp = ap->private_data;
if (!pp || pp->state != qs_state_mmio)
@@ -431,7 +436,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
continue;
DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
ap->id, qc->tf.protocol, status);
-
+
/* complete taskfile transaction */
pp->state = qs_state_idle;
ata_qc_complete(qc, status);
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 49ed557a4b6..71d49548f0a 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -5,24 +5,32 @@
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
- * Copyright 2003 Red Hat, Inc.
+ * Copyright 2003-2005 Red Hat, Inc.
* Copyright 2003 Benjamin Herrenschmidt
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Documentation for SiI 3112:
+ * http://gkernel.sourceforge.net/specs/sii/3112A_SiI-DS-0095-B2.pdf.bz2
+ *
+ * Other errata and documentation available under NDA.
*
*/
@@ -41,8 +49,11 @@
#define DRV_VERSION "0.9"
enum {
+ SIL_FLAG_MOD15WRITE = (1 << 30),
+
sil_3112 = 0,
- sil_3114 = 1,
+ sil_3112_m15w = 1,
+ sil_3114 = 2,
SIL_FIFO_R0 = 0x40,
SIL_FIFO_W0 = 0x41,
@@ -76,13 +87,13 @@ static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void sil_post_set_mode (struct ata_port *ap);
static struct pci_device_id sil_pci_tbl[] = {
- { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
+ { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
+ { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
- { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
+ { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
+ { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
+ { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
{ } /* terminate list */
};
@@ -174,6 +185,16 @@ static struct ata_port_info sil_port_info[] = {
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
.port_ops = &sil_ops,
+ }, /* sil_3112_m15w - keep it sync'd w/ sil_3112 */
+ {
+ .sht = &sil_sht,
+ .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_SRST | ATA_FLAG_MMIO |
+ SIL_FLAG_MOD15WRITE,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x3f, /* udma0-5 */
+ .port_ops = &sil_ops,
}, /* sil_3114 */
{
.sht = &sil_sht,
@@ -323,15 +344,15 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
while ((len > 0) && (s[len - 1] == ' '))
len--;
- for (n = 0; sil_blacklist[n].product; n++)
+ for (n = 0; sil_blacklist[n].product; n++)
if (!memcmp(sil_blacklist[n].product, s,
strlen(sil_blacklist[n].product))) {
quirks = sil_blacklist[n].quirk;
break;
}
-
+
/* limit requests to 15 sectors */
- if (quirks & SIL_QUIRK_MOD15WRITE) {
+ if ((ap->flags & SIL_FLAG_MOD15WRITE) && (quirks & SIL_QUIRK_MOD15WRITE)) {
printk(KERN_INFO "ata%u(%u): applying Seagate errata fix\n",
ap->id, dev->devno);
ap->host->max_sectors = 15;
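A hedged user-space illustration of the gating logic above: the Seagate mod15write workaround is now applied only when both the board flag (SIL_FLAG_MOD15WRITE, i.e. a 3112-class part) and the per-drive blacklist quirk are present. SIL_FLAG_MOD15WRITE's value matches the enum above; the quirk bit and the 256-sector default below are placeholders for this sketch only.

#include <stdio.h>

#define FLAG_MOD15WRITE  (1u << 30)   /* board-level flag, as in the enum above */
#define QUIRK_MOD15WRITE (1u << 0)    /* per-drive blacklist quirk (local value) */

static unsigned int max_sectors(unsigned int port_flags, unsigned int quirks)
{
	if ((port_flags & FLAG_MOD15WRITE) && (quirks & QUIRK_MOD15WRITE))
		return 15;                /* apply the Seagate errata cap */
	return 256;                       /* placeholder default for this sketch */
}

int main(void)
{
	printf("3112-class board, blacklisted drive: %u\n",
	       max_sectors(FLAG_MOD15WRITE, QUIRK_MOD15WRITE));
	printf("3512/3114 board, blacklisted drive:  %u\n",
	       max_sectors(0, QUIRK_MOD15WRITE));
	return 0;
}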
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index e418b89c6b9..43af445b3ad 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -7,21 +7,26 @@
*
* Copyright 2004 Uwe Koziolek
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
*
*/
@@ -234,7 +239,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
probe_ent->host_flags |= SIS_FLAG_CFGSCR;
-
+
/* if hardware thinks SCRs are in IO space, but there are
* no IO resources assigned, change to PCI cfg space.
*/
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 858e07185db..19d3bb3b0fb 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -13,21 +13,26 @@
* This driver probably works with non-Apple versions of the
* Broadcom chipset...
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
*
*/
@@ -195,18 +200,18 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
/* start host DMA transaction */
dmactl = readb(mmio + ATA_DMA_CMD);
writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
- /* There is a race condition in certain SATA controllers that can
- be seen when the r/w command is given to the controller before the
+ /* There is a race condition in certain SATA controllers that can
+ be seen when the r/w command is given to the controller before the
host DMA is started. On a Read command, the controller would initiate
the command to the drive even before it sees the DMA start. When there
- are very fast drives connected to the controller, or when the data request
+ are very fast drives connected to the controller, or when the data request
hits in the drive cache, there is the possibility that the drive returns a part
or all of the requested data to the controller before the DMA start is issued.
In this case, the controller would become confused as to what to do with the data.
In the worst case when all the data is returned back to the controller, the
 controller could hang. In other cases it could return partial data resulting
 in data corruption. This problem has been seen in PPC systems and can also appear
- on a system with very fast disks, where the SATA controller is sitting behind a
+ on a system with very fast disks, where the SATA controller is sitting behind a
number of bridges, and hence there is significant latency between the r/w command
and the start command. */
/* issue r/w command if the access is to ATA*/
@@ -214,7 +219,7 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
ap->ops->exec_command(ap, &qc->tf);
}
-
+
static u8 k2_stat_check_status(struct ata_port *ap)
{
return readl((void *) ap->ioaddr.status_addr);
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index efd7d7a6113..c72fcc46f0f 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -7,21 +7,26 @@
*
* Copyright 2003-2004 Red Hat, Inc.
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
*
*/
@@ -94,7 +99,7 @@ enum {
PDC_DIMM1_CONTROL_OFFSET = 0x84,
PDC_SDRAM_CONTROL_OFFSET = 0x88,
PDC_I2C_WRITE = 0x00000000,
- PDC_I2C_READ = 0x00000040,
+ PDC_I2C_READ = 0x00000040,
PDC_I2C_START = 0x00000080,
PDC_I2C_MASK_INT = 0x00000020,
PDC_I2C_COMPLETE = 0x00010000,
@@ -105,16 +110,16 @@ enum {
PDC_DIMM_SPD_COLUMN_NUM = 4,
PDC_DIMM_SPD_MODULE_ROW = 5,
PDC_DIMM_SPD_TYPE = 11,
- PDC_DIMM_SPD_FRESH_RATE = 12,
- PDC_DIMM_SPD_BANK_NUM = 17,
+ PDC_DIMM_SPD_FRESH_RATE = 12,
+ PDC_DIMM_SPD_BANK_NUM = 17,
PDC_DIMM_SPD_CAS_LATENCY = 18,
- PDC_DIMM_SPD_ATTRIBUTE = 21,
+ PDC_DIMM_SPD_ATTRIBUTE = 21,
PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
- PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
+ PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
PDC_DIMM_SPD_SYSTEM_FREQ = 126,
- PDC_CTL_STATUS = 0x08,
+ PDC_CTL_STATUS = 0x08,
PDC_DIMM_WINDOW_CTLR = 0x0C,
PDC_TIME_CONTROL = 0x3C,
PDC_TIME_PERIOD = 0x40,
@@ -157,15 +162,15 @@ static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
static void pdc20621_host_stop(struct ata_host_set *host_set);
static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
-static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
+static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_probe_ent *pe);
static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe);
#ifdef ATA_VERBOSE_DEBUG
-static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
+static void pdc20621_get_from_dimm(struct ata_probe_ent *pe,
void *psource, u32 offset, u32 size);
#endif
-static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
+static void pdc20621_put_to_dimm(struct ata_probe_ent *pe,
void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
static int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc);
@@ -825,7 +830,8 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
ap = host_set->ports[port_no];
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
- if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (tmp && ap &&
+ !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -847,10 +853,14 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
static void pdc_eng_timeout(struct ata_port *ap)
{
u8 drv_stat;
+ struct ata_host_set *host_set = ap->host_set;
struct ata_queued_cmd *qc;
+ unsigned long flags;
DPRINTK("ENTER\n");
+ spin_lock_irqsave(&host_set->lock, flags);
+
qc = ata_qc_from_tag(ap, ap->active_tag);
if (!qc) {
printk(KERN_ERR "ata%u: BUG: timeout without command\n",
@@ -884,6 +894,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
}
out:
+ spin_unlock_irqrestore(&host_set->lock, flags);
DPRINTK("EXIT\n");
}
@@ -922,7 +933,7 @@ static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
#ifdef ATA_VERBOSE_DEBUG
-static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
+static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
u32 offset, u32 size)
{
u32 window_size;
@@ -936,9 +947,9 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
- page_mask = 0x00;
- window_size = 0x2000 * 4; /* 32K byte uchar size */
- idx = (u16) (offset / window_size);
+ page_mask = 0x00;
+ window_size = 0x2000 * 4; /* 32K byte uchar size */
+ idx = (u16) (offset / window_size);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
@@ -947,19 +958,19 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
offset -= (idx * window_size);
idx++;
- dist = ((long) (window_size - (offset + size))) >= 0 ? size :
+ dist = ((long) (window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
+ memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
dist);
- psource += dist;
+ psource += dist;
size -= dist;
for (; (long) size >= (long) window_size ;) {
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio),
+ memcpy_fromio((char *) psource, (char *) (dimm_mmio),
window_size / 4);
psource += window_size;
size -= window_size;
@@ -971,14 +982,14 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
readl(mmio + PDC_GENERAL_CTLR);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- memcpy_fromio((char *) psource, (char *) (dimm_mmio),
+ memcpy_fromio((char *) psource, (char *) (dimm_mmio),
size / 4);
}
}
#endif
-static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
+static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
u32 offset, u32 size)
{
u32 window_size;
@@ -989,16 +1000,16 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
struct pdc_host_priv *hpriv = pe->private_data;
void *dimm_mmio = hpriv->dimm_mmio;
- /* hard-code chip #0 */
+ /* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
- page_mask = 0x00;
- window_size = 0x2000 * 4; /* 32K byte uchar size */
+ page_mask = 0x00;
+ window_size = 0x2000 * 4; /* 32K byte uchar size */
idx = (u16) (offset / window_size);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- offset -= (idx * window_size);
+ offset -= (idx * window_size);
idx++;
dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
@@ -1006,12 +1017,12 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
- psource += dist;
+ psource += dist;
size -= dist;
for (; (long) size >= (long) window_size ;) {
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
- memcpy_toio((char *) (dimm_mmio), (char *) psource,
+ memcpy_toio((char *) (dimm_mmio), (char *) psource,
window_size / 4);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);
@@ -1019,7 +1030,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
size -= window_size;
idx ++;
}
-
+
if (size) {
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
readl(mmio + PDC_DIMM_WINDOW_CTLR);
@@ -1030,12 +1041,12 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
}
-static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
+static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
u32 subaddr, u32 *pdata)
{
void *mmio = pe->mmio_base;
u32 i2creg = 0;
- u32 status;
+ u32 status;
u32 count =0;
/* hard-code chip #0 */
@@ -1049,7 +1060,7 @@ static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
readl(mmio + PDC_I2C_ADDR_DATA_OFFSET);
/* Write Control to perform read operation, mask int */
- writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
+ writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
mmio + PDC_I2C_CONTROL_OFFSET);
for (count = 0; count <= 1000; count ++) {
@@ -1062,26 +1073,26 @@ static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
}
*pdata = (status >> 8) & 0x000000ff;
- return 1;
+ return 1;
}
static int pdc20621_detect_dimm(struct ata_probe_ent *pe)
{
u32 data=0 ;
- if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
+ if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
if (data == 100)
return 100;
} else
return 0;
-
+
if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
- if(data <= 0x75)
+ if(data <= 0x75)
return 133;
} else
return 0;
-
+
return 0;
}
@@ -1091,15 +1102,15 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
u32 spd0[50];
u32 data = 0;
int size, i;
- u8 bdimmsize;
+ u8 bdimmsize;
void *mmio = pe->mmio_base;
static const struct {
unsigned int reg;
unsigned int ofs;
} pdc_i2c_read_data [] = {
- { PDC_DIMM_SPD_TYPE, 11 },
+ { PDC_DIMM_SPD_TYPE, 11 },
{ PDC_DIMM_SPD_FRESH_RATE, 12 },
- { PDC_DIMM_SPD_COLUMN_NUM, 4 },
+ { PDC_DIMM_SPD_COLUMN_NUM, 4 },
{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
{ PDC_DIMM_SPD_ROW_NUM, 3 },
{ PDC_DIMM_SPD_BANK_NUM, 17 },
@@ -1108,7 +1119,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
- { PDC_DIMM_SPD_CAS_LATENCY, 18 },
+ { PDC_DIMM_SPD_CAS_LATENCY, 18 },
};
/* hard-code chip #0 */
@@ -1116,17 +1127,17 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++)
pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
- pdc_i2c_read_data[i].reg,
+ pdc_i2c_read_data[i].reg,
&spd0[pdc_i2c_read_data[i].ofs]);
-
+
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
- data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
+ data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
((((spd0[27] + 9) / 10) - 1) << 8) ;
- data |= (((((spd0[29] > spd0[28])
- ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
+ data |= (((((spd0[29] > spd0[28])
+ ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
-
- if (spd0[18] & 0x08)
+
+ if (spd0[18] & 0x08)
data |= ((0x03) << 14);
else if (spd0[18] & 0x04)
data |= ((0x02) << 14);
@@ -1135,7 +1146,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
else
data |= (0 << 14);
- /*
+ /*
Calculate the size of bDIMMSize (power of 2) and
merge the DIMM size by program start/end address.
*/
@@ -1145,9 +1156,9 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
data |= (((size / 16) - 1) << 16);
data |= (0 << 23);
data |= 8;
- writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
+ writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET);
readl(mmio + PDC_DIMM0_CONTROL_OFFSET);
- return size;
+ return size;
}
@@ -1167,12 +1178,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
Refresh Enable (bit 17)
*/
- data = 0x022259F1;
+ data = 0x022259F1;
writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
/* Turn on for ECC */
- pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (spd0 == 0x02) {
data |= (0x01 << 16);
@@ -1186,22 +1197,22 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
data |= (1<<19);
writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET);
- error = 1;
+ error = 1;
for (i = 1; i <= 10; i++) { /* polling ~5 secs */
data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET);
if (!(data & (1<<19))) {
error = 0;
- break;
+ break;
}
msleep(i*100);
}
return error;
}
-
+
static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
{
- int speed, size, length;
+ int speed, size, length;
u32 addr,spd0,pci_status;
u32 tmp=0;
u32 time_period=0;
@@ -1228,7 +1239,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
/* Wait 3 seconds */
msleep(3000);
- /*
+ /*
When timer is enabled, counter is decreased every internal
clock cycle.
*/
@@ -1236,24 +1247,24 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
tcount = readl(mmio + PDC_TIME_COUNTER);
VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
- /*
+ /*
If SX4 is on PCI-X bus, after 3 seconds, the timer counter
register should be >= (0xffffffff - 3x10^8).
*/
if(tcount >= PCI_X_TCOUNT) {
ticks = (time_period - tcount);
VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
-
+
clock = (ticks / 300000);
VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
-
+
clock = (clock * 33);
VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
/* PLL F Param (bit 22:16) */
fparam = (1400000 / clock) - 2;
VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
-
+
/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
pci_status = (0x8a001824 | (fparam << 16));
} else
@@ -1264,21 +1275,21 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
writel(pci_status, mmio + PDC_CTL_STATUS);
readl(mmio + PDC_CTL_STATUS);
- /*
+ /*
Read SPD of DIMM by I2C interface,
and program the DIMM Module Controller.
*/
if (!(speed = pdc20621_detect_dimm(pe))) {
- printk(KERN_ERR "Detect Local DIMM Fail\n");
+ printk(KERN_ERR "Detect Local DIMM Fail\n");
return 1; /* DIMM error */
}
VPRINTK("Local DIMM Speed = %d\n", speed);
- /* Programming DIMM0 Module Control Register (index_CID0:80h) */
+ /* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(pe);
VPRINTK("Local DIMM Size = %dMB\n",size);
- /* Programming DIMM Module Global Control Register (index_CID0:88h) */
+ /* Programming DIMM Module Global Control Register (index_CID0:88h) */
if (pdc20621_prog_dimm_global(pe)) {
printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
return 1;
@@ -1297,30 +1308,30 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x10040, 40);
pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
+ printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040,
+ pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040,
40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
+ printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x40, 40);
pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
+ printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
}
#endif
 /* ECC initialization. */
- pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
+ pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0);
if (spd0 == 0x02) {
VPRINTK("Start ECC initialization\n");
addr = 0;
length = size * 1024 * 1024;
while (addr < length) {
- pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
+ pdc20621_put_to_dimm(pe, (void *) &tmp, addr,
sizeof(u32));
addr += sizeof(u32);
}
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index a71fb54eebd..1566886815f 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -1,21 +1,26 @@
/*
* sata_uli.c - ULi Electronics SATA
*
- * The contents of this file are subject to the Open
- * Software License version 1.1 that can be found at
- * http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- * by reference.
*
- * Alternatively, the contents of this file may be used under the terms
- * of the GNU General Public License version 2 (the "GPL") as distributed
- * in the kernel source COPYING file, in which case the provisions of
- * the GPL are applicable instead of the above. If you wish to allow
- * the use of your version of this file only under the terms of the
- * GPL and not to allow others to use your version of this file under
- * the OSL, indicate your decision by deleting the provisions above and
- * replace them with the notice and other provisions required by the GPL.
- * If you do not delete the provisions above, a recipient may use your
- * version of this file under either the OSL or the GPL.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
*
*/
@@ -214,7 +219,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -ENOMEM;
goto err_out_regions;
}
-
+
switch (board_idx) {
case uli_5287:
probe_ent->port[0].scr_addr = ULI5287_BASE;
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index f43183c19a1..128b996b07b 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -1,34 +1,38 @@
/*
- sata_via.c - VIA Serial ATA controllers
-
- Maintained by: Jeff Garzik <jgarzik@pobox.com>
- Please ALWAYS copy linux-ide@vger.kernel.org
+ * sata_via.c - VIA Serial ATA controllers
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
on emails.
-
- Copyright 2003-2004 Red Hat, Inc. All rights reserved.
- Copyright 2003-2004 Jeff Garzik
-
- The contents of this file are subject to the Open
- Software License version 1.1 that can be found at
- http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- by reference.
-
- Alternatively, the contents of this file may be used under the terms
- of the GNU General Public License version 2 (the "GPL") as distributed
- in the kernel source COPYING file, in which case the provisions of
- the GPL are applicable instead of the above. If you wish to allow
- the use of your version of this file only under the terms of the
- GPL and not to allow others to use your version of this file under
- the OSL, indicate your decision by deleting the provisions above and
- replace them with the notice and other provisions required by the GPL.
- If you do not delete the provisions above, a recipient may use your
- version of this file under either the OSL or the GPL.
-
- ----------------------------------------------------------------------
-
- To-do list:
- * VT6421 PATA support
-
+ *
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available under NDA.
+ *
+ *
+ * To-do list:
+ * - VT6421 PATA support
+ *
*/
#include <linux/kernel.h>
@@ -347,7 +351,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent = vt6420_init_probe_ent(pdev);
else
probe_ent = vt6421_init_probe_ent(pdev);
-
+
if (!probe_ent) {
printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
pci_name(pdev));
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index c5e09dc6f3d..3985f344da4 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -9,9 +9,29 @@
*
* Bits from Jeff Garzik, Copyright RedHat, Inc.
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Vitesse hardware documentation presumably available under NDA.
+ * Intel 31244 (same hardware interface) documentation presumably
+ * available from http://developer.intel.com/
+ *
*/
#include <linux/kernel.h>
@@ -173,7 +193,8 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
struct ata_port *ap;
ap = host_set->ports[i];
- if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+ if (ap && !(ap->flags &
+ (ATA_FLAG_PORT_DISABLED|ATA_FLAG_NOINTR))) {
struct ata_queued_cmd *qc;
qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -342,7 +363,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
pci_set_master(pdev);
- /*
+ /*
* Config offset 0x98 is "Extended Control and Status Register 0"
* Default value is (1 << 28). All bits except bit 28 are reserved in
* DPA mode. If bit 28 is set, LED 0 reflects all ports' activity.
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 07f05e9d095..0e21f583690 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -34,36 +34,6 @@
#undef SERIAL_DEBUG_PCI
/*
- * Definitions for PCI support.
- */
-#define FL_BASE_MASK 0x0007
-#define FL_BASE0 0x0000
-#define FL_BASE1 0x0001
-#define FL_BASE2 0x0002
-#define FL_BASE3 0x0003
-#define FL_BASE4 0x0004
-#define FL_GET_BASE(x) (x & FL_BASE_MASK)
-
-/* Use successive BARs (PCI base address registers),
- else use offset into some specified BAR */
-#define FL_BASE_BARS 0x0008
-
-/* do not assign an irq */
-#define FL_NOIRQ 0x0080
-
-/* Use the Base address register size to cap number of ports */
-#define FL_REGION_SZ_CAP 0x0100
-
-struct pci_board {
- unsigned int flags;
- unsigned int num_ports;
- unsigned int base_baud;
- unsigned int uart_offset;
- unsigned int reg_shift;
- unsigned int first_offset;
-};
-
-/*
* init function returns:
* > 0 - number of ports
* = 0 - use board->num_ports
@@ -75,14 +45,15 @@ struct pci_serial_quirk {
u32 subvendor;
u32 subdevice;
int (*init)(struct pci_dev *dev);
- int (*setup)(struct pci_dev *dev, struct pci_board *board,
- struct uart_port *port, int idx);
+ int (*setup)(struct serial_private *, struct pciserial_board *,
+ struct uart_port *, int);
void (*exit)(struct pci_dev *dev);
};
#define PCI_NUM_BAR_RESOURCES 6
struct serial_private {
+ struct pci_dev *dev;
unsigned int nr;
void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES];
struct pci_serial_quirk *quirk;
@@ -101,17 +72,18 @@ static void moan_device(const char *str, struct pci_dev *dev)
}
static int
-setup_port(struct pci_dev *dev, struct uart_port *port,
+setup_port(struct serial_private *priv, struct uart_port *port,
int bar, int offset, int regshift)
{
- struct serial_private *priv = pci_get_drvdata(dev);
+ struct pci_dev *dev = priv->dev;
unsigned long base, len;
if (bar >= PCI_NUM_BAR_RESOURCES)
return -EINVAL;
+ base = pci_resource_start(dev, bar);
+
if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
- base = pci_resource_start(dev, bar);
len = pci_resource_len(dev, bar);
if (!priv->remapped_bar[bar])
@@ -120,13 +92,16 @@ setup_port(struct pci_dev *dev, struct uart_port *port,
return -ENOMEM;
port->iotype = UPIO_MEM;
+ port->iobase = 0;
port->mapbase = base + offset;
port->membase = priv->remapped_bar[bar] + offset;
port->regshift = regshift;
} else {
- base = pci_resource_start(dev, bar) + offset;
port->iotype = UPIO_PORT;
- port->iobase = base;
+ port->iobase = base + offset;
+ port->mapbase = 0;
+ port->membase = NULL;
+ port->regshift = 0;
}
return 0;
}
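An illustrative stand-alone sketch of the decision the reworked setup_port() makes: memory BARs yield UPIO_MEM with iobase cleared, I/O BARs yield UPIO_PORT with mapbase cleared. The IORESOURCE_MEM value and the struct below are stand-ins for this sketch, not the kernel definitions.

#include <stdio.h>

#define IORESOURCE_MEM 0x1   /* stand-in flag value for this sketch only */

struct sketch_port {
	const char   *iotype;
	unsigned long iobase;
	unsigned long mapbase;
};

static void fill_port(struct sketch_port *p, unsigned int bar_flags,
		      unsigned long base, unsigned long offset)
{
	if (bar_flags & IORESOURCE_MEM) {
		p->iotype  = "UPIO_MEM";
		p->iobase  = 0;              /* unused fields are now cleared */
		p->mapbase = base + offset;
	} else {
		p->iotype  = "UPIO_PORT";
		p->iobase  = base + offset;
		p->mapbase = 0;
	}
}

int main(void)
{
	struct sketch_port mem_port, io_port;

	fill_port(&mem_port, IORESOURCE_MEM, 0xfe000000UL, 0x40);
	fill_port(&io_port, 0, 0xd000UL, 0x8);
	printf("%s mapbase=%#lx\n", mem_port.iotype, mem_port.mapbase);
	printf("%s iobase=%#lx\n", io_port.iotype, io_port.iobase);
	return 0;
}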
@@ -136,7 +111,7 @@ setup_port(struct pci_dev *dev, struct uart_port *port,
* Not that ugly ;) -- HW
*/
static int
-afavlab_setup(struct pci_dev *dev, struct pci_board *board,
+afavlab_setup(struct serial_private *priv, struct pciserial_board *board,
struct uart_port *port, int idx)
{
unsigned int bar, offset = board->first_offset;
@@ -149,7 +124,7 @@ afavlab_setup(struct pci_dev *dev, struct pci_board *board,
offset += (idx - 4) * board->uart_offset;
}
- return setup_port(dev, port, bar, offset, board->reg_shift);
+ return setup_port(priv, port, bar, offset, board->reg_shift);
}
/*
@@ -189,13 +164,13 @@ static int __devinit pci_hp_diva_init(struct pci_dev *dev)
* some serial ports are supposed to be hidden on certain models.
*/
static int
-pci_hp_diva_setup(struct pci_dev *dev, struct pci_board *board,
+pci_hp_diva_setup(struct serial_private *priv, struct pciserial_board *board,
struct uart_port *port, int idx)
{
unsigned int offset = board->first_offset;
unsigned int bar = FL_GET_BASE(board->flags);
- switch (dev->subsystem_device) {
+ switch (priv->dev->subsystem_device) {
case PCI_DEVICE_ID_HP_DIVA_MAESTRO:
if (idx == 3)
idx++;
@@ -212,7 +187,7 @@ pci_hp_diva_setup(struct pci_dev *dev, struct pci_board *board,
offset += idx * board->uart_offset;
- return setup_port(dev, port, bar, offset, board->reg_shift);
+ return setup_port(priv, port, bar, offset, board->reg_shift);
}
/*
@@ -307,7 +282,7 @@ static void __devexit pci_plx9050_exit(struct pci_dev *dev)
/* SBS Technologies Inc. PMC-OCTPRO and P-OCTAL cards */
static int
-sbs_setup(struct pci_dev *dev, struct pci_board *board,
+sbs_setup(struct serial_private *priv, struct pciserial_board *board,
struct uart_port *port, int idx)
{
unsigned int bar, offset = board->first_offset;
@@ -323,7 +298,7 @@ sbs_setup(struct pci_dev *dev, struct pci_board *board,
} else /* we have only 8 ports on PMC-OCTALPRO */
return 1;
- return setup_port(dev, port, bar, offset, board->reg_shift);
+ return setup_port(priv, port, bar, offset, board->reg_shift);
}
/*
@@ -389,6 +364,9 @@ static void __devexit sbs_exit(struct pci_dev *dev)
* - 10x cards have control registers in IO and/or memory space;
* - 20x cards have control registers in standard PCI configuration space.
*
+ * Note: all 10x cards have PCI device ids 0x10..
+ * all 20x cards have PCI device ids 0x20..
+ *
* There are also Quartet Serial cards which use Oxford Semiconductor
* 16954 quad UART PCI chip clocked by 18.432 MHz quartz.
*
@@ -445,24 +423,18 @@ static int pci_siig20x_init(struct pci_dev *dev)
return 0;
}
-int pci_siig10x_fn(struct pci_dev *dev, int enable)
+static int pci_siig_init(struct pci_dev *dev)
{
- int ret = 0;
- if (enable)
- ret = pci_siig10x_init(dev);
- return ret;
-}
+ unsigned int type = dev->device & 0xff00;
-int pci_siig20x_fn(struct pci_dev *dev, int enable)
-{
- int ret = 0;
- if (enable)
- ret = pci_siig20x_init(dev);
- return ret;
-}
+ if (type == 0x1000)
+ return pci_siig10x_init(dev);
+ else if (type == 0x2000)
+ return pci_siig20x_init(dev);
-EXPORT_SYMBOL(pci_siig10x_fn);
-EXPORT_SYMBOL(pci_siig20x_fn);
+ moan_device("Unknown SIIG card", dev);
+ return -ENODEV;
+}
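A small sketch of the device-ID dispatch used by the consolidated pci_siig_init() above (10x parts have PCI device IDs 0x10xx, 20x parts 0x20xx, per the note earlier in this file). The example device IDs below are illustrative only, not real SIIG IDs.

#include <stdio.h>

static const char *siig_init_path(unsigned int device_id)
{
	switch (device_id & 0xff00) {
	case 0x1000: return "pci_siig10x_init";
	case 0x2000: return "pci_siig20x_init";
	default:     return "unknown SIIG card";
	}
}

int main(void)
{
	printf("device 0x1022 -> %s\n", siig_init_path(0x1022));
	printf("device 0x2062 -> %s\n", siig_init_path(0x2062));
	return 0;
}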
/*
* Timedia has an explosion of boards, and to avoid the PCI table from
@@ -523,7 +495,7 @@ static int __devinit pci_timedia_init(struct pci_dev *dev)
* Ugh, this is ugly as all hell --- TYT
*/
static int
-pci_timedia_setup(struct pci_dev *dev, struct pci_board *board,
+pci_timedia_setup(struct serial_private *priv, struct pciserial_board *board,
struct uart_port *port, int idx)
{
unsigned int bar = 0, offset = board->first_offset;
@@ -549,14 +521,15 @@ pci_timedia_setup(struct pci_dev *dev, struct pci_board *board,
bar = idx - 2;
}
- return setup_port(dev, port, bar, offset, board->reg_shift);
+ return setup_port(priv, port, bar, offset, board->reg_shift);
}
/*
* Some Titan cards are also a little weird
*/
static int
-titan_400l_800l_setup(struct pci_dev *dev, struct pci_board *board,
+titan_400l_800l_setup(struct serial_private *priv,
+ struct pciserial_board *board,
struct uart_port *port, int idx)
{
unsigned int bar, offset = board->first_offset;
@@ -573,7 +546,7 @@ titan_400l_800l_setup(struct pci_dev *dev, struct pci_board *board,
offset = (idx - 2) * board->uart_offset;
}
- return setup_port(dev, port, bar, offset, board->reg_shift);
+ return setup_port(priv, port, bar, offset, board->reg_shift);
}
static int __devinit pci_xircom_init(struct pci_dev *dev)
@@ -593,7 +566,7 @@ static int __devinit pci_netmos_init(struct pci_dev *dev)
}
static int
-pci_default_setup(struct pci_dev *dev, struct pci_board *board,
+pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
struct uart_port *port, int idx)
{
unsigned int bar, offset = board->first_offset, maxnr;
@@ -604,13 +577,13 @@ pci_default_setup(struct pci_dev *dev, struct pci_board *board,
else
offset += idx * board->uart_offset;
- maxnr = (pci_resource_len(dev, bar) - board->first_offset) /
+ maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) /
(8 << board->reg_shift);
if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
return 1;
- return setup_port(dev, port, bar, offset, board->reg_shift);
+ return setup_port(priv, port, bar, offset, board->reg_shift);
}
/* This should be in linux/pci_ids.h */
@@ -754,152 +727,15 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = sbs_setup,
.exit = __devexit_p(sbs_exit),
},
-
/*
* SIIG cards.
- * It is not clear whether these could be collapsed.
*/
{
.vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_1S_10x_550,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_1S_10x_650,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_1S_10x_850,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_2S_10x_550,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_2S_10x_650,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_2S_10x_850,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_4S_10x_550,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_4S_10x_650,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_4S_10x_850,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig10x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_1S_20x_550,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_1S_20x_650,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_1S_20x_850,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_2S_20x_550,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- { .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_2S_20x_650,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_2S_20x_850,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_4S_20x_550,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_4S_20x_650,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
- .setup = pci_default_setup,
- },
- {
- .vendor = PCI_VENDOR_ID_SIIG,
- .device = PCI_DEVICE_ID_SIIG_4S_20x_850,
+ .device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .init = pci_siig20x_init,
+ .init = pci_siig_init,
.setup = pci_default_setup,
},
/*
@@ -990,7 +826,7 @@ static struct pci_serial_quirk *find_quirk(struct pci_dev *dev)
}
static _INLINE_ int
-get_pci_irq(struct pci_dev *dev, struct pci_board *board, int idx)
+get_pci_irq(struct pci_dev *dev, struct pciserial_board *board)
{
if (board->flags & FL_NOIRQ)
return 0;
@@ -1115,7 +951,7 @@ enum pci_board_num_t {
* see first lines of serial_in() and serial_out() in 8250.c
*/
-static struct pci_board pci_boards[] __devinitdata = {
+static struct pciserial_board pci_boards[] __devinitdata = {
[pbn_default] = {
.flags = FL_BASE0,
.num_ports = 1,
@@ -1575,7 +1411,7 @@ static struct pci_board pci_boards[] __devinitdata = {
* serial specs. Returns 0 on success, 1 on failure.
*/
static int __devinit
-serial_pci_guess_board(struct pci_dev *dev, struct pci_board *board)
+serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
{
int num_iomem, num_port, first_port = -1, i;
@@ -1640,7 +1476,8 @@ serial_pci_guess_board(struct pci_dev *dev, struct pci_board *board)
}
static inline int
-serial_pci_matches(struct pci_board *board, struct pci_board *guessed)
+serial_pci_matches(struct pciserial_board *board,
+ struct pciserial_board *guessed)
{
return
board->num_ports == guessed->num_ports &&
@@ -1650,58 +1487,14 @@ serial_pci_matches(struct pci_board *board, struct pci_board *guessed)
board->first_offset == guessed->first_offset;
}
-/*
- * Probe one serial board. Unfortunately, there is no rhyme nor reason
- * to the arrangement of serial ports on a PCI card.
- */
-static int __devinit
-pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
+struct serial_private *
+pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
{
+ struct uart_port serial_port;
struct serial_private *priv;
- struct pci_board *board, tmp;
struct pci_serial_quirk *quirk;
int rc, nr_ports, i;
- if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
- printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
- ent->driver_data);
- return -EINVAL;
- }
-
- board = &pci_boards[ent->driver_data];
-
- rc = pci_enable_device(dev);
- if (rc)
- return rc;
-
- if (ent->driver_data == pbn_default) {
- /*
- * Use a copy of the pci_board entry for this;
- * avoid changing entries in the table.
- */
- memcpy(&tmp, board, sizeof(struct pci_board));
- board = &tmp;
-
- /*
- * We matched one of our class entries. Try to
- * determine the parameters of this board.
- */
- rc = serial_pci_guess_board(dev, board);
- if (rc)
- goto disable;
- } else {
- /*
- * We matched an explicit entry. If we are able to
- * detect this boards settings with our heuristic,
- * then we no longer need this entry.
- */
- memcpy(&tmp, &pci_boards[pbn_default], sizeof(struct pci_board));
- rc = serial_pci_guess_board(dev, &tmp);
- if (rc == 0 && serial_pci_matches(board, &tmp))
- moan_device("Redundant entry in serial pci_table.",
- dev);
- }
-
nr_ports = board->num_ports;
/*
@@ -1718,8 +1511,10 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
*/
if (quirk->init) {
rc = quirk->init(dev);
- if (rc < 0)
- goto disable;
+ if (rc < 0) {
+ priv = ERR_PTR(rc);
+ goto err_out;
+ }
if (rc)
nr_ports = rc;
}
@@ -1728,27 +1523,26 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
sizeof(unsigned int) * nr_ports,
GFP_KERNEL);
if (!priv) {
- rc = -ENOMEM;
- goto deinit;
+ priv = ERR_PTR(-ENOMEM);
+ goto err_deinit;
}
memset(priv, 0, sizeof(struct serial_private) +
sizeof(unsigned int) * nr_ports);
+ priv->dev = dev;
priv->quirk = quirk;
- pci_set_drvdata(dev, priv);
+
+ memset(&serial_port, 0, sizeof(struct uart_port));
+ serial_port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ serial_port.uartclk = board->base_baud * 16;
+ serial_port.irq = get_pci_irq(dev, board);
+ serial_port.dev = &dev->dev;
for (i = 0; i < nr_ports; i++) {
- struct uart_port serial_port;
- memset(&serial_port, 0, sizeof(struct uart_port));
-
- serial_port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF |
- UPF_SHARE_IRQ;
- serial_port.uartclk = board->base_baud * 16;
- serial_port.irq = get_pci_irq(dev, board, i);
- serial_port.dev = &dev->dev;
- if (quirk->setup(dev, board, &serial_port, i))
+ if (quirk->setup(priv, board, &serial_port, i))
break;
+
#ifdef SERIAL_DEBUG_PCI
printk("Setup PCI port: port %x, irq %d, type %d\n",
serial_port.iobase, serial_port.irq, serial_port.iotype);
@@ -1763,24 +1557,21 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
priv->nr = i;
- return 0;
+ return priv;
- deinit:
+ err_deinit:
if (quirk->exit)
quirk->exit(dev);
- disable:
- pci_disable_device(dev);
- return rc;
+ err_out:
+ return priv;
}
+EXPORT_SYMBOL_GPL(pciserial_init_ports);
-static void __devexit pciserial_remove_one(struct pci_dev *dev)
+void pciserial_remove_ports(struct serial_private *priv)
{
- struct serial_private *priv = pci_get_drvdata(dev);
struct pci_serial_quirk *quirk;
int i;
- pci_set_drvdata(dev, NULL);
-
for (i = 0; i < priv->nr; i++)
serial8250_unregister_port(priv->line[i]);
@@ -1793,25 +1584,123 @@ static void __devexit pciserial_remove_one(struct pci_dev *dev)
/*
* Find the exit quirks.
*/
- quirk = find_quirk(dev);
+ quirk = find_quirk(priv->dev);
if (quirk->exit)
- quirk->exit(dev);
+ quirk->exit(priv->dev);
+
+ kfree(priv);
+}
+EXPORT_SYMBOL_GPL(pciserial_remove_ports);
+
+void pciserial_suspend_ports(struct serial_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->nr; i++)
+ if (priv->line[i] >= 0)
+ serial8250_suspend_port(priv->line[i]);
+}
+EXPORT_SYMBOL_GPL(pciserial_suspend_ports);
+
+void pciserial_resume_ports(struct serial_private *priv)
+{
+ int i;
+
+ /*
+ * Ensure that the board is correctly configured.
+ */
+ if (priv->quirk->init)
+ priv->quirk->init(priv->dev);
+
+ for (i = 0; i < priv->nr; i++)
+ if (priv->line[i] >= 0)
+ serial8250_resume_port(priv->line[i]);
+}
+EXPORT_SYMBOL_GPL(pciserial_resume_ports);
+
+/*
+ * Probe one serial board. Unfortunately, there is no rhyme nor reason
+ * to the arrangement of serial ports on a PCI card.
+ */
+static int __devinit
+pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct serial_private *priv;
+ struct pciserial_board *board, tmp;
+ int rc;
+
+ if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
+ printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
+ ent->driver_data);
+ return -EINVAL;
+ }
+
+ board = &pci_boards[ent->driver_data];
+
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+
+ if (ent->driver_data == pbn_default) {
+ /*
+ * Use a copy of the pci_board entry for this;
+ * avoid changing entries in the table.
+ */
+ memcpy(&tmp, board, sizeof(struct pciserial_board));
+ board = &tmp;
+
+ /*
+ * We matched one of our class entries. Try to
+ * determine the parameters of this board.
+ */
+ rc = serial_pci_guess_board(dev, board);
+ if (rc)
+ goto disable;
+ } else {
+ /*
+ * We matched an explicit entry. If we are able to
+	 * detect this board's settings with our heuristic,
+ * then we no longer need this entry.
+ */
+ memcpy(&tmp, &pci_boards[pbn_default],
+ sizeof(struct pciserial_board));
+ rc = serial_pci_guess_board(dev, &tmp);
+ if (rc == 0 && serial_pci_matches(board, &tmp))
+ moan_device("Redundant entry in serial pci_table.",
+ dev);
+ }
+ priv = pciserial_init_ports(dev, board);
+ if (!IS_ERR(priv)) {
+ pci_set_drvdata(dev, priv);
+ return 0;
+ }
+
+ rc = PTR_ERR(priv);
+
+ disable:
pci_disable_device(dev);
+ return rc;
+}
- kfree(priv);
+static void __devexit pciserial_remove_one(struct pci_dev *dev)
+{
+ struct serial_private *priv = pci_get_drvdata(dev);
+
+ pci_set_drvdata(dev, NULL);
+
+ pciserial_remove_ports(priv);
+
+ pci_disable_device(dev);
}
static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state)
{
struct serial_private *priv = pci_get_drvdata(dev);
- if (priv) {
- int i;
+ if (priv)
+ pciserial_suspend_ports(priv);
- for (i = 0; i < priv->nr; i++)
- serial8250_suspend_port(priv->line[i]);
- }
pci_save_state(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
return 0;
@@ -1825,21 +1714,12 @@ static int pciserial_resume_one(struct pci_dev *dev)
pci_restore_state(dev);
if (priv) {
- int i;
-
/*
* The device may have been disabled. Re-enable it.
*/
pci_enable_device(dev);
- /*
- * Ensure that the board is correctly configured.
- */
- if (priv->quirk->init)
- priv->quirk->init(dev);
-
- for (i = 0; i < priv->nr; i++)
- serial8250_resume_port(priv->line[i]);
+ pciserial_resume_ports(priv);
}
return 0;
}
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 63f5df9afb7..fd528433de4 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -97,7 +97,7 @@ extern int adfs_dir_update(struct super_block *sb, struct object_info *obj);
extern struct inode_operations adfs_file_inode_operations;
extern struct file_operations adfs_file_operations;
-extern inline __u32 signed_asl(__u32 val, signed int shift)
+static inline __u32 signed_asl(__u32 val, signed int shift)
{
if (shift >= 0)
val <<= shift;
@@ -112,7 +112,7 @@ extern inline __u32 signed_asl(__u32 val, signed int shift)
*
* The root directory ID should always be looked up in the map [3.4]
*/
-extern inline int
+static inline int
__adfs_block_map(struct super_block *sb, unsigned int object_id,
unsigned int block)
{
diff --git a/include/asm-arm/arch-sa1100/mcp.h b/include/asm-arm/arch-sa1100/mcp.h
new file mode 100644
index 00000000000..f58a22755c6
--- /dev/null
+++ b/include/asm-arm/arch-sa1100/mcp.h
@@ -0,0 +1,21 @@
+/*
+ * linux/include/asm-arm/arch-sa1100/mcp.h
+ *
+ * Copyright (C) 2005 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_ARCH_MCP_H
+#define __ASM_ARM_ARCH_MCP_H
+
+#include <linux/types.h>
+
+struct mcp_plat_data {
+ u32 mccr0;
+ u32 mccr1;
+ unsigned int sclk_rate;
+};
+
+#endif
diff --git a/include/asm-arm/hardware/gic.h b/include/asm-arm/hardware/gic.h
new file mode 100644
index 00000000000..3fa5eb70f64
--- /dev/null
+++ b/include/asm-arm/hardware/gic.h
@@ -0,0 +1,41 @@
+/*
+ * linux/include/asm-arm/hardware/gic.h
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_HARDWARE_GIC_H
+#define __ASM_ARM_HARDWARE_GIC_H
+
+#include <linux/compiler.h>
+
+#define GIC_CPU_CTRL 0x00
+#define GIC_CPU_PRIMASK 0x04
+#define GIC_CPU_BINPOINT 0x08
+#define GIC_CPU_INTACK 0x0c
+#define GIC_CPU_EOI 0x10
+#define GIC_CPU_RUNNINGPRI 0x14
+#define GIC_CPU_HIGHPRI 0x18
+
+#define GIC_DIST_CTRL 0x000
+#define GIC_DIST_CTR 0x004
+#define GIC_DIST_ENABLE_SET 0x100
+#define GIC_DIST_ENABLE_CLEAR 0x180
+#define GIC_DIST_PENDING_SET 0x200
+#define GIC_DIST_PENDING_CLEAR 0x280
+#define GIC_DIST_ACTIVE_BIT 0x300
+#define GIC_DIST_PRI 0x400
+#define GIC_DIST_TARGET 0x800
+#define GIC_DIST_CONFIG 0xc00
+#define GIC_DIST_SOFTINT 0xf00
+
+#ifndef __ASSEMBLY__
+void gic_dist_init(void __iomem *base);
+void gic_cpu_init(void __iomem *base);
+void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
+#endif
+
+#endif
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index 32c9699367c..5a7a1a8d29a 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -19,7 +19,6 @@
#include <asm/ptrace.h>
#include <asm/head.h>
#include <asm/signal.h>
-#include <asm/segment.h>
#include <asm/btfixup.h>
#include <asm/page.h>
diff --git a/include/asm-sparc/segment.h b/include/asm-sparc/segment.h
deleted file mode 100644
index a1b7ffc9eec..00000000000
--- a/include/asm-sparc/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_SEGMENT_H
-#define __SPARC_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 898562ebe94..3557781a4bf 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -9,7 +9,6 @@
#include <linux/threads.h> /* NR_CPUS */
#include <linux/thread_info.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index d80f3379669..e175afcf2cd 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,10 +72,10 @@ extern int atomic64_sub_ret(int, atomic64_t *);
/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
-#define smp_mb__before_atomic_dec() membar("#StoreLoad | #LoadLoad")
-#define smp_mb__after_atomic_dec() membar("#StoreLoad | #StoreStore")
-#define smp_mb__before_atomic_inc() membar("#StoreLoad | #LoadLoad")
-#define smp_mb__after_atomic_inc() membar("#StoreLoad | #StoreStore")
+#define smp_mb__before_atomic_dec() membar_storeload_loadload();
+#define smp_mb__after_atomic_dec() membar_storeload_storestore();
+#define smp_mb__before_atomic_inc() membar_storeload_loadload();
+#define smp_mb__after_atomic_inc() membar_storeload_storestore();
#else
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 9c5e7197028..6388b8376c5 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -72,8 +72,8 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
}
#ifdef CONFIG_SMP
-#define smp_mb__before_clear_bit() membar("#StoreLoad | #LoadLoad")
-#define smp_mb__after_clear_bit() membar("#StoreLoad | #StoreStore")
+#define smp_mb__before_clear_bit() membar_storeload_loadload()
+#define smp_mb__after_clear_bit() membar_storeload_storestore()
#else
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index d0bee241356..3169f3e2237 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -18,7 +18,6 @@
#include <asm/a.out.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
-#include <asm/segment.h>
#include <asm/page.h>
/* The sparc has no problems with write protection */
diff --git a/include/asm-sparc64/segment.h b/include/asm-sparc64/segment.h
deleted file mode 100644
index b03e709fc94..00000000000
--- a/include/asm-sparc64/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC64_SEGMENT_H
-#define __SPARC64_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif
diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h
new file mode 100644
index 00000000000..2f792c20b53
--- /dev/null
+++ b/include/asm-sparc64/sfafsr.h
@@ -0,0 +1,82 @@
+#ifndef _SPARC64_SFAFSR_H
+#define _SPARC64_SFAFSR_H
+
+#include <asm/const.h>
+
+/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
+
+#define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT)
+#define SFAFSR_ME_SHIFT 32
+#define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT)
+#define SFAFSR_PRIV_SHIFT 31
+#define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT)
+#define SFAFSR_ISAP_SHIFT 30
+#define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT)
+#define SFAFSR_ETP_SHIFT 29
+#define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT)
+#define SFAFSR_IVUE_SHIFT 28
+#define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT)
+#define SFAFSR_TO_SHIFT 27
+#define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT)
+#define SFAFSR_BERR_SHIFT 26
+#define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT)
+#define SFAFSR_LDP_SHIFT 25
+#define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT)
+#define SFAFSR_CP_SHIFT 24
+#define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT)
+#define SFAFSR_WP_SHIFT 23
+#define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT)
+#define SFAFSR_EDP_SHIFT 22
+#define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT)
+#define SFAFSR_UE_SHIFT 21
+#define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT)
+#define SFAFSR_CE_SHIFT 20
+#define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT)
+#define SFAFSR_ETS_SHIFT 16
+#define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT)
+#define SFAFSR_PSYND_SHIFT 0
+
+/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read
+ * ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write
+ */
+
+#define UDBE_UE (_AC(1,UL) << 9)
+#define UDBE_CE (_AC(1,UL) << 8)
+#define UDBE_E_SYNDR (_AC(0xff,UL) << 0)
+
+/* The trap handlers for asynchronous errors encode the AFSR and
+ * other pieces of information into a 64-bit argument for C code
+ * encoded as follows:
+ *
+ * -----------------------------------------------
+ * | UDB_H | UDB_L | TL>1 | TT | AFSR |
+ * -----------------------------------------------
+ * 63 54 53 44 42 41 33 32 0
+ *
+ * The AFAR is passed in unchanged.
+ */
+#define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
+#define SFSTAT_UDBH_SHIFT 54
+#define SFSTAT_UDBL_MASK	(_AC(0x3ff,UL) << SFSTAT_UDBL_SHIFT)
+#define SFSTAT_UDBL_SHIFT 44
+#define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT)
+#define SFSTAT_TL_GT_ONE_SHIFT 42
+#define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT)
+#define SFSTAT_TRAP_TYPE_SHIFT 33
+#define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT)
+#define SFSTAT_AFSR_SHIFT 0
+
+/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */
+#define ESTATE_ERR_CE 0x1 /* Correctable errors */
+#define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */
+#define ESTATE_ERR_ISAP 0x4 /* System address parity error */
+#define ESTATE_ERR_ALL (ESTATE_ERR_CE | \
+ ESTATE_ERR_NCE | \
+ ESTATE_ERR_ISAP)
+
+/* The various trap types that report using the above state. */
+#define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */
+#define TRAP_TYPE_DAE 0x32 /* Data Access Error */
+#define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */
+
+#endif /* _SPARC64_SFAFSR_H */
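As a rough illustration of the encoding documented in the comment above (this is not part of the patch, and the helper name is invented), C code could unpack the 64-bit argument with the masks and shifts this header defines:

    #include <linux/kernel.h>
    #include <asm/sfafsr.h>

    /* Hypothetical helper: unpack the status word built by the trap handlers. */
    static void sfafsr_decode_status(unsigned long status)
    {
            unsigned long udbh = (status & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
            unsigned long udbl = (status & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
            unsigned long type = (status & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
            unsigned long afsr = (status & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;

            printk("trap type 0x%lx, AFSR 0x%lx, taken at TL>1: %s\n",
                   type, afsr, (status & SFSTAT_TL_GT_ONE) ? "yes" : "no");

            /* The two 10-bit UDB fields hold the UDB Error Register contents. */
            if ((udbh | udbl) & UDBE_UE)
                    printk("uncorrectable UDB error, syndromes %02lx/%02lx\n",
                           udbh & UDBE_E_SYNDR, udbl & UDBE_E_SYNDR);
    }

The AFAR, as noted above, is passed alongside this word unchanged, so a real consumer would report both together.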
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index 9cb93a5c2b4..a02c4370eb4 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -43,7 +43,7 @@ typedef struct {
#define spin_is_locked(lp) ((lp)->lock != 0)
#define spin_unlock_wait(lp) \
-do { membar("#LoadLoad"); \
+do { rmb(); \
} while((lp)->lock)
static inline void _raw_spin_lock(spinlock_t *lock)
@@ -129,15 +129,18 @@ typedef struct {
#define spin_is_locked(__lock) ((__lock)->lock != 0)
#define spin_unlock_wait(__lock) \
do { \
- membar("#LoadLoad"); \
+ rmb(); \
} while((__lock)->lock)
-extern void _do_spin_lock (spinlock_t *lock, char *str);
-extern void _do_spin_unlock (spinlock_t *lock);
-extern int _do_spin_trylock (spinlock_t *lock);
+extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller);
+extern void _do_spin_unlock(spinlock_t *lock);
+extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller);
-#define _raw_spin_trylock(lp) _do_spin_trylock(lp)
-#define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock")
+#define _raw_spin_trylock(lp) \
+ _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0))
+#define _raw_spin_lock(lock) \
+ _do_spin_lock(lock, "spin_lock", \
+ (unsigned long) __builtin_return_address(0))
#define _raw_spin_unlock(lock) _do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
@@ -279,37 +282,41 @@ typedef struct {
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-extern void _do_read_lock(rwlock_t *rw, char *str);
-extern void _do_read_unlock(rwlock_t *rw, char *str);
-extern void _do_write_lock(rwlock_t *rw, char *str);
-extern void _do_write_unlock(rwlock_t *rw);
-extern int _do_write_trylock(rwlock_t *rw, char *str);
+extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller);
+extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller);
+extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller);
+extern void _do_write_unlock(rwlock_t *rw, unsigned long caller);
+extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller);
#define _raw_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- _do_read_lock(lock, "read_lock"); \
+ _do_read_lock(lock, "read_lock", \
+ (unsigned long) __builtin_return_address(0)); \
local_irq_restore(flags); \
} while(0)
#define _raw_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- _do_read_unlock(lock, "read_unlock"); \
+ _do_read_unlock(lock, "read_unlock", \
+ (unsigned long) __builtin_return_address(0)); \
local_irq_restore(flags); \
} while(0)
#define _raw_write_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- _do_write_lock(lock, "write_lock"); \
+ _do_write_lock(lock, "write_lock", \
+ (unsigned long) __builtin_return_address(0)); \
local_irq_restore(flags); \
} while(0)
#define _raw_write_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- _do_write_unlock(lock); \
+ _do_write_unlock(lock, \
+ (unsigned long) __builtin_return_address(0)); \
local_irq_restore(flags); \
} while(0)
@@ -317,7 +324,8 @@ do { unsigned long flags; \
({ unsigned long flags; \
int val; \
local_irq_save(flags); \
- val = _do_write_trylock(lock, "write_trylock"); \
+ val = _do_write_trylock(lock, "write_trylock", \
+ (unsigned long) __builtin_return_address(0)); \
local_irq_restore(flags); \
val; \
})
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index ee4bdfc6b88..5e94c05dc2f 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -28,6 +28,14 @@ enum sparc_cpu {
#define ARCH_SUN4C_SUN4 0
#define ARCH_SUN4 0
+extern void mb(void);
+extern void rmb(void);
+extern void wmb(void);
+extern void membar_storeload(void);
+extern void membar_storeload_storestore(void);
+extern void membar_storeload_loadload(void);
+extern void membar_storestore_loadstore(void);
+
#endif
#define setipl(__new_ipl) \
@@ -78,16 +86,11 @@ enum sparc_cpu {
#define nop() __asm__ __volatile__ ("nop")
-#define membar(type) __asm__ __volatile__ ("membar " type : : : "memory")
-#define mb() \
- membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define rmb() membar("#LoadLoad")
-#define wmb() membar("#StoreStore")
#define read_barrier_depends() do { } while(0)
#define set_mb(__var, __value) \
- do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0)
+ do { __var = __value; membar_storeload_storestore(); } while(0)
#define set_wmb(__var, __value) \
- do { __var = __value; membar("#StoreStore"); } while(0)
+ do { __var = __value; wmb(); } while(0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h
index 5f3ab21b339..3209dd46ea7 100644
--- a/include/linux/8250_pci.h
+++ b/include/linux/8250_pci.h
@@ -1,2 +1,37 @@
-int pci_siig10x_fn(struct pci_dev *dev, int enable);
-int pci_siig20x_fn(struct pci_dev *dev, int enable);
+/*
+ * Definitions for PCI support.
+ */
+#define FL_BASE_MASK 0x0007
+#define FL_BASE0 0x0000
+#define FL_BASE1 0x0001
+#define FL_BASE2 0x0002
+#define FL_BASE3 0x0003
+#define FL_BASE4 0x0004
+#define FL_GET_BASE(x) (x & FL_BASE_MASK)
+
+/* Use successive BARs (PCI base address registers),
+ else use offset into some specified BAR */
+#define FL_BASE_BARS 0x0008
+
+/* do not assign an irq */
+#define FL_NOIRQ 0x0080
+
+/* Use the Base address register size to cap number of ports */
+#define FL_REGION_SZ_CAP 0x0100
+
+struct pciserial_board {
+ unsigned int flags;
+ unsigned int num_ports;
+ unsigned int base_baud;
+ unsigned int uart_offset;
+ unsigned int reg_shift;
+ unsigned int first_offset;
+};
+
+struct serial_private;
+
+struct serial_private *
+pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board);
+void pciserial_remove_ports(struct serial_private *priv);
+void pciserial_suspend_ports(struct serial_private *priv);
+void pciserial_resume_ports(struct serial_private *priv);
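The four declarations above are the whole public interface of the refactored 8250 PCI core. As a hedged sketch (the driver names, board values and device table are illustrative, not taken from this patch), a caller of this API would pair pciserial_init_ports() with pciserial_remove_ports() roughly like this:

    #include <linux/init.h>
    #include <linux/pci.h>
    #include <linux/err.h>
    #include <linux/8250_pci.h>

    /* All names and board parameters here are made up for illustration. */
    static struct pciserial_board example_board = {
            .flags          = FL_BASE0,     /* ports live in BAR 0 */
            .num_ports      = 2,
            .base_baud      = 115200,
            .uart_offset    = 8,            /* 8 bytes of registers per port */
    };

    static int __devinit example_probe(struct pci_dev *dev,
                                       const struct pci_device_id *ent)
    {
            struct serial_private *priv;
            int rc;

            rc = pci_enable_device(dev);
            if (rc)
                    return rc;

            priv = pciserial_init_ports(dev, &example_board);
            if (IS_ERR(priv)) {
                    pci_disable_device(dev);
                    return PTR_ERR(priv);
            }
            pci_set_drvdata(dev, priv);
            return 0;
    }

    static void __devexit example_remove(struct pci_dev *dev)
    {
            pciserial_remove_ports(pci_get_drvdata(dev));
            pci_disable_device(dev);
    }

Suspend/resume hooks would wrap pciserial_suspend_ports() and pciserial_resume_ports() the same way, which is exactly how the in-tree pciserial_suspend_one()/pciserial_resume_one() shown earlier in this patch use them.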
diff --git a/include/linux/ata.h b/include/linux/ata.h
index ca5fcadf998..a5b74efab06 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1,24 +1,29 @@
/*
- Copyright 2003-2004 Red Hat, Inc. All rights reserved.
- Copyright 2003-2004 Jeff Garzik
-
- The contents of this file are subject to the Open
- Software License version 1.1 that can be found at
- http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- by reference.
-
- Alternatively, the contents of this file may be used under the terms
- of the GNU General Public License version 2 (the "GPL") as distributed
- in the kernel source COPYING file, in which case the provisions of
- the GPL are applicable instead of the above. If you wish to allow
- the use of your version of this file only under the terms of the
- GPL and not to allow others to use your version of this file under
- the OSL, indicate your decision by deleting the provisions above and
- replace them with the notice and other provisions required by the GPL.
- If you do not delete the provisions above, a recipient may use your
- version of this file under either the OSL or the GPL.
-
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/
+ *
*/
#ifndef __LINUX_ATA_H__
@@ -108,6 +113,8 @@ enum {
/* ATA device commands */
ATA_CMD_CHK_POWER = 0xE5, /* check power mode */
+ ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
+ ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
ATA_CMD_EDD = 0x90, /* execute device diagnostic */
ATA_CMD_FLUSH = 0xE7,
ATA_CMD_FLUSH_EXT = 0xEA,
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index a0ab26aab45..d7021c391b2 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -408,6 +408,8 @@ struct ethtool_ops {
#define SUPPORTED_FIBRE (1 << 10)
#define SUPPORTED_BNC (1 << 11)
#define SUPPORTED_10000baseT_Full (1 << 12)
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
@@ -423,6 +425,8 @@ struct ethtool_ops {
#define ADVERTISED_FIBRE (1 << 10)
#define ADVERTISED_BNC (1 << 11)
#define ADVERTISED_10000baseT_Full (1 << 12)
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6cd9ba63563..fc05a989928 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1,23 +1,26 @@
/*
- Copyright 2003-2004 Red Hat, Inc. All rights reserved.
- Copyright 2003-2004 Jeff Garzik
-
- The contents of this file are subject to the Open
- Software License version 1.1 that can be found at
- http://www.opensource.org/licenses/osl-1.1.txt and is included herein
- by reference.
-
- Alternatively, the contents of this file may be used under the terms
- of the GNU General Public License version 2 (the "GPL") as distributed
- in the kernel source COPYING file, in which case the provisions of
- the GPL are applicable instead of the above. If you wish to allow
- the use of your version of this file only under the terms of the
- GPL and not to allow others to use your version of this file under
- the OSL, indicate your decision by deleting the provisions above and
- replace them with the notice and other provisions required by the GPL.
- If you do not delete the provisions above, a recipient may use your
- version of this file under either the OSL or the GPL.
-
+ * Copyright 2003-2005 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2005 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
*/
#ifndef __LINUX_LIBATA_H__
@@ -113,6 +116,8 @@ enum {
ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */
ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */
ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */
+ ATA_FLAG_NOINTR = (1 << 9), /* FIXME: Remove this once
+ * proper HSM is in place. */
 	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi layer */
ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
@@ -363,7 +368,7 @@ struct ata_port_operations {
void (*host_stop) (struct ata_host_set *host_set);
- void (*bmdma_stop) (struct ata_port *ap);
+ void (*bmdma_stop) (struct ata_queued_cmd *qc);
u8 (*bmdma_status) (struct ata_port *ap);
};
@@ -424,7 +429,7 @@ extern void ata_dev_id_string(u16 *id, unsigned char *s,
extern void ata_dev_config(struct ata_port *ap, unsigned int i);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
-extern void ata_bmdma_stop(struct ata_port *ap);
+extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat);
@@ -644,7 +649,7 @@ static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val)
ap->ops->scr_write(ap, reg, val);
}
-static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
+static inline void scr_write_flush(struct ata_port *ap, unsigned int reg,
u32 val)
{
ap->ops->scr_write(ap, reg, val);
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 374b615ea9e..9b8d0476988 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -22,6 +22,7 @@
#define MII_EXPANSION 0x06 /* Expansion register */
#define MII_CTRL1000 0x09 /* 1000BASE-T control */
#define MII_STAT1000 0x0a /* 1000BASE-T status */
+#define MII_ESTATUS 0x0f /* Extended Status */
#define MII_DCOUNTER 0x12 /* Disconnect counter */
#define MII_FCSCOUNTER 0x13 /* False carrier counter */
#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
@@ -54,7 +55,10 @@
#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
#define BMSR_RFAULT 0x0010 /* Remote fault detected */
#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
-#define BMSR_RESV 0x07c0 /* Unused... */
+#define BMSR_RESV 0x00c0 /* Unused... */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+#define BMSR_100FULL2 0x0200 /* Can do 100BASE-T2 HDX */
+#define BMSR_100HALF2 0x0400 /* Can do 100BASE-T2 FDX */
#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
@@ -114,6 +118,9 @@
#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */
#define EXPANSION_RESV 0xffe0 /* Unused... */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+
/* N-way test register. */
#define NWAYTEST_RESV1 0x00ff /* Unused... */
#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f90f674eb3b..9a0893f3249 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -63,11 +63,12 @@ struct device;
struct mmc_host {
struct device *dev;
+ struct class_device class_dev;
+ int index;
struct mmc_host_ops *ops;
unsigned int f_min;
unsigned int f_max;
u32 ocr_avail;
- char host_name[8];
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
@@ -97,6 +98,7 @@ extern void mmc_free_host(struct mmc_host *);
#define mmc_priv(x) ((void *)((x) + 1))
#define mmc_dev(x) ((x)->dev)
+#define mmc_hostname(x) ((x)->class_dev.class_id)
extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
extern int mmc_resume_host(struct mmc_host *);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index dce53ac1625..97bbccdbcca 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -33,7 +33,8 @@ struct ieee1394_device_id {
__u32 model_id;
__u32 specifier_id;
__u32 version;
- kernel_ulong_t driver_data;
+ kernel_ulong_t driver_data
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
};
@@ -182,7 +183,11 @@ struct of_device_id
char name[32];
char type[32];
char compatible[128];
+#if __KERNEL__
void *data;
+#else
+ kernel_ulong_t data;
+#endif
};
@@ -208,7 +213,8 @@ struct pcmcia_device_id {
#ifdef __KERNEL__
const char * prod_id[4];
#else
- kernel_ulong_t prod_id[4];
+ kernel_ulong_t prod_id[4]
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
#endif
/* not matched against */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 927ed487630..499a5325f67 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1249,6 +1249,7 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x036F
#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
#define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B
diff --git a/include/linux/phy.h b/include/linux/phy.h
new file mode 100644
index 00000000000..72cb67b66e0
--- /dev/null
+++ b/include/linux/phy.h
@@ -0,0 +1,377 @@
+/*
+ * include/linux/phy.h
+ *
+ * Framework and drivers for configuring and reading different PHYs
+ * Based on code in sungem_phy.c and gianfar_phy.c
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __PHY_H
+#define __PHY_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+#define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg | \
+ SUPPORTED_TP | \
+ SUPPORTED_MII)
+
+#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full)
+
+/* Set phydev->irq to PHY_POLL if interrupts are not supported,
+ * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
+ * the attached driver handles the interrupt
+ */
+#define PHY_POLL -1
+#define PHY_IGNORE_INTERRUPT -2
+
+#define PHY_HAS_INTERRUPT 0x00000001
+#define PHY_HAS_MAGICANEG 0x00000002
+
+#define MII_BUS_MAX 4
+
+
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_STATE_TIME 1
+#define PHY_FORCE_TIMEOUT 10
+#define PHY_AN_TIMEOUT 10
+
+#define PHY_MAX_ADDR 32
+
+/* The Bus class for PHYs. Devices which provide access to
+ * PHYs should register using this structure */
+struct mii_bus {
+ const char *name;
+ int id;
+ void *priv;
+ int (*read)(struct mii_bus *bus, int phy_id, int regnum);
+ int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+ int (*reset)(struct mii_bus *bus);
+
+ /* A lock to ensure that only one thing can read/write
+ * the MDIO bus at a time */
+ spinlock_t mdio_lock;
+
+ struct device *dev;
+
+ /* list of all PHYs on bus */
+ struct phy_device *phy_map[PHY_MAX_ADDR];
+
+ /* Pointer to an array of interrupts, each PHY's
+ * interrupt at the index matching its address */
+ int *irq;
+};
+
+#define PHY_INTERRUPT_DISABLED 0x0
+#define PHY_INTERRUPT_ENABLED 0x80000000
+
+/* PHY state machine states:
+ *
+ * DOWN: PHY device and driver are not ready for anything. probe
+ * should be called if and only if the PHY is in this state,
+ * given that the PHY device exists.
+ * - PHY driver probe function will, depending on the PHY, set
+ * the state to STARTING or READY
+ *
+ * STARTING: PHY device is coming up, and the ethernet driver is
+ * not ready. PHY drivers may set this in the probe function.
+ * If they do, they are responsible for making sure the state is
+ * eventually set to indicate whether the PHY is UP or READY,
+ * depending on the state when the PHY is done starting up.
+ * - PHY driver will set the state to READY
+ * - start will set the state to PENDING
+ *
+ * READY: PHY is ready to send and receive packets, but the
+ * controller is not. By default, PHYs which do not implement
+ * probe will be set to this state by phy_probe(). If the PHY
+ * driver knows the PHY is ready, and the PHY state is STARTING,
+ * then it sets this STATE.
+ * - start will set the state to UP
+ *
+ * PENDING: PHY device is coming up, but the ethernet driver is
+ * ready. phy_start will set this state if the PHY state is
+ * STARTING.
+ * - PHY driver will set the state to UP when the PHY is ready
+ *
+ * UP: The PHY and attached device are ready to do work.
+ * Interrupts should be started here.
+ * - timer moves to AN
+ *
+ * AN: The PHY is currently negotiating the link state. Link is
+ * therefore down for now. phy_timer will set this state when it
+ * detects the state is UP. config_aneg will set this state
+ * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
+ * - If autonegotiation finishes, but there's no link, it sets
+ * the state to NOLINK.
+ * - If aneg finishes with link, it sets the state to RUNNING,
+ * and calls adjust_link
+ * - If autonegotiation did not finish after an arbitrary amount
+ * of time, autonegotiation should be tried again if the PHY
+ * supports "magic" autonegotiation (back to AN)
+ * - If it didn't finish, and no magic_aneg, move to FORCING.
+ *
+ * NOLINK: PHY is up, but not currently plugged in.
+ * - If the timer notes that the link comes back, we move to RUNNING
+ * - config_aneg moves to AN
+ * - phy_stop moves to HALTED
+ *
+ * FORCING: PHY is being configured with forced settings
+ * - if link is up, move to RUNNING
+ * - If link is down, we drop to the next highest setting, and
+ * retry (FORCING) after a timeout
+ * - phy_stop moves to HALTED
+ *
+ * RUNNING: PHY is currently up, running, and possibly sending
+ * and/or receiving packets
+ * - timer will set CHANGELINK if we're polling (this ensures the
+ * link state is polled every other cycle of this state machine,
+ * which makes it every other second)
+ * - irq will set CHANGELINK
+ * - config_aneg will set AN
+ * - phy_stop moves to HALTED
+ *
+ * CHANGELINK: PHY experienced a change in link state
+ * - timer moves to RUNNING if link
+ * - timer moves to NOLINK if the link is down
+ * - phy_stop moves to HALTED
+ *
+ * HALTED: PHY is up, but no polling or interrupts are done. Or
+ * PHY is in an error state.
+ *
+ * - phy_start moves to RESUMING
+ *
+ * RESUMING: PHY was halted, but now wants to run again.
+ * - If we are forcing, or aneg is done, timer moves to RUNNING
+ * - If aneg is not done, timer moves to AN
+ * - phy_stop moves to HALTED
+ */
+enum phy_state {
+ PHY_DOWN=0,
+ PHY_STARTING,
+ PHY_READY,
+ PHY_PENDING,
+ PHY_UP,
+ PHY_AN,
+ PHY_RUNNING,
+ PHY_NOLINK,
+ PHY_FORCING,
+ PHY_CHANGELINK,
+ PHY_HALTED,
+ PHY_RESUMING
+};
+
+/* phy_device: An instance of a PHY
+ *
+ * drv: Pointer to the driver for this PHY instance
+ * bus: Pointer to the bus this PHY is on
+ * dev: driver model device structure for this PHY
+ * phy_id: UID for this device found during discovery
+ * state: state of the PHY for management purposes
+ * dev_flags: Device-specific flags used by the PHY driver.
+ * addr: Bus address of PHY
+ * link_timeout: The number of timer firings to wait before
+ * giving up on the current attempt at acquiring a link
+ * irq: IRQ number of the PHY's interrupt (-1 if none)
+ * phy_timer: The timer for handling the state machine
+ * phy_queue: A work_queue for the interrupt
+ * attached_dev: The attached enet driver's device instance ptr
+ * adjust_link: Callback for the enet controller to respond to
+ * changes in the link state.
+ * adjust_state: Callback for the enet driver to respond to
+ * changes in the state machine.
+ *
+ * speed, duplex, pause, supported, advertising, and
+ * autoneg are used like in mii_if_info
+ *
+ * interrupts currently only supports enabled or disabled,
+ * but could be changed in the future to support enabling
+ * and disabling specific interrupts
+ *
+ * Contains some infrastructure for polling and interrupt
+ * handling, as well as handling shifts in PHY hardware state
+ */
+struct phy_device {
+ /* Information about the PHY type */
+ /* And management functions */
+ struct phy_driver *drv;
+
+ struct mii_bus *bus;
+
+ struct device dev;
+
+ u32 phy_id;
+
+ enum phy_state state;
+
+ u32 dev_flags;
+
+	/* Bus address of the PHY (0-31) */
+ int addr;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+
+ /* The most recently read link state */
+ int link;
+
+ /* Enabled Interrupts */
+ u32 interrupts;
+
+ /* Union of PHY and Attached devices' supported modes */
+ /* See mii.h for more info */
+ u32 supported;
+ u32 advertising;
+
+ int autoneg;
+
+ int link_timeout;
+
+ /* Interrupt number for this PHY
+ * -1 means no interrupt */
+ int irq;
+
+ /* private data pointer */
+ /* For use by PHYs to maintain extra state */
+ void *priv;
+
+ /* Interrupt and Polling infrastructure */
+ struct work_struct phy_queue;
+ struct timer_list phy_timer;
+
+ spinlock_t lock;
+
+ struct net_device *attached_dev;
+
+ void (*adjust_link)(struct net_device *dev);
+
+ void (*adjust_state)(struct net_device *dev);
+};
+#define to_phy_device(d) container_of(d, struct phy_device, dev)
+
+/* struct phy_driver: Driver structure for a particular PHY type
+ *
+ * phy_id: The result of reading the UID registers of this PHY
+ * type, and ANDing them with the phy_id_mask. This driver
+ * only works for PHYs with IDs which match this field
+ * name: The friendly name of this PHY type
+ * phy_id_mask: Defines the important bits of the phy_id
+ * features: A list of features (speed, duplex, etc) supported
+ * by this PHY
+ * flags: A bitfield defining certain other features this PHY
+ * supports (like interrupts)
+ *
+ * The drivers must implement config_aneg and read_status. All
+ * other functions are optional. Note that none of these
+ * functions should be called from interrupt time. The goal is
+ * for the bus read/write functions to be able to block when the
+ * bus transaction is happening, and be freed up by an interrupt
+ * (The MPC85xx has this ability, though it is not currently
+ * supported in the driver).
+ */
+struct phy_driver {
+ u32 phy_id;
+ char *name;
+ unsigned int phy_id_mask;
+ u32 features;
+ u32 flags;
+
+ /* Called to initialize the PHY,
+ * including after a reset */
+ int (*config_init)(struct phy_device *phydev);
+
+ /* Called during discovery. Used to set
+ * up device-specific structures, if any */
+ int (*probe)(struct phy_device *phydev);
+
+ /* PHY Power Management */
+ int (*suspend)(struct phy_device *phydev);
+ int (*resume)(struct phy_device *phydev);
+
+ /* Configures the advertisement and resets
+ * autonegotiation if phydev->autoneg is on,
+ * forces the speed to the current settings in phydev
+ * if phydev->autoneg is off */
+ int (*config_aneg)(struct phy_device *phydev);
+
+ /* Determines the negotiated speed and duplex */
+ int (*read_status)(struct phy_device *phydev);
+
+ /* Clears any pending interrupts */
+ int (*ack_interrupt)(struct phy_device *phydev);
+
+ /* Enables or disables interrupts */
+ int (*config_intr)(struct phy_device *phydev);
+
+ /* Clears up any memory if needed */
+ void (*remove)(struct phy_device *phydev);
+
+ struct device_driver driver;
+};
+#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
+
+int phy_read(struct phy_device *phydev, u16 regnum);
+int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
+struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
+int phy_clear_interrupt(struct phy_device *phydev);
+int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+struct phy_device * phy_attach(struct net_device *dev,
+ const char *phy_id, u32 flags);
+struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
+ void (*handler)(struct net_device *), u32 flags);
+void phy_disconnect(struct phy_device *phydev);
+void phy_detach(struct phy_device *phydev);
+void phy_start(struct phy_device *phydev);
+void phy_stop(struct phy_device *phydev);
+int phy_start_aneg(struct phy_device *phydev);
+
+int mdiobus_register(struct mii_bus *bus);
+void mdiobus_unregister(struct mii_bus *bus);
+void phy_sanitize_settings(struct phy_device *phydev);
+int phy_stop_interrupts(struct phy_device *phydev);
+
+static inline int phy_read_status(struct phy_device *phydev) {
+ return phydev->drv->read_status(phydev);
+}
+
+int genphy_config_advert(struct phy_device *phydev);
+int genphy_setup_forced(struct phy_device *phydev);
+int genphy_restart_aneg(struct phy_device *phydev);
+int genphy_config_aneg(struct phy_device *phydev);
+int genphy_update_link(struct phy_device *phydev);
+int genphy_read_status(struct phy_device *phydev);
+void phy_driver_unregister(struct phy_driver *drv);
+int phy_driver_register(struct phy_driver *new_driver);
+void phy_prepare_link(struct phy_device *phydev,
+ void (*adjust_link)(struct net_device *));
+void phy_start_machine(struct phy_device *phydev,
+ void (*handler)(struct net_device *));
+void phy_stop_machine(struct phy_device *phydev);
+int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_mii_ioctl(struct phy_device *phydev,
+ struct mii_ioctl_data *mii_data, int cmd);
+int phy_start_interrupts(struct phy_device *phydev);
+void phy_print_status(struct phy_device *phydev);
+
+extern struct bus_type mdio_bus_type;
+#endif /* __PHY_H */
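To show how the declarations above fit together, here is a hedged sketch of a minimal PHY driver built entirely from the generic helpers this header exports; the PHY ID, mask and all names are invented for illustration, and a real driver for interrupt-capable hardware would also fill in ack_interrupt/config_intr and set PHY_HAS_INTERRUPT:

    #include <linux/module.h>
    #include <linux/ethtool.h>
    #include <linux/phy.h>

    /* The ID, mask and name below are placeholders, not a real device. */
    static struct phy_driver example_phy_driver = {
            .phy_id         = 0x01234560,
            .phy_id_mask    = 0xfffffff0,
            .name           = "Example 10/100 PHY",
            .features       = PHY_BASIC_FEATURES,
            .config_aneg    = genphy_config_aneg,   /* generic advertise + restart */
            .read_status    = genphy_read_status,   /* generic link/speed/duplex */
            .driver         = { .owner = THIS_MODULE, },
    };

    static int __init example_phy_init(void)
    {
            return phy_driver_register(&example_phy_driver);
    }

    static void __exit example_phy_exit(void)
    {
            phy_driver_unregister(&example_phy_driver);
    }

    module_init(example_phy_init);
    module_exit(example_phy_exit);

A MAC driver would then bind to such a PHY with phy_connect(), passing its adjust_link() callback, and call phy_start()/phy_stop() around interface up/down, which is the life cycle the state machine comment in this header describes.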
diff --git a/include/linux/serialP.h b/include/linux/serialP.h
index 2b2f35a64d7..2b9e6b9554d 100644
--- a/include/linux/serialP.h
+++ b/include/linux/serialP.h
@@ -140,44 +140,4 @@ struct rs_multiport_struct {
#define ALPHA_KLUDGE_MCR 0
#endif
-/*
- * Definitions for PCI support.
- */
-#define SPCI_FL_BASE_MASK 0x0007
-#define SPCI_FL_BASE0 0x0000
-#define SPCI_FL_BASE1 0x0001
-#define SPCI_FL_BASE2 0x0002
-#define SPCI_FL_BASE3 0x0003
-#define SPCI_FL_BASE4 0x0004
-#define SPCI_FL_GET_BASE(x) (x & SPCI_FL_BASE_MASK)
-
-#define SPCI_FL_IRQ_MASK (0x0007 << 4)
-#define SPCI_FL_IRQBASE0 (0x0000 << 4)
-#define SPCI_FL_IRQBASE1 (0x0001 << 4)
-#define SPCI_FL_IRQBASE2 (0x0002 << 4)
-#define SPCI_FL_IRQBASE3 (0x0003 << 4)
-#define SPCI_FL_IRQBASE4 (0x0004 << 4)
-#define SPCI_FL_GET_IRQBASE(x) ((x & SPCI_FL_IRQ_MASK) >> 4)
-
-/* Use successive BARs (PCI base address registers),
- else use offset into some specified BAR */
-#define SPCI_FL_BASE_TABLE 0x0100
-
-/* Use successive entries in the irq resource table */
-#define SPCI_FL_IRQ_TABLE 0x0200
-
-/* Use the irq resource table instead of dev->irq */
-#define SPCI_FL_IRQRESOURCE 0x0400
-
-/* Use the Base address register size to cap number of ports */
-#define SPCI_FL_REGION_SZ_CAP 0x0800
-
-/* Do not use irq sharing for this device */
-#define SPCI_FL_NO_SHIRQ 0x1000
-
-/* This is a PNP device */
-#define SPCI_FL_ISPNP 0x2000
-
-#define SPCI_FL_PNPDEFAULT (SPCI_FL_IRQRESOURCE|SPCI_FL_ISPNP)
-
#endif /* _LINUX_SERIAL_H */
diff --git a/drivers/infiniband/include/ib_cache.h b/include/rdma/ib_cache.h
index 44ef6bb9b9d..5bf9834f7dc 100644
--- a/drivers/infiniband/include/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -35,7 +37,7 @@
#ifndef _IB_CACHE_H
#define _IB_CACHE_H
-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>
/**
* ib_get_cached_gid - Returns a cached GID table entry
diff --git a/drivers/infiniband/include/ib_cm.h b/include/rdma/ib_cm.h
index da650115e79..77fe9039209 100644
--- a/drivers/infiniband/include/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -37,8 +37,8 @@
#if !defined(IB_CM_H)
#define IB_CM_H
-#include <ib_mad.h>
-#include <ib_sa.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_sa.h>
enum ib_cm_state {
IB_CM_IDLE,
@@ -115,7 +115,7 @@ struct ib_cm_req_event_param {
struct ib_sa_path_rec *primary_path;
struct ib_sa_path_rec *alternate_path;
- u64 remote_ca_guid;
+ __be64 remote_ca_guid;
u32 remote_qkey;
u32 remote_qpn;
enum ib_qp_type qp_type;
@@ -132,7 +132,7 @@ struct ib_cm_req_event_param {
};
struct ib_cm_rep_event_param {
- u64 remote_ca_guid;
+ __be64 remote_ca_guid;
u32 remote_qkey;
u32 remote_qpn;
u32 starting_psn;
@@ -146,39 +146,39 @@ struct ib_cm_rep_event_param {
};
enum ib_cm_rej_reason {
- IB_CM_REJ_NO_QP = __constant_htons(1),
- IB_CM_REJ_NO_EEC = __constant_htons(2),
- IB_CM_REJ_NO_RESOURCES = __constant_htons(3),
- IB_CM_REJ_TIMEOUT = __constant_htons(4),
- IB_CM_REJ_UNSUPPORTED = __constant_htons(5),
- IB_CM_REJ_INVALID_COMM_ID = __constant_htons(6),
- IB_CM_REJ_INVALID_COMM_INSTANCE = __constant_htons(7),
- IB_CM_REJ_INVALID_SERVICE_ID = __constant_htons(8),
- IB_CM_REJ_INVALID_TRANSPORT_TYPE = __constant_htons(9),
- IB_CM_REJ_STALE_CONN = __constant_htons(10),
- IB_CM_REJ_RDC_NOT_EXIST = __constant_htons(11),
- IB_CM_REJ_INVALID_GID = __constant_htons(12),
- IB_CM_REJ_INVALID_LID = __constant_htons(13),
- IB_CM_REJ_INVALID_SL = __constant_htons(14),
- IB_CM_REJ_INVALID_TRAFFIC_CLASS = __constant_htons(15),
- IB_CM_REJ_INVALID_HOP_LIMIT = __constant_htons(16),
- IB_CM_REJ_INVALID_PACKET_RATE = __constant_htons(17),
- IB_CM_REJ_INVALID_ALT_GID = __constant_htons(18),
- IB_CM_REJ_INVALID_ALT_LID = __constant_htons(19),
- IB_CM_REJ_INVALID_ALT_SL = __constant_htons(20),
- IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21),
- IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22),
- IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23),
- IB_CM_REJ_PORT_CM_REDIRECT = __constant_htons(24),
- IB_CM_REJ_PORT_REDIRECT = __constant_htons(25),
- IB_CM_REJ_INVALID_MTU = __constant_htons(26),
- IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27),
- IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28),
- IB_CM_REJ_INVALID_RNR_RETRY = __constant_htons(29),
- IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = __constant_htons(30),
- IB_CM_REJ_INVALID_CLASS_VERSION = __constant_htons(31),
- IB_CM_REJ_INVALID_FLOW_LABEL = __constant_htons(32),
- IB_CM_REJ_INVALID_ALT_FLOW_LABEL = __constant_htons(33)
+ IB_CM_REJ_NO_QP = 1,
+ IB_CM_REJ_NO_EEC = 2,
+ IB_CM_REJ_NO_RESOURCES = 3,
+ IB_CM_REJ_TIMEOUT = 4,
+ IB_CM_REJ_UNSUPPORTED = 5,
+ IB_CM_REJ_INVALID_COMM_ID = 6,
+ IB_CM_REJ_INVALID_COMM_INSTANCE = 7,
+ IB_CM_REJ_INVALID_SERVICE_ID = 8,
+ IB_CM_REJ_INVALID_TRANSPORT_TYPE = 9,
+ IB_CM_REJ_STALE_CONN = 10,
+ IB_CM_REJ_RDC_NOT_EXIST = 11,
+ IB_CM_REJ_INVALID_GID = 12,
+ IB_CM_REJ_INVALID_LID = 13,
+ IB_CM_REJ_INVALID_SL = 14,
+ IB_CM_REJ_INVALID_TRAFFIC_CLASS = 15,
+ IB_CM_REJ_INVALID_HOP_LIMIT = 16,
+ IB_CM_REJ_INVALID_PACKET_RATE = 17,
+ IB_CM_REJ_INVALID_ALT_GID = 18,
+ IB_CM_REJ_INVALID_ALT_LID = 19,
+ IB_CM_REJ_INVALID_ALT_SL = 20,
+ IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = 21,
+ IB_CM_REJ_INVALID_ALT_HOP_LIMIT = 22,
+ IB_CM_REJ_INVALID_ALT_PACKET_RATE = 23,
+ IB_CM_REJ_PORT_CM_REDIRECT = 24,
+ IB_CM_REJ_PORT_REDIRECT = 25,
+ IB_CM_REJ_INVALID_MTU = 26,
+ IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = 27,
+ IB_CM_REJ_CONSUMER_DEFINED = 28,
+ IB_CM_REJ_INVALID_RNR_RETRY = 29,
+ IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30,
+ IB_CM_REJ_INVALID_CLASS_VERSION = 31,
+ IB_CM_REJ_INVALID_FLOW_LABEL = 32,
+ IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33
};
struct ib_cm_rej_event_param {
@@ -222,8 +222,7 @@ struct ib_cm_sidr_req_event_param {
struct ib_cm_id *listen_id;
struct ib_device *device;
u8 port;
-
- u16 pkey;
+ u16 pkey;
};
enum ib_cm_sidr_status {
@@ -285,12 +284,12 @@ typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
struct ib_cm_id {
ib_cm_handler cm_handler;
void *context;
- u64 service_id;
- u64 service_mask;
+ __be64 service_id;
+ __be64 service_mask;
enum ib_cm_state state; /* internal CM/debug use */
enum ib_cm_lap_state lap_state; /* internal CM/debug use */
- u32 local_id;
- u32 remote_id;
+ __be32 local_id;
+ __be32 remote_id;
};
/**
@@ -330,13 +329,13 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
* IB_CM_ASSIGN_SERVICE_ID.
*/
int ib_cm_listen(struct ib_cm_id *cm_id,
- u64 service_id,
- u64 service_mask);
+ __be64 service_id,
+ __be64 service_mask);
struct ib_cm_req_param {
struct ib_sa_path_rec *primary_path;
struct ib_sa_path_rec *alternate_path;
- u64 service_id;
+ __be64 service_id;
u32 qp_num;
enum ib_qp_type qp_type;
u32 starting_psn;
@@ -528,7 +527,7 @@ int ib_send_cm_apr(struct ib_cm_id *cm_id,
struct ib_cm_sidr_req_param {
struct ib_sa_path_rec *path;
- u64 service_id;
+ __be64 service_id;
int timeout_ms;
const void *private_data;
u8 private_data_len;
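With service_id and service_mask now typed __be64 in ib_cm_listen() and the CM parameter structs above, callers are expected to hand wire-order values across the API boundary. A minimal sketch (not part of the patch) of a listener doing the conversion once; SERVICE_ID_BASE is a hypothetical host-order constant and the cm_id is assumed to have been created elsewhere:

#include <asm/byteorder.h>
#include <rdma/ib_cm.h>

#define SERVICE_ID_BASE 0x1000ULL	/* hypothetical, host order */

static int listen_on_service(struct ib_cm_id *cm_id)
{
	/* Convert host-order constants at the API boundary. */
	__be64 sid  = cpu_to_be64(SERVICE_ID_BASE);
	__be64 mask = cpu_to_be64(~0ULL);	/* compare all 64 bits: exact match */

	return ib_cm_listen(cm_id, sid, mask);
}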
diff --git a/drivers/infiniband/include/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index 6c9e24d6e14..86b7e93f198 100644
--- a/drivers/infiniband/include/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -36,7 +36,7 @@
#if !defined(IB_FMR_POOL_H)
#define IB_FMR_POOL_H
-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>
struct ib_fmr_pool;
diff --git a/drivers/infiniband/include/ib_mad.h b/include/rdma/ib_mad.h
index 491b6f25b3b..fc6b1c18ffc 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -41,7 +41,7 @@
#include <linux/pci.h>
-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>
/* Management base version */
#define IB_MGMT_BASE_VERSION 1
@@ -90,6 +90,7 @@
#define IB_MGMT_RMPP_STATUS_SUCCESS 0
#define IB_MGMT_RMPP_STATUS_RESX 1
+#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118
#define IB_MGMT_RMPP_STATUS_T2L 118
#define IB_MGMT_RMPP_STATUS_BAD_LEN 119
#define IB_MGMT_RMPP_STATUS_BAD_SEG 120
@@ -100,6 +101,7 @@
#define IB_MGMT_RMPP_STATUS_UNV 125
#define IB_MGMT_RMPP_STATUS_TMR 126
#define IB_MGMT_RMPP_STATUS_UNSPEC 127
+#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127
#define IB_QP0 0
#define IB_QP1 __constant_htonl(1)
@@ -111,12 +113,12 @@ struct ib_mad_hdr {
u8 mgmt_class;
u8 class_version;
u8 method;
- u16 status;
- u16 class_specific;
- u64 tid;
- u16 attr_id;
- u16 resv;
- u32 attr_mod;
+ __be16 status;
+ __be16 class_specific;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
};
struct ib_rmpp_hdr {
@@ -124,8 +126,8 @@ struct ib_rmpp_hdr {
u8 rmpp_type;
u8 rmpp_rtime_flags;
u8 rmpp_status;
- u32 seg_num;
- u32 paylen_newwin;
+ __be32 seg_num;
+ __be32 paylen_newwin;
};
typedef u64 __bitwise ib_sa_comp_mask;
@@ -139,9 +141,9 @@ typedef u64 __bitwise ib_sa_comp_mask;
* the wire so we can't change the layout)
*/
struct ib_sa_hdr {
- u64 sm_key;
- u16 attr_offset;
- u16 reserved;
+ __be64 sm_key;
+ __be16 attr_offset;
+ __be16 reserved;
ib_sa_comp_mask comp_mask;
} __attribute__ ((packed));
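The new IB_MGMT_RMPP_STATUS_ABORT_MIN/MAX defines bracket the ABORT status range, and the MAD header TID is now __be64. A rough sketch (assumed helper names, not from the patch) of how RMPP handling code might use them; only the TID needs byte-order conversion, since rmpp_status stays a plain u8:

#include <linux/kernel.h>
#include <rdma/ib_mad.h>

static int rmpp_status_is_abort(u8 status)
{
	return status >= IB_MGMT_RMPP_STATUS_ABORT_MIN &&
	       status <= IB_MGMT_RMPP_STATUS_ABORT_MAX;
}

static void log_bad_rmpp(struct ib_mad_hdr *hdr, struct ib_rmpp_hdr *rmpp)
{
	if (rmpp_status_is_abort(rmpp->rmpp_status))
		printk(KERN_DEBUG "RMPP abort %u on TID 0x%llx\n",
		       rmpp->rmpp_status,
		       (unsigned long long) be64_to_cpu(hdr->tid));
}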
diff --git a/drivers/infiniband/include/ib_pack.h b/include/rdma/ib_pack.h
index fe480f3e865..f926020d633 100644
--- a/drivers/infiniband/include/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -35,7 +35,7 @@
#ifndef IB_PACK_H
#define IB_PACK_H
-#include <ib_verbs.h>
+#include <rdma/ib_verbs.h>
enum {
IB_LRH_BYTES = 8,
diff --git a/drivers/infiniband/include/ib_sa.h b/include/rdma/ib_sa.h
index 6d999f7b5d9..c022edfc49d 100644
--- a/drivers/infiniband/include/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -38,8 +38,8 @@
#include <linux/compiler.h>
-#include <ib_verbs.h>
-#include <ib_mad.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_mad.h>
enum {
IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */
@@ -133,16 +133,16 @@ struct ib_sa_path_rec {
/* reserved */
union ib_gid dgid;
union ib_gid sgid;
- u16 dlid;
- u16 slid;
+ __be16 dlid;
+ __be16 slid;
int raw_traffic;
/* reserved */
- u32 flow_label;
+ __be32 flow_label;
u8 hop_limit;
u8 traffic_class;
int reversible;
u8 numb_path;
- u16 pkey;
+ __be16 pkey;
/* reserved */
u8 sl;
u8 mtu_selector;
@@ -176,18 +176,18 @@ struct ib_sa_path_rec {
struct ib_sa_mcmember_rec {
union ib_gid mgid;
union ib_gid port_gid;
- u32 qkey;
- u16 mlid;
+ __be32 qkey;
+ __be16 mlid;
u8 mtu_selector;
u8 mtu;
u8 traffic_class;
- u16 pkey;
+ __be16 pkey;
u8 rate_selector;
u8 rate;
u8 packet_life_time_selector;
u8 packet_life_time;
u8 sl;
- u32 flow_label;
+ __be32 flow_label;
u8 hop_limit;
u8 scope;
u8 join_state;
@@ -238,7 +238,7 @@ struct ib_sa_mcmember_rec {
struct ib_sa_service_rec {
u64 id;
union ib_gid gid;
- u16 pkey;
+ __be16 pkey;
/* reserved */
u32 lease;
u8 key[16];
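Because the LID, P_Key and flow-label fields of ib_sa_path_rec are now __be16/__be32, host-order values have to be converted before a record is filled in. A small illustrative sketch (the numeric values and helper name are placeholders, not from the patch):

#include <asm/byteorder.h>
#include <rdma/ib_sa.h>

static void fill_path_rec(struct ib_sa_path_rec *rec, u16 dlid, u16 slid)
{
	rec->dlid       = cpu_to_be16(dlid);
	rec->slid       = cpu_to_be16(slid);
	rec->pkey       = cpu_to_be16(0xffff);	/* default partition, for illustration */
	rec->flow_label = cpu_to_be32(0);
	rec->reversible = 1;
	rec->numb_path  = 1;
}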
diff --git a/drivers/infiniband/include/ib_smi.h b/include/rdma/ib_smi.h
index ca821651496..87f60737f69 100644
--- a/drivers/infiniband/include/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -39,9 +39,7 @@
#if !defined( IB_SMI_H )
#define IB_SMI_H
-#include <ib_mad.h>
-
-#define IB_LID_PERMISSIVE 0xFFFF
+#include <rdma/ib_mad.h>
#define IB_SMP_DATA_SIZE 64
#define IB_SMP_MAX_PATH_HOPS 64
@@ -51,16 +49,16 @@ struct ib_smp {
u8 mgmt_class;
u8 class_version;
u8 method;
- u16 status;
+ __be16 status;
u8 hop_ptr;
u8 hop_cnt;
- u64 tid;
- u16 attr_id;
- u16 resv;
- u32 attr_mod;
- u64 mkey;
- u16 dr_slid;
- u16 dr_dlid;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ __be16 dr_slid;
+ __be16 dr_dlid;
u8 reserved[28];
u8 data[IB_SMP_DATA_SIZE];
u8 initial_path[IB_SMP_MAX_PATH_HOPS];
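IB_LID_PERMISSIVE moves out of ib_smi.h and is redefined in ib_verbs.h (later in this patch) as __constant_htons(0xFFFF), so it is itself big-endian and can be compared directly against the now __be16 directed-route fields of struct ib_smp. A minimal sketch, assuming a hypothetical helper name:

#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>

static int smp_is_from_permissive_lid(struct ib_smp *smp)
{
	/* Both operands are __be16; no byte-order conversion needed. */
	return smp->dr_slid == IB_LID_PERMISSIVE;
}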
diff --git a/drivers/infiniband/include/ib_user_cm.h b/include/rdma/ib_user_cm.h
index 500b1af6ff7..72182d16778 100644
--- a/drivers/infiniband/include/ib_user_cm.h
+++ b/include/rdma/ib_user_cm.h
@@ -88,15 +88,15 @@ struct ib_ucm_attr_id {
};
struct ib_ucm_attr_id_resp {
- __u64 service_id;
- __u64 service_mask;
- __u32 local_id;
- __u32 remote_id;
+ __be64 service_id;
+ __be64 service_mask;
+ __be32 local_id;
+ __be32 remote_id;
};
struct ib_ucm_listen {
- __u64 service_id;
- __u64 service_mask;
+ __be64 service_id;
+ __be64 service_mask;
__u32 id;
};
@@ -114,13 +114,13 @@ struct ib_ucm_private_data {
struct ib_ucm_path_rec {
__u8 dgid[16];
__u8 sgid[16];
- __u16 dlid;
- __u16 slid;
+ __be16 dlid;
+ __be16 slid;
__u32 raw_traffic;
- __u32 flow_label;
+ __be32 flow_label;
__u32 reversible;
__u32 mtu;
- __u16 pkey;
+ __be16 pkey;
__u8 hop_limit;
__u8 traffic_class;
__u8 numb_path;
@@ -138,7 +138,7 @@ struct ib_ucm_req {
__u32 qpn;
__u32 qp_type;
__u32 psn;
- __u64 sid;
+ __be64 sid;
__u64 data;
__u64 primary_path;
__u64 alternate_path;
@@ -200,7 +200,7 @@ struct ib_ucm_lap {
struct ib_ucm_sidr_req {
__u32 id;
__u32 timeout;
- __u64 sid;
+ __be64 sid;
__u64 data;
__u64 path;
__u16 pkey;
@@ -237,7 +237,7 @@ struct ib_ucm_req_event_resp {
/* port */
struct ib_ucm_path_rec primary_path;
struct ib_ucm_path_rec alternate_path;
- __u64 remote_ca_guid;
+ __be64 remote_ca_guid;
__u32 remote_qkey;
__u32 remote_qpn;
__u32 qp_type;
@@ -253,7 +253,7 @@ struct ib_ucm_req_event_resp {
};
struct ib_ucm_rep_event_resp {
- __u64 remote_ca_guid;
+ __be64 remote_ca_guid;
__u32 remote_qkey;
__u32 remote_qpn;
__u32 starting_psn;
diff --git a/drivers/infiniband/include/ib_user_mad.h b/include/rdma/ib_user_mad.h
index a9a56b50aac..44537aa32e6 100644
--- a/drivers/infiniband/include/ib_user_mad.h
+++ b/include/rdma/ib_user_mad.h
@@ -70,8 +70,6 @@
* @traffic_class - Traffic class in GRH
* @gid - Remote GID in GRH
* @flow_label - Flow label in GRH
- *
- * All multi-byte quantities are stored in network (big endian) byte order.
*/
struct ib_user_mad_hdr {
__u32 id;
@@ -79,9 +77,9 @@ struct ib_user_mad_hdr {
__u32 timeout_ms;
__u32 retries;
__u32 length;
- __u32 qpn;
- __u32 qkey;
- __u16 lid;
+ __be32 qpn;
+ __be32 qkey;
+ __be16 lid;
__u8 sl;
__u8 path_bits;
__u8 grh_present;
@@ -89,7 +87,7 @@ struct ib_user_mad_hdr {
__u8 hop_limit;
__u8 traffic_class;
__u8 gid[16];
- __u32 flow_label;
+ __be32 flow_label;
};
/**
diff --git a/drivers/infiniband/include/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 7c613706af7..7ebb01c8f99 100644
--- a/drivers/infiniband/include/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -78,7 +78,12 @@ enum {
IB_USER_VERBS_CMD_POST_SEND,
IB_USER_VERBS_CMD_POST_RECV,
IB_USER_VERBS_CMD_ATTACH_MCAST,
- IB_USER_VERBS_CMD_DETACH_MCAST
+ IB_USER_VERBS_CMD_DETACH_MCAST,
+ IB_USER_VERBS_CMD_CREATE_SRQ,
+ IB_USER_VERBS_CMD_MODIFY_SRQ,
+ IB_USER_VERBS_CMD_QUERY_SRQ,
+ IB_USER_VERBS_CMD_DESTROY_SRQ,
+ IB_USER_VERBS_CMD_POST_SRQ_RECV
};
/*
@@ -143,8 +148,8 @@ struct ib_uverbs_query_device {
struct ib_uverbs_query_device_resp {
__u64 fw_ver;
- __u64 node_guid;
- __u64 sys_image_guid;
+ __be64 node_guid;
+ __be64 sys_image_guid;
__u64 max_mr_size;
__u64 page_size_cap;
__u32 vendor_id;
@@ -386,4 +391,32 @@ struct ib_uverbs_detach_mcast {
__u64 driver_data[0];
};
+struct ib_uverbs_create_srq {
+ __u64 response;
+ __u64 user_handle;
+ __u32 pd_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u64 driver_data[0];
+};
+
+struct ib_uverbs_create_srq_resp {
+ __u32 srq_handle;
+};
+
+struct ib_uverbs_modify_srq {
+ __u32 srq_handle;
+ __u32 attr_mask;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u32 reserved;
+ __u64 driver_data[0];
+};
+
+struct ib_uverbs_destroy_srq {
+ __u32 srq_handle;
+};
+
#endif /* IB_USER_VERBS_H */
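The ib_verbs.h hunks that follow add the kernel-side SRQ verbs (ib_create_srq, ib_modify_srq, ib_query_srq, ib_destroy_srq, ib_post_srq_recv). A hedged sketch of how a kernel consumer might use them, matching the signatures introduced below; the sizes, DMA address, lkey and handler names are placeholders, and error handling follows the usual ERR_PTR convention assumed for ib_create_srq():

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static void srq_event(struct ib_event *event, void *ctx)
{
	/* e.g. IB_EVENT_SRQ_LIMIT_REACHED or IB_EVENT_SRQ_ERR */
}

static struct ib_srq *setup_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.event_handler = srq_event,
		.srq_context   = NULL,
		.attr = {
			.max_wr    = 256,	/* placeholder sizes */
			.max_sge   = 1,
			.srq_limit = 0,	/* arm later via ib_modify_srq(IB_SRQ_LIMIT) */
		},
	};

	return ib_create_srq(pd, &init_attr);	/* assumed to return ERR_PTR() on failure */
}

static int post_one_recv(struct ib_srq *srq, u64 dma_addr, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,	/* placeholder DMA mapping */
		.length = 4096,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = dma_addr,	/* cookie returned in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}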
diff --git a/drivers/infiniband/include/ib_verbs.h b/include/rdma/ib_verbs.h
index 5d24edaa66e..e16cf94870f 100644
--- a/drivers/infiniband/include/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4,6 +4,7 @@
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -50,8 +51,8 @@
union ib_gid {
u8 raw[16];
struct {
- u64 subnet_prefix;
- u64 interface_id;
+ __be64 subnet_prefix;
+ __be64 interface_id;
} global;
};
@@ -87,8 +88,8 @@ enum ib_atomic_cap {
struct ib_device_attr {
u64 fw_ver;
- u64 node_guid;
- u64 sys_image_guid;
+ __be64 node_guid;
+ __be64 sys_image_guid;
u64 max_mr_size;
u64 page_size_cap;
u32 vendor_id;
@@ -255,7 +256,10 @@ enum ib_event_type {
IB_EVENT_PORT_ERR,
IB_EVENT_LID_CHANGE,
IB_EVENT_PKEY_CHANGE,
- IB_EVENT_SM_CHANGE
+ IB_EVENT_SM_CHANGE,
+ IB_EVENT_SRQ_ERR,
+ IB_EVENT_SRQ_LIMIT_REACHED,
+ IB_EVENT_QP_LAST_WQE_REACHED
};
struct ib_event {
@@ -263,6 +267,7 @@ struct ib_event {
union {
struct ib_cq *cq;
struct ib_qp *qp;
+ struct ib_srq *srq;
u8 port_num;
} element;
enum ib_event_type event;
@@ -290,8 +295,8 @@ struct ib_global_route {
};
struct ib_grh {
- u32 version_tclass_flow;
- u16 paylen;
+ __be32 version_tclass_flow;
+ __be16 paylen;
u8 next_hdr;
u8 hop_limit;
union ib_gid sgid;
@@ -302,6 +307,8 @@ enum {
IB_MULTICAST_QPN = 0xffffff
};
+#define IB_LID_PERMISSIVE __constant_htons(0xFFFF)
+
enum ib_ah_flags {
IB_AH_GRH = 1
};
@@ -383,6 +390,23 @@ enum ib_cq_notify {
IB_CQ_NEXT_COMP
};
+enum ib_srq_attr_mask {
+ IB_SRQ_MAX_WR = 1 << 0,
+ IB_SRQ_LIMIT = 1 << 1,
+};
+
+struct ib_srq_attr {
+ u32 max_wr;
+ u32 max_sge;
+ u32 srq_limit;
+};
+
+struct ib_srq_init_attr {
+ void (*event_handler)(struct ib_event *, void *);
+ void *srq_context;
+ struct ib_srq_attr attr;
+};
+
struct ib_qp_cap {
u32 max_send_wr;
u32 max_recv_wr;
@@ -710,10 +734,11 @@ struct ib_cq {
};
struct ib_srq {
- struct ib_device *device;
- struct ib_uobject *uobject;
- struct ib_pd *pd;
- void *srq_context;
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct ib_uobject *uobject;
+ void (*event_handler)(struct ib_event *, void *);
+ void *srq_context;
atomic_t usecnt;
};
@@ -827,6 +852,18 @@ struct ib_device {
int (*query_ah)(struct ib_ah *ah,
struct ib_ah_attr *ah_attr);
int (*destroy_ah)(struct ib_ah *ah);
+ struct ib_srq * (*create_srq)(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+ int (*modify_srq)(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask);
+ int (*query_srq)(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr);
+ int (*destroy_srq)(struct ib_srq *srq);
+ int (*post_srq_recv)(struct ib_srq *srq,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
struct ib_qp * (*create_qp)(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
@@ -1039,6 +1076,65 @@ int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int ib_destroy_ah(struct ib_ah *ah);
/**
+ * ib_create_srq - Creates a SRQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the SRQ.
+ *
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the
+ * requested size of the SRQ, and set to the actual values allocated
+ * on return. If ib_create_srq() succeeds, then max_wr and max_sge
+ * will always be at least as large as the requested values.
+ */
+struct ib_srq *ib_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr);
+
+/**
+ * ib_modify_srq - Modifies the attributes for the specified SRQ.
+ * @srq: The SRQ to modify.
+ * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
+ * the current values of selected SRQ attributes are returned.
+ * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
+ * are being modified.
+ *
+ * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
+ * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
+ * the number of receives queued drops below the limit.
+ */
+int ib_modify_srq(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask);
+
+/**
+ * ib_query_srq - Returns the attribute list and current values for the
+ * specified SRQ.
+ * @srq: The SRQ to query.
+ * @srq_attr: The attributes of the specified SRQ.
+ */
+int ib_query_srq(struct ib_srq *srq,
+ struct ib_srq_attr *srq_attr);
+
+/**
+ * ib_destroy_srq - Destroys the specified SRQ.
+ * @srq: The SRQ to destroy.
+ */
+int ib_destroy_srq(struct ib_srq *srq);
+
+/**
+ * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
+ * @srq: The SRQ to post the work request on.
+ * @recv_wr: A list of work requests to post on the receive queue.
+ * @bad_recv_wr: On an immediate failure, this parameter will reference
+ * the work request that failed to be posted on the SRQ.
+ */
+static inline int ib_post_srq_recv(struct ib_srq *srq,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
+}
+
+/**
* ib_create_qp - Creates a QP associated with the specified protection
* domain.
* @pd: The protection domain associated with the QP.