-rw-r--r--  drivers/net/8139too.c                          10
-rw-r--r--  drivers/net/8390p.c                             9
-rw-r--r--  drivers/net/mlx4/en_main.c                      5
-rw-r--r--  drivers/net/mlx4/en_netdev.c                   21
-rw-r--r--  drivers/net/mlx4/en_port.c                     45
-rw-r--r--  drivers/net/mlx4/en_resources.c                 6
-rw-r--r--  drivers/net/mlx4/en_rx.c                       12
-rw-r--r--  drivers/net/mlx4/en_tx.c                        1
-rw-r--r--  drivers/net/mlx4/mlx4_en.h                      1
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c                   2
-rw-r--r--  drivers/net/ucc_geth.c                          3
-rw-r--r--  drivers/net/wireless/atmel.c                    1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c         2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h         1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c         11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h          2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h          4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c          99
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c   148
-rw-r--r--  drivers/net/wireless/mwl8k.c                    3
-rw-r--r--  firmware/Makefile                               1
-rw-r--r--  firmware/WHENCE                                10
-rw-r--r--  firmware/cis/LA-PCM.cis.ihex                   20
-rw-r--r--  include/linux/socket.h                          1
-rw-r--r--  include/net/iucv/af_iucv.h                      9
-rw-r--r--  net/iucv/af_iucv.c                            432
-rw-r--r--  net/iucv/iucv.c                                43
-rw-r--r--  net/mac80211/rx.c                               2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c            4
-rw-r--r--  net/netlabel/netlabel_addrlist.c               26
-rw-r--r--  net/netrom/af_netrom.c                          6
31 files changed, 683 insertions, 257 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 00fe1301a9c..d90177509bf 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1383,6 +1383,11 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
+ tp->cur_rx = 0;
+
+ /* init Rx ring buffer DMA address */
+ RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
/* Must enable Tx/Rx before setting transfer thresholds! */
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
@@ -1390,8 +1395,6 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W32 (RxConfig, tp->rx_config);
RTL_W32 (TxConfig, rtl8139_tx_config);
- tp->cur_rx = 0;
-
rtl_check_media (dev, 1);
if (tp->chipset >= CH_8139B) {
@@ -1406,9 +1409,6 @@ static void rtl8139_hw_start (struct net_device *dev)
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
- /* init Rx ring buffer DMA address */
- RTL_W32_F (RxBuf, tp->rx_ring_dma);
-
/* init Tx buffer DMA addresses */
for (i = 0; i < NUM_TX_DESC; i++)
RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index 6ec11dafdb1..d225c291fd9 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -91,16 +91,15 @@ void NS8390p_init(struct net_device *dev, int startp)
}
EXPORT_SYMBOL(NS8390p_init);
-#if defined(MODULE)
-
-int init_module(void)
+static int __init NS8390p_init_module(void)
{
return 0;
}
-void cleanup_module(void)
+static void __exit NS8390p_cleanup_module(void)
{
}
-#endif /* MODULE */
+module_init(NS8390p_init_module);
+module_exit(NS8390p_cleanup_module);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index eda72dd2120..510633fd57f 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -181,7 +181,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
if (!mdev->workqueue) {
err = -ENOMEM;
- goto err_close_nic;
+ goto err_mr;
}
/* At this stage all non-port specific tasks are complete:
@@ -214,9 +214,8 @@ err_free_netdev:
flush_workqueue(mdev->workqueue);
/* Stop event queue before we drop down to release shared SW state */
-
-err_close_nic:
destroy_workqueue(mdev->workqueue);
+
err_mr:
mlx4_mr_free(dev, &mdev->mr);
err_uar:
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 303c23de6ca..438678ab2a1 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -348,11 +348,9 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (netif_msg_timer(priv))
mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
- if (netif_carrier_ok(dev)) {
- priv->port_stats.tx_timeout++;
- mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
- queue_work(mdev->workqueue, &priv->watchdog_task);
- }
+ priv->port_stats.tx_timeout++;
+ mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+ queue_work(mdev->workqueue, &priv->watchdog_task);
}
@@ -761,9 +759,14 @@ static void mlx4_en_restart(struct work_struct *work)
struct net_device *dev = priv->dev;
mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
- mlx4_en_stop_port(dev);
- if (mlx4_en_start_port(dev))
- mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ mlx4_en_stop_port(dev);
+ if (mlx4_en_start_port(dev))
+ mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+ }
+ mutex_unlock(&mdev->state_lock);
}
@@ -1054,7 +1057,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
* Set driver features
*/
dev->features |= NETIF_F_SG;
- dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index c5a4c038975..a29abe845d2 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -151,6 +151,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
+ int i;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -165,38 +166,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
- stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
- be32_to_cpu(mlx4_en_stats->RDROP);
- stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
- be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
- be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
- be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
- be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
- be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
- be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
- be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
- be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
- be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
- stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
- be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
- be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
- be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
- be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
- be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
- be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
- be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
- be64_to_cpu(mlx4_en_stats->ROCT_novlan);
-
- stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
- be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
+ stats->rx_packets = 0;
+ stats->rx_bytes = 0;
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ stats->rx_packets += priv->rx_ring[i].packets;
+ stats->rx_bytes += priv->rx_ring[i].bytes;
+ }
+ stats->tx_packets = 0;
+ stats->tx_bytes = 0;
+ for (i = 0; i <= priv->tx_ring_num; i++) {
+ stats->tx_packets += priv->tx_ring[i].packets;
+ stats->tx_bytes += priv->tx_ring[i].bytes;
+ }
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
be32_to_cpu(mlx4_en_stats->RdropLength) +
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
index a0545209e50..65ca706c04b 100644
--- a/drivers/net/mlx4/en_resources.c
+++ b/drivers/net/mlx4/en_resources.c
@@ -94,3 +94,9 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
vunmap(buf->direct.buf);
}
+
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
+{
+ return;
+}
+
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 7e40741fb7d..0cbb78ca7b2 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -436,8 +436,9 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Initialize page allocators */
err = mlx4_en_init_allocator(priv, ring);
if (err) {
- mlx4_err(mdev, "Failed initializing ring allocator\n");
- goto err_allocator;
+ mlx4_err(mdev, "Failed initializing ring allocator\n");
+ ring_ind--;
+ goto err_allocator;
}
/* Fill Rx buffers */
@@ -467,6 +468,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring->wqres.db.dma, &ring->srq);
if (err){
mlx4_err(mdev, "Failed to allocate srq\n");
+ ring_ind--;
goto err_srq;
}
ring->srq.event = mlx4_en_srq_event;
@@ -926,12 +928,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
}
}
-static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
-{
- return;
-}
-
-
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
int qpn, int srqn, int cqn,
enum mlx4_qp_state *state,
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 4afd5993e31..ac6fc499b28 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -112,6 +112,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
+ ring->qp.event = mlx4_en_sqp_event;
return 0;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index e9af32d41ca..ef840abbcd3 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -538,6 +538,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int srqn,
struct mlx4_qp_context *context);
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2fbf9f9ddd3..652a3688836 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1758,7 +1758,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"),
- PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "LA-PCM.cis"),
+ PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 98f961e92ca..811f97cb0a2 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1394,7 +1394,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- upsmr |= UCC_GETH_UPSMR_RPM;
+ if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UCC_GETH_UPSMR_R10M;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 857d84148b1..27eef8fb710 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1502,7 +1502,6 @@ static const struct net_device_ops atmel_netdev_ops = {
.ndo_set_mac_address = atmel_set_mac_address,
.ndo_start_xmit = start_tx,
.ndo_do_ioctl = atmel_ioctl,
- .ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index edc0cb38033..41f1d66cfeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1192,7 +1192,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
return -ENOMEM;
}
} else
- iwl_rx_queue_reset(priv, rxq);
+ iwl3945_rx_queue_reset(priv, rxq);
iwl3945_rx_replenish(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 4f5473e0161..da87528f355 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -215,6 +215,7 @@ extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq, int count, u32 id);
extern void iwl3945_rx_replenish(void *data);
+extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
const void *data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 9e41e1bb7c7..277dfc57fde 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -883,11 +883,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
- dma_sync_single_range_for_cpu(
- &priv->pci_dev->dev, rxb->real_dma_addr,
- rxb->aligned_dma_addr - rxb->real_dma_addr,
- priv->hw_params.rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
+ priv->hw_params.rx_buf_size + 256,
+ PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
/* Reclaim a command buffer only if this packet is a response
@@ -939,9 +937,6 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
- pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
- priv->hw_params.rx_buf_size + 256,
- PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 2f1242447b3..6e983149b83 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -223,7 +223,7 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
+#define CSR_EEPROM_GP_VALID_MSK (0x00000007)
#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index e363d9b05ee..5aa76a70632 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -361,12 +361,16 @@ struct iwl_host_cmd {
/**
* struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
* @rx_free: list of free SKBs for use
* @rx_used: List of Rx buffers with no SKB
* @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 58cdd329421..a82cca0a30c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -799,6 +799,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ /* Total # bytes to be transmitted */
+ len = (u16)skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+
+ /* set is_hcca to 0; it probably will never be implemented */
+ iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
+
+ iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
+
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -819,21 +835,30 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
else
len_org = 0;
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (len_org)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
- out_cmd, sizeof(struct iwl_cmd),
+ &out_cmd->hdr, len,
PCI_DMA_BIDIRECTIONAL);
pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
- pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
+ pci_unmap_len_set(&out_cmd->meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
- txcmd_phys += offsetof(struct iwl_cmd, hdr);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
txcmd_phys, len, 1, 0);
- if (info->control.hw_key)
- iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ if (qc)
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -846,41 +871,29 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
0, 0);
}
- /* Tell NIC about any 2-byte padding after MAC header */
- if (len_org)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
- /* TODO need this for burst mode later on */
- iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
-
- iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
-
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- if (qc)
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+ le16_to_cpu(out_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
-
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
+ priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+ len, PCI_DMA_BIDIRECTIONAL);
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -968,18 +981,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
INDEX_TO_SEQ(q->write_ptr));
if (out_cmd->meta.flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = (idx == TFD_CMD_SLOTS) ?
- IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
-
- phys_addr = pci_map_single(priv->pci_dev, out_cmd,
- len, PCI_DMA_BIDIRECTIONAL);
- pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
- pci_unmap_len_set(&out_cmd->meta, len, len);
- phys_addr += offsetof(struct iwl_cmd, hdr);
+ len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
+ len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, fix_size, 1,
- U32_PAD(cmd->len));
#ifdef CONFIG_IWLWIFI_DEBUG
switch (out_cmd->hdr.cmd) {
@@ -1007,6 +1011,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* Set up entry in queue's byte count circular buffer */
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+ phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+ fix_size, PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
+ pci_unmap_len_set(&out_cmd->meta, len, fix_size);
+
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ phys_addr, fix_size, 1,
+ U32_PAD(cmd->len));
+
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
ret = iwl_txq_update_write_ptr(priv, txq);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index a5efb3b28c7..84feeb70fea 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -690,7 +690,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
int txq_id = skb_get_queue_mapping(skb);
- u16 len, idx, len_org, hdr_len;
+ u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */
u8 id;
u8 unicast;
u8 sta_id;
@@ -792,6 +792,40 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx->hdr, hdr, hdr_len);
+
+ if (info->control.hw_key)
+ iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
+
+ /* set is_hcca to 0; it probably will never be implemented */
+ iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
+
+ /* Total # bytes to be transmitted */
+ len = (u16)skb->len;
+ tx->len = cpu_to_le16(len);
+
+
+ tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+ tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ if (qc)
+ priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+ le16_to_cpu(out_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
+ iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
+ ieee80211_hdrlen(fc));
+
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@@ -814,22 +848,18 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- out_cmd, sizeof(struct iwl_cmd),
- PCI_DMA_TODEVICE);
+ txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+ len, PCI_DMA_TODEVICE);
+ /* we do not map meta data ... so we can safely access address to
+ * provide to unmap command*/
pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
- pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- txcmd_phys += offsetof(struct iwl_cmd, hdr);
+ pci_unmap_len_set(&out_cmd->meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
txcmd_phys, len, 1, 0);
- if (info->control.hw_key)
- iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@@ -842,32 +872,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
0, U32_PAD(len));
}
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx->len = cpu_to_le16(len);
-
- /* TODO need this for burst mode later on */
- iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
- tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
- tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- if (qc)
- priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
-
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
- ieee80211_hdrlen(fc));
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -1379,6 +1383,37 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
spin_unlock_irqrestore(&rxq->lock, flags);
}
+void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].skb != NULL) {
+ pci_unmap_single(priv->pci_dev,
+ rxq->pool[i].real_dma_addr,
+ priv->hw_params.rx_buf_size,
+ PCI_DMA_FROMDEVICE);
+ priv->alloc_rxb_skb--;
+ dev_kfree_skb(rxq->pool[i].skb);
+ rxq->pool[i].skb = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+EXPORT_SYMBOL(iwl3945_rx_queue_reset);
+
/*
* this should be called while priv->lock is locked
*/
@@ -1403,6 +1438,34 @@ void iwl3945_rx_replenish(void *data)
spin_unlock_irqrestore(&priv->lock, flags);
}
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non NULL it is unmapped and freed
+ */
+static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].skb != NULL) {
+ pci_unmap_single(priv->pci_dev,
+ rxq->pool[i].real_dma_addr,
+ priv->hw_params.rx_buf_size,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rxq->pool[i].skb);
+ }
+ }
+
+ pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+EXPORT_SYMBOL(iwl3945_rx_queue_free);
+
+
/* Convert linear signal-to-noise ratio into dB */
static u8 ratio2dB[100] = {
/* 0 1 2 3 4 5 6 7 8 9 */
@@ -1520,9 +1583,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
- pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
- priv->hw_params.rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
+ priv->hw_params.rx_buf_size,
+ PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
/* Reclaim a command buffer only if this packet is a response
@@ -1571,9 +1634,6 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
- pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
- priv->hw_params.rx_buf_size,
- PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
@@ -4424,12 +4484,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
iwl_rfkill_unregister(priv);
- cancel_delayed_work(&priv->rfkill_poll);
+ cancel_delayed_work_sync(&priv->rfkill_poll);
iwl3945_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
- iwl_rx_queue_free(priv, &priv->rxq);
+ iwl3945_rx_queue_free(priv, &priv->rxq);
iwl3945_hw_txq_ctx_free(priv);
iwl3945_unset_hw_params(priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b5dbf6d9e51..a9a970469c2 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -893,8 +893,7 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
rx_desc->next_rx_desc_phys_addr =
cpu_to_le32(rxq->rx_desc_dma
+ nexti * sizeof(*rx_desc));
- rx_desc->rx_ctrl =
- cpu_to_le32(MWL8K_RX_CTRL_OWNED_BY_HOST);
+ rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST;
}
return 0;
diff --git a/firmware/Makefile b/firmware/Makefile
index 142c17ab9e5..25200d106ee 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -46,6 +46,7 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
e100/d102e_ucode.bin
fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
+fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis
fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
advansys/3550.bin advansys/38C0800.bin
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 10f61c9e5a8..4c52984a831 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -576,6 +576,16 @@ Found in hex form in kernel source.
--------------------------------------------------------------------------
+Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
+
+File: cis/LA-PCM.cis
+
+Licence: GPL
+
+Originally developed by the pcmcia-cs project
+
+--------------------------------------------------------------------------
+
Driver: PCMCIA_SMC91C92 - SMC 91Cxx PCMCIA
File: ositech/Xilinx7OD.bin
diff --git a/firmware/cis/LA-PCM.cis.ihex b/firmware/cis/LA-PCM.cis.ihex
new file mode 100644
index 00000000000..a0ff0c7b393
--- /dev/null
+++ b/firmware/cis/LA-PCM.cis.ihex
@@ -0,0 +1,20 @@
+:100000000105D4F953E9FF17035338FF20040FC04B
+:1000100002002102060315390401416C6C69656414
+:100020002054656C657369732C4B2E4B00457468C6
+:1000300065726E6574204C414E20436172640043CA
+:10004000656E747265434F4D004C412D50434D0019
+:10005000FF1A0602100000020B1B08810108E06075
+:1000600000021F1B08820108E06020021F1B08839A
+:100070000108E06040021F1B08840108E060600284
+:100080001F1B08850108E06080021F1B088601080D
+:10009000E060A0021F1B08870108E060C0021F1B70
+:1000A00008880108E060E0021F1B08890108E06081
+:1000B00000031F1B088A0108E06020031F1B088B38
+:1000C0000108E06040031F1B088C0108E06060032A
+:1000D0001F1B088D0108E06080031F1B088E0108AC
+:1000E000E060A0031F1B088F0108E060C0031F1B16
+:0D00F00008900108E060E0031F1400FF000D
+:00000001FF
+#
+# Replacement CIS for Allied Telesis LA-PCM
+#
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 42a0396f2c5..d2310cb45d2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -303,6 +303,7 @@ struct ucred {
#define SOL_BLUETOOTH 274
#define SOL_PNPIPE 275
#define SOL_RDS 276
+#define SOL_IUCV 277
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 85f80eadfa3..21ee49ffcba 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -73,8 +73,17 @@ struct iucv_sock {
struct sk_buff_head backlog_skb_q;
struct sock_msg_q message_q;
unsigned int send_tag;
+ u8 flags;
+ u16 msglimit;
};
+/* iucv socket options (SOL_IUCV) */
+#define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */
+#define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
+
+/* iucv related control messages (scm) */
+#define SCM_IUCV_TRGCLS 0x0001 /* target class control message */
+
struct iucv_sock_list {
struct hlist_head head;
rwlock_t lock;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 49e786535dc..a9b3a6f9ea9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -29,10 +29,7 @@
#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
-#define CONFIG_IUCV_SOCK_DEBUG 1
-
-#define IPRMDATA 0x80
-#define VERSION "1.0"
+#define VERSION "1.1"
static char iucv_userid[80];
@@ -44,6 +41,19 @@ static struct proto iucv_proto = {
.obj_size = sizeof(struct iucv_sock),
};
+/* special AF_IUCV IPRM messages */
+static const u8 iprm_shutdown[8] =
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
+
+#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
+
+/* macros to set/get socket control buffer at correct offset */
+#define CB_TAG(skb) ((skb)->cb) /* iucv message tag */
+#define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag))
+#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
+#define CB_TRGCLS_LEN (TRGCLS_SIZE)
+
+
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
@@ -54,6 +64,7 @@ static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
+static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
@@ -65,7 +76,8 @@ static struct iucv_handler af_iucv_handler = {
.path_complete = iucv_callback_connack,
.path_severed = iucv_callback_connrej,
.message_pending = iucv_callback_rx,
- .message_complete = iucv_callback_txdone
+ .message_complete = iucv_callback_txdone,
+ .path_quiesced = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
@@ -78,6 +90,37 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
memcpy(&dst[8], src, 8);
}
+/**
+ * iucv_msg_length() - Returns the length of an iucv message.
+ * @msg: Pointer to struct iucv_message, MUST NOT be NULL
+ *
+ * The function returns the length of the specified iucv message @msg of data
+ * stored in a buffer and of data stored in the parameter list (PRMDATA).
+ *
+ * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
+ * data:
+ * PRMDATA[0..6] socket data (max 7 bytes);
+ * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
+ *
+ * The socket data length is computed by subtracting the socket data length
+ * value from 0xFF.
+ * If the socket data len is greater than 7, then PRMDATA can be used for special
+ * notifications (see iucv_sock_shutdown); and further,
+ * if the socket data len is > 7, the function returns 8.
+ *
+ * Use this function to allocate socket buffers to store iucv message data.
+ */
+static inline size_t iucv_msg_length(struct iucv_message *msg)
+{
+ size_t datalen;
+
+ if (msg->flags & IUCV_IPRMDATA) {
+ datalen = 0xff - msg->rmmsg[7];
+ return (datalen < 8) ? datalen : 8;
+ }
+ return msg->length;
+}
+
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
@@ -172,6 +215,7 @@ static void iucv_sock_close(struct sock *sk)
err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
}
+ case IUCV_CLOSING: /* fall through */
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
@@ -224,6 +268,10 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
spin_lock_init(&iucv_sk(sk)->message_q.lock);
skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
iucv_sk(sk)->send_tag = 0;
+ iucv_sk(sk)->flags = 0;
+ iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
+ iucv_sk(sk)->path = NULL;
+ memset(&iucv_sk(sk)->src_user_id , 0, 32);
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -245,11 +293,22 @@ static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
struct sock *sk;
- if (sock->type != SOCK_STREAM)
- return -ESOCKTNOSUPPORT;
+ if (protocol && protocol != PF_IUCV)
+ return -EPROTONOSUPPORT;
sock->state = SS_UNCONNECTED;
- sock->ops = &iucv_sock_ops;
+
+ switch (sock->type) {
+ case SOCK_STREAM:
+ sock->ops = &iucv_sock_ops;
+ break;
+ case SOCK_SEQPACKET:
+ /* currently, proto ops can handle both sk types */
+ sock->ops = &iucv_sock_ops;
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+ }
sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
if (!sk)
@@ -460,11 +519,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
return -EBADFD;
- if (sk->sk_type != SOCK_STREAM)
+ if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
return -EINVAL;
- iucv = iucv_sk(sk);
-
if (sk->sk_state == IUCV_OPEN) {
err = iucv_sock_autobind(sk);
if (unlikely(err))
@@ -483,8 +540,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
iucv = iucv_sk(sk);
/* Create path. */
- iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
- IPRMDATA, GFP_KERNEL);
+ iucv->path = iucv_path_alloc(iucv->msglimit,
+ IUCV_IPRMDATA, GFP_KERNEL);
if (!iucv->path) {
err = -ENOMEM;
goto done;
@@ -518,8 +575,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
}
if (sk->sk_state == IUCV_DISCONN) {
- release_sock(sk);
- return -ECONNREFUSED;
+ err = -ECONNREFUSED;
}
if (err) {
@@ -542,7 +598,10 @@ static int iucv_sock_listen(struct socket *sock, int backlog)
lock_sock(sk);
err = -EINVAL;
- if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
+ if (sk->sk_state != IUCV_BOUND)
+ goto done;
+
+ if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto done;
sk->sk_max_ack_backlog = backlog;
@@ -633,6 +692,30 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
return 0;
}
+/**
+ * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
+ * @path: IUCV path
+ * @msg: Pointer to a struct iucv_message
+ * @skb: The socket data to send, skb->len MUST BE <= 7
+ *
+ * Send the socket data in the parameter list in the iucv message
+ * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
+ * list and the socket data len at index 7 (last byte).
+ * See also iucv_msg_length().
+ *
+ * Returns the error code from the iucv_message_send() call.
+ */
+static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
+ struct sk_buff *skb)
+{
+ u8 prmdata[8];
+
+ memcpy(prmdata, (void *) skb->data, skb->len);
+ prmdata[7] = 0xff - (u8) skb->len;
+ return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
+ (void *) prmdata, 8);
+}
+
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
@@ -640,6 +723,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct iucv_sock *iucv = iucv_sk(sk);
struct sk_buff *skb;
struct iucv_message txmsg;
+ struct cmsghdr *cmsg;
+ int cmsg_done;
char user_id[9];
char appl_id[9];
int err;
@@ -651,6 +736,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ /* SOCK_SEQPACKET: we do not support segmented records */
+ if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
+ return -EOPNOTSUPP;
+
lock_sock(sk);
if (sk->sk_shutdown & SEND_SHUTDOWN) {
@@ -659,6 +748,52 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
}
if (sk->sk_state == IUCV_CONNECTED) {
+ /* initialize defaults */
+ cmsg_done = 0; /* check for duplicate headers */
+ txmsg.class = 0;
+
+ /* iterate over control messages */
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+ cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+ if (!CMSG_OK(msg, cmsg)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (cmsg->cmsg_level != SOL_IUCV)
+ continue;
+
+ if (cmsg->cmsg_type & cmsg_done) {
+ err = -EINVAL;
+ goto out;
+ }
+ cmsg_done |= cmsg->cmsg_type;
+
+ switch (cmsg->cmsg_type) {
+ case SCM_IUCV_TRGCLS:
+ if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* set iucv message target class */
+ memcpy(&txmsg.class,
+ (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+
+ break;
+
+ default:
+ err = -EINVAL;
+ goto out;
+ break;
+ }
+ }
+
+ /* allocate one skb for each iucv message:
+ * this is fine for SOCK_SEQPACKET (unless we want to support
+ * segmented records using the MSG_EOR flag), but
+ * for SOCK_STREAM we might want to improve it in future */
if (!(skb = sock_alloc_send_skb(sk, len,
msg->msg_flags & MSG_DONTWAIT,
&err)))
@@ -669,13 +804,33 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto fail;
}
- txmsg.class = 0;
- memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
+ /* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
- memcpy(skb->cb, &txmsg.tag, 4);
+ memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
skb_queue_tail(&iucv->send_skb_q, skb);
- err = iucv_message_send(iucv->path, &txmsg, 0, 0,
- (void *) skb->data, skb->len);
+
+ if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+ && skb->len <= 7) {
+ err = iucv_send_iprm(iucv->path, &txmsg, skb);
+
+ /* on success: there is no message_complete callback
+ * for an IPRMDATA msg; remove skb from send queue */
+ if (err == 0) {
+ skb_unlink(skb, &iucv->send_skb_q);
+ kfree_skb(skb);
+ }
+
+ /* this error should never happen since the
+ * IUCV_IPRMDATA path flag is set... sever path */
+ if (err == 0x15) {
+ iucv_path_sever(iucv->path, NULL);
+ skb_unlink(skb, &iucv->send_skb_q);
+ err = -EPIPE;
+ goto fail;
+ }
+ } else
+ err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ (void *) skb->data, skb->len);
if (err) {
if (err == 3) {
user_id[8] = 0;
@@ -722,6 +877,10 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
if (!nskb)
return -ENOMEM;
+ /* copy target class to control buffer of new skb */
+ memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+
+ /* copy data fragment */
memcpy(nskb->data, skb->data + copied, size);
copied += size;
dataleft -= size;
@@ -741,19 +900,33 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_message *msg)
{
int rc;
+ unsigned int len;
+
+ len = iucv_msg_length(msg);
- if (msg->flags & IPRMDATA) {
- skb->data = NULL;
- skb->len = 0;
+ /* store msg target class in the second 4 bytes of skb ctrl buffer */
+ /* Note: the first 4 bytes are reserved for msg tag */
+ memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+
+ /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
+ if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
+ if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
+ skb->data = NULL;
+ skb->len = 0;
+ }
} else {
- rc = iucv_message_receive(path, msg, 0, skb->data,
- msg->length, NULL);
+ rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
+ skb->data, len, NULL);
if (rc) {
kfree_skb(skb);
return;
}
- if (skb->truesize >= sk->sk_rcvbuf / 4) {
- rc = iucv_fragment_skb(sk, skb, msg->length);
+ /* we need to fragment iucv messages for SOCK_STREAM only;
+ * for SOCK_SEQPACKET, it is only relevant if we support
+ * record segmentation using MSG_EOR (see also recvmsg()) */
+ if (sk->sk_type == SOCK_STREAM &&
+ skb->truesize >= sk->sk_rcvbuf / 4) {
+ rc = iucv_fragment_skb(sk, skb, len);
kfree_skb(skb);
skb = NULL;
if (rc) {
@@ -764,7 +937,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
} else {
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
- skb->len = msg->length;
+ skb->len = len;
}
}
@@ -779,7 +952,7 @@ static void iucv_process_message_q(struct sock *sk)
struct sock_msg_q *p, *n;
list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
- skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
+ skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
if (!skb)
break;
iucv_process_message(sk, skb, p->path, &p->msg);
@@ -796,7 +969,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
- int target, copied = 0;
+ unsigned int copied, rlen;
struct sk_buff *skb, *rskb, *cskb;
int err = 0;
@@ -809,8 +982,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
- target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
-
+ /* receive/dequeue next skb:
+ * the function understands MSG_PEEK and, thus, does not dequeue skb */
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -818,25 +991,45 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
- copied = min_t(unsigned int, skb->len, len);
+ rlen = skb->len; /* real length of skb */
+ copied = min_t(unsigned int, rlen, len);
cskb = skb;
if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
- skb_queue_head(&sk->sk_receive_queue, skb);
- if (copied == 0)
- return -EFAULT;
- goto done;
+ if (!(flags & MSG_PEEK))
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ return -EFAULT;
+ }
+
+ /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
+ if (sk->sk_type == SOCK_SEQPACKET) {
+ if (copied < rlen)
+ msg->msg_flags |= MSG_TRUNC;
+ /* each iucv message contains a complete record */
+ msg->msg_flags |= MSG_EOR;
}
- len -= copied;
+ /* create control message to store iucv msg target class:
+ * get the trgcls from the control buffer of the skb due to
+ * fragmentation of original iucv message. */
+ err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
+ CB_TRGCLS_LEN, CB_TRGCLS(skb));
+ if (err) {
+ if (!(flags & MSG_PEEK))
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ return err;
+ }
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, copied);
- if (skb->len) {
- skb_queue_head(&sk->sk_receive_queue, skb);
- goto done;
+ /* SOCK_STREAM: re-queue skb if it contains unreceived data */
+ if (sk->sk_type == SOCK_STREAM) {
+ skb_pull(skb, copied);
+ if (skb->len) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ goto done;
+ }
}
kfree_skb(skb);
@@ -858,12 +1051,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
iucv_process_message_q(sk);
spin_unlock_bh(&iucv->message_q.lock);
}
-
- } else
- skb_queue_head(&sk->sk_receive_queue, skb);
+ }
done:
- return err ? : copied;
+ /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
+ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
+ copied = rlen;
+
+ return copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
@@ -925,7 +1120,6 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_message txmsg;
int err = 0;
- u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
how++;
@@ -934,6 +1128,9 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
switch (sk->sk_state) {
+ case IUCV_DISCONN:
+ case IUCV_CLOSING:
+ case IUCV_SEVERED:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
@@ -947,7 +1144,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
txmsg.class = 0;
txmsg.tag = 0;
err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
- (void *) prmmsg, 8);
+ (void *) iprm_shutdown, 8);
if (err) {
switch (err) {
case 1:
@@ -1001,6 +1198,98 @@ static int iucv_sock_release(struct socket *sock)
return err;
}
+/* getsockopt and setsockopt */
+static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ int val;
+ int rc;
+
+ if (level != SOL_IUCV)
+ return -ENOPROTOOPT;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *) optval))
+ return -EFAULT;
+
+ rc = 0;
+
+ lock_sock(sk);
+ switch (optname) {
+ case SO_IPRMDATA_MSG:
+ if (val)
+ iucv->flags |= IUCV_IPRMDATA;
+ else
+ iucv->flags &= ~IUCV_IPRMDATA;
+ break;
+ case SO_MSGLIMIT:
+ switch (sk->sk_state) {
+ case IUCV_OPEN:
+ case IUCV_BOUND:
+ if (val < 1 || val > (u16)(~0))
+ rc = -EINVAL;
+ else
+ iucv->msglimit = val;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ rc = -ENOPROTOOPT;
+ break;
+ }
+ release_sock(sk);
+
+ return rc;
+}
+
+static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ int val, len;
+
+ if (level != SOL_IUCV)
+ return -ENOPROTOOPT;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ if (len < 0)
+ return -EINVAL;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ switch (optname) {
+ case SO_IPRMDATA_MSG:
+ val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
+ break;
+ case SO_MSGLIMIT:
+ lock_sock(sk);
+ val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
+ : iucv->msglimit; /* default */
+ release_sock(sk);
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
u8 ipvmid[8], u8 ipuser[16])
@@ -1054,7 +1343,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
}
/* Create the new socket */
- nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
+ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
if (!nsk) {
err = iucv_path_sever(path, user_data);
iucv_path_free(path);
@@ -1077,7 +1366,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
memcpy(nuser_data + 8, niucv->src_name, 8);
ASCEBC(nuser_data + 8, 8);
- path->msglim = IUCV_QUEUELEN_DEFAULT;
+ /* set message limit for path based on msglimit of accepting socket */
+ niucv->msglimit = iucv->msglimit;
+ path->msglim = iucv->msglimit;
err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
if (err) {
err = iucv_path_sever(path, user_data);
@@ -1113,27 +1404,28 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
struct sock_msg_q *save_msg;
int len;
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ iucv_message_reject(path, msg);
return;
+ }
+
+ spin_lock(&iucv->message_q.lock);
if (!list_empty(&iucv->message_q.list) ||
!skb_queue_empty(&iucv->backlog_skb_q))
goto save_message;
len = atomic_read(&sk->sk_rmem_alloc);
- len += msg->length + sizeof(struct sk_buff);
+ len += iucv_msg_length(msg) + sizeof(struct sk_buff);
if (len > sk->sk_rcvbuf)
goto save_message;
- skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
+ skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
if (!skb)
goto save_message;
- spin_lock(&iucv->message_q.lock);
iucv_process_message(sk, skb, path, msg);
- spin_unlock(&iucv->message_q.lock);
-
- return;
+ goto out_unlock;
save_message:
save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
@@ -1142,8 +1434,9 @@ save_message:
save_msg->path = path;
save_msg->msg = *msg;
- spin_lock(&iucv->message_q.lock);
list_add_tail(&save_msg->list, &iucv->message_q.list);
+
+out_unlock:
spin_unlock(&iucv->message_q.lock);
}
@@ -1160,7 +1453,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
spin_lock_irqsave(&list->lock, flags);
while (list_skb != (struct sk_buff *)list) {
- if (!memcmp(&msg->tag, list_skb->cb, 4)) {
+ if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
this = list_skb;
break;
}
@@ -1196,6 +1489,21 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
sk->sk_state_change(sk);
}
+/* called if the other communication side shuts down its RECV direction;
+ * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
+ */
+static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
+{
+ struct sock *sk = path->private;
+
+ bh_lock_sock(sk);
+ if (sk->sk_state != IUCV_CLOSED) {
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ sk->sk_state_change(sk);
+ }
+ bh_unlock_sock(sk);
+}
+
static struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
@@ -1212,8 +1520,8 @@ static struct proto_ops iucv_sock_ops = {
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
.shutdown = iucv_sock_shutdown,
- .setsockopt = sock_no_setsockopt,
- .getsockopt = sock_no_getsockopt
+ .setsockopt = iucv_sock_setsockopt,
+ .getsockopt = iucv_sock_getsockopt,
};
static struct net_proto_family iucv_sock_family_ops = {
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a35240f61ec..61e8038a55e 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -280,6 +280,7 @@ union iucv_param {
* Anchor for per-cpu IUCV command parameter block.
*/
static union iucv_param *iucv_param[NR_CPUS];
+static union iucv_param *iucv_param_irq[NR_CPUS];
/**
* iucv_call_b2f0
@@ -358,7 +359,7 @@ static void iucv_allow_cpu(void *data)
* 0x10 - Flag to allow priority message completion interrupts
* 0x08 - Flag to allow IUCV control interrupts
*/
- parm = iucv_param[cpu];
+ parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
parm->set_mask.ipmask = 0xf8;
iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -379,7 +380,7 @@ static void iucv_block_cpu(void *data)
union iucv_param *parm;
/* Disable all iucv interrupts. */
- parm = iucv_param[cpu];
+ parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
iucv_call_b2f0(IUCV_SETMASK, parm);
@@ -403,7 +404,7 @@ static void iucv_declare_cpu(void *data)
return;
/* Declare interrupt buffer. */
- parm = iucv_param[cpu];
+ parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
@@ -460,7 +461,7 @@ static void iucv_retrieve_cpu(void *data)
iucv_block_cpu(NULL);
/* Retrieve interrupt buffer. */
- parm = iucv_param[cpu];
+ parm = iucv_param_irq[cpu];
iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
/* Clear indication that an iucv buffer exists for this cpu. */
@@ -574,11 +575,22 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
iucv_irq_data[cpu] = NULL;
return NOTIFY_BAD;
}
+ iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param_irq[cpu]) {
+ kfree(iucv_param[cpu]);
+ iucv_param[cpu] = NULL;
+ kfree(iucv_irq_data[cpu]);
+ iucv_irq_data[cpu] = NULL;
+ return NOTIFY_BAD;
+ }
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
+ kfree(iucv_param_irq[cpu]);
+ iucv_param_irq[cpu] = NULL;
kfree(iucv_param[cpu]);
iucv_param[cpu] = NULL;
kfree(iucv_irq_data[cpu]);
@@ -625,7 +637,7 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
{
union iucv_param *parm;
- parm = iucv_param[smp_processor_id()];
+ parm = iucv_param_irq[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
if (userdata)
memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
@@ -918,10 +930,8 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
if (iucv_active_cpu != smp_processor_id())
spin_lock_bh(&iucv_table_lock);
rc = iucv_sever_pathid(path->pathid, userdata);
- if (!rc) {
- iucv_path_table[path->pathid] = NULL;
- list_del_init(&path->list);
- }
+ iucv_path_table[path->pathid] = NULL;
+ list_del_init(&path->list);
if (iucv_active_cpu != smp_processor_id())
spin_unlock_bh(&iucv_table_lock);
preempt_enable();
@@ -1378,6 +1388,8 @@ static void iucv_path_complete(struct iucv_irq_data *data)
struct iucv_path_complete *ipc = (void *) data;
struct iucv_path *path = iucv_path_table[ipc->ippathid];
+ if (path)
+ path->flags = ipc->ipflags1;
if (path && path->handler && path->handler->path_complete)
path->handler->path_complete(path, ipc->ipuser);
}
@@ -1413,7 +1425,7 @@ static void iucv_path_severed(struct iucv_irq_data *data)
else {
iucv_sever_pathid(path->pathid, NULL);
iucv_path_table[path->pathid] = NULL;
- list_del_init(&path->list);
+ list_del(&path->list);
iucv_path_free(path);
}
}
@@ -1717,6 +1729,13 @@ static int __init iucv_init(void)
rc = -ENOMEM;
goto out_free;
}
+ iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param_irq[cpu]) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
}
rc = register_hotcpu_notifier(&iucv_cpu_notifier);
if (rc)
@@ -1734,6 +1753,8 @@ out_cpu:
unregister_hotcpu_notifier(&iucv_cpu_notifier);
out_free:
for_each_possible_cpu(cpu) {
+ kfree(iucv_param_irq[cpu]);
+ iucv_param_irq[cpu] = NULL;
kfree(iucv_param[cpu]);
iucv_param[cpu] = NULL;
kfree(iucv_irq_data[cpu]);
@@ -1764,6 +1785,8 @@ static void __exit iucv_exit(void)
spin_unlock_irq(&iucv_queue_lock);
unregister_hotcpu_notifier(&iucv_cpu_notifier);
for_each_possible_cpu(cpu) {
+ kfree(iucv_param_irq[cpu]);
+ iucv_param_irq[cpu] = NULL;
kfree(iucv_param[cpu]);
iucv_param[cpu] = NULL;
kfree(iucv_irq_data[cpu]);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 19c4b4589fe..a5afb79dab6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1397,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
* mac80211. That also explains the __skb_push()
* below.
*/
- align = (unsigned long)skb->data & 4;
+ align = (unsigned long)skb->data & 3;
if (align) {
if (WARN_ON(skb_headroom(skb) < 3)) {
dev_kfree_skb(skb);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0ea36e0c8a0..f13fc57e1ec 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -988,7 +988,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
- char *helpname;
+ char *helpname = NULL;
int err;
/* don't change helper of sibling connections */
@@ -1231,7 +1231,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
rcu_read_lock();
if (cda[CTA_HELP]) {
- char *helpname;
+ char *helpname = NULL;
err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
if (err < 0)
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 834c6eb7f48..c0519139679 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -256,13 +256,11 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
{
struct netlbl_af4list *entry;
- entry = netlbl_af4list_search(addr, head);
- if (entry != NULL && entry->addr == addr && entry->mask == mask) {
- netlbl_af4list_remove_entry(entry);
- return entry;
- }
-
- return NULL;
+ entry = netlbl_af4list_search_exact(addr, mask, head);
+ if (entry == NULL)
+ return NULL;
+ netlbl_af4list_remove_entry(entry);
+ return entry;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -299,15 +297,11 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
{
struct netlbl_af6list *entry;
- entry = netlbl_af6list_search(addr, head);
- if (entry != NULL &&
- ipv6_addr_equal(&entry->addr, addr) &&
- ipv6_addr_equal(&entry->mask, mask)) {
- netlbl_af6list_remove_entry(entry);
- return entry;
- }
-
- return NULL;
+ entry = netlbl_af6list_search_exact(addr, mask, head);
+ if (entry == NULL)
+ return NULL;
+ netlbl_af6list_remove_entry(entry);
+ return entry;
}
#endif /* IPv6 */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4e705f87969..3be0e016ab7 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1084,8 +1084,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
- if (len > 65536)
- return -EMSGSIZE;
+ if (len > 65536) {
+ err = -EMSGSIZE;
+ goto out;
+ }
SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;