author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 11:35:36 -0800
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 11:35:36 -0800
commit     4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree       a20c125b27740ec7b4c761b11d801108e1b316b2 /drivers/net/ethernet/sun/sunvnet.c
parent     47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent     98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'drivers/net/ethernet/sun/sunvnet.c')
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 1127
1 file changed, 918 insertions(+), 209 deletions(-)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index f67539650c3..d2835bf7b4f 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -15,6 +15,16 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/icmpv6.h>
+#endif
+
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
#include <asm/vio.h>
#include <asm/ldc.h>
@@ -32,13 +42,20 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
+#define VNET_MAX_TXQS 16
+
/* Heuristic for the number of times to exponentially backoff and
* retry sending an LDC trigger when EAGAIN is encountered
*/
#define VNET_MAX_RETRIES 10
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
+ { .major = 1, .minor = 8 },
+ { .major = 1, .minor = 7 },
+ { .major = 1, .minor = 6 },
{ .major = 1, .minor = 0 },
};
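
[Note: the VNET_MAX_RETRIES heuristic above bounds the trigger-send loop in __vnet_tx_trigger(); the loop body itself falls outside the hunks shown here. A minimal sketch of the pattern, assuming vio_ldc_send() keeps returning -EAGAIN while the channel is congested:

	int err, delay = 1, retries = 0;

	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;		/* trigger accepted */
			break;
		}
		udelay(delay);			/* back off: 1, 2, 4, ... us */
		if ((delay <<= 1) > 128)
			delay = 128;		/* cap the per-retry delay */
		if (retries++ > VNET_MAX_RETRIES)
			break;			/* give up, caller sees -EAGAIN */
	} while (err == -EAGAIN);
]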
@@ -60,31 +77,67 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
return -ECONNRESET;
}
+static int vnet_port_alloc_tx_ring(struct vnet_port *port);
+
static int vnet_send_attr(struct vio_driver_state *vio)
{
struct vnet_port *port = to_vnet_port(vio);
struct net_device *dev = port->vp->dev;
struct vio_net_attr_info pkt;
- int i;
+ int framelen = ETH_FRAME_LEN;
+ int i, err;
+
+ err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
+ if (err)
+ return err;
memset(&pkt, 0, sizeof(pkt));
pkt.tag.type = VIO_TYPE_CTRL;
pkt.tag.stype = VIO_SUBTYPE_INFO;
pkt.tag.stype_env = VIO_ATTR_INFO;
pkt.tag.sid = vio_send_sid(vio);
- pkt.xfer_mode = VIO_DRING_MODE;
+ if (vio_version_before(vio, 1, 2))
+ pkt.xfer_mode = VIO_DRING_MODE;
+ else
+ pkt.xfer_mode = VIO_NEW_DRING_MODE;
pkt.addr_type = VNET_ADDR_ETHERMAC;
pkt.ack_freq = 0;
for (i = 0; i < 6; i++)
pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
- pkt.mtu = ETH_FRAME_LEN;
+ if (vio_version_after(vio, 1, 3)) {
+ if (port->rmtu) {
+ port->rmtu = min(VNET_MAXPACKET, port->rmtu);
+ pkt.mtu = port->rmtu;
+ } else {
+ port->rmtu = VNET_MAXPACKET;
+ pkt.mtu = port->rmtu;
+ }
+ if (vio_version_after_eq(vio, 1, 6))
+ pkt.options = VIO_TX_DRING;
+ } else if (vio_version_before(vio, 1, 3)) {
+ pkt.mtu = framelen;
+ } else { /* v1.3 */
+ pkt.mtu = framelen + VLAN_HLEN;
+ }
+
+ pkt.cflags = 0;
+ if (vio_version_after_eq(vio, 1, 7) && port->tso) {
+ pkt.cflags |= VNET_LSO_IPV4_CAPAB;
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ pkt.ipv4_lso_maxlen = port->tsolen;
+ }
+
+ pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
- "ackfreq[%u] mtu[%llu]\n",
+ "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
+ "cflags[0x%04x] lso_max[%u]\n",
pkt.xfer_mode, pkt.addr_type,
- (unsigned long long) pkt.addr,
- pkt.ack_freq,
- (unsigned long long) pkt.mtu);
+ (unsigned long long)pkt.addr,
+ pkt.ack_freq, pkt.plnk_updt, pkt.options,
+ (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
+
return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
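
[Note: the vio_version_before()/vio_version_after()/vio_version_after_eq() predicates used throughout this patch come from the sparc VIO core (asm/vio.h). A sketch of the assumed semantics, an unsigned compare of the negotiated (major, minor) pair packed major-first:

	static inline bool vio_version_before(struct vio_driver_state *vio,
					      u16 major, u16 minor)
	{
		u32 have = (u32)vio->ver.major << 16 | vio->ver.minor;
		u32 want = (u32)major << 16 | minor;

		return have < want;
	}
	/* vio_version_after() uses '>', vio_version_after_eq() uses '>='. */
]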
@@ -92,18 +145,72 @@ static int vnet_send_attr(struct vio_driver_state *vio)
static int handle_attr_info(struct vio_driver_state *vio,
struct vio_net_attr_info *pkt)
{
- viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
- "ackfreq[%u] mtu[%llu]\n",
+ struct vnet_port *port = to_vnet_port(vio);
+ u64 localmtu;
+ u8 xfer_mode;
+
+ viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
+ "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
+ " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
pkt->xfer_mode, pkt->addr_type,
- (unsigned long long) pkt->addr,
- pkt->ack_freq,
- (unsigned long long) pkt->mtu);
+ (unsigned long long)pkt->addr,
+ pkt->ack_freq, pkt->plnk_updt, pkt->options,
+ (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
+ pkt->ipv4_lso_maxlen);
pkt->tag.sid = vio_send_sid(vio);
- if (pkt->xfer_mode != VIO_DRING_MODE ||
+ xfer_mode = pkt->xfer_mode;
+ /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
+ if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
+ xfer_mode = VIO_NEW_DRING_MODE;
+
+ /* MTU negotiation:
+ * < v1.3 - ETH_FRAME_LEN exactly
+ * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
+ * pkt->mtu for ACK
+ * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
+ */
+ if (vio_version_before(vio, 1, 3)) {
+ localmtu = ETH_FRAME_LEN;
+ } else if (vio_version_after(vio, 1, 3)) {
+ localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
+ localmtu = min(pkt->mtu, localmtu);
+ pkt->mtu = localmtu;
+ } else { /* v1.3 */
+ localmtu = ETH_FRAME_LEN + VLAN_HLEN;
+ }
+ port->rmtu = localmtu;
+
+ /* LSO negotiation */
+ if (vio_version_after_eq(vio, 1, 7))
+ port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
+ else
+ port->tso = false;
+ if (port->tso) {
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
+ if (port->tsolen < VNET_MINTSO) {
+ port->tso = false;
+ port->tsolen = 0;
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ }
+ pkt->ipv4_lso_maxlen = port->tsolen;
+ } else {
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ pkt->ipv4_lso_maxlen = 0;
+ }
+
+ /* for version >= 1.6, ACK packet mode we support */
+ if (vio_version_after_eq(vio, 1, 6)) {
+ pkt->xfer_mode = VIO_NEW_DRING_MODE;
+ pkt->options = VIO_TX_DRING;
+ }
+
+ if (!(xfer_mode | VIO_NEW_DRING_MODE) ||
pkt->addr_type != VNET_ADDR_ETHERMAC ||
- pkt->mtu != ETH_FRAME_LEN) {
+ pkt->mtu != localmtu) {
viodbg(HS, "SEND NET ATTR NACK\n");
pkt->tag.stype = VIO_SUBTYPE_NACK;
@@ -112,7 +219,14 @@ static int handle_attr_info(struct vio_driver_state *vio,
return -ECONNRESET;
} else {
- viodbg(HS, "SEND NET ATTR ACK\n");
+ viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
+ "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
+ "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
+ pkt->xfer_mode, pkt->addr_type,
+ (unsigned long long)pkt->addr,
+ pkt->ack_freq, pkt->plnk_updt, pkt->options,
+ (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
+ pkt->ipv4_lso_maxlen);
pkt->tag.stype = VIO_SUBTYPE_ACK;
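
[Note: a worked example of the >v1.3 MTU negotiation above, assuming VNET_MAXPACKET is 65535 plus link headers (65553 with ETH_HLEN 14 and VLAN_HLEN 4); the numbers are illustrative, not from the source:

	/* Peer INFO arrives with pkt->mtu = 65535, local port->rmtu = 0:
	 *   localmtu = min(65535, VNET_MAXPACKET) = 65535
	 *   pkt->mtu = 65535 is echoed back in the ACK
	 *   port->rmtu = 65535 on both ends after the exchange
	 * A peer advertising less (e.g. 9000) wins the min() instead. */
]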
@@ -199,16 +313,48 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
return skb;
}
-static int vnet_rx_one(struct vnet_port *port, unsigned int len,
- struct ldc_trans_cookie *cookies, int ncookies)
+static inline void vnet_fullcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ int offset = skb_transport_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_level = 1;
+ skb->csum = 0;
+ if (iph->protocol == IPPROTO_TCP) {
+ struct tcphdr *ptcp = tcp_hdr(skb);
+
+ ptcp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ struct udphdr *pudp = udp_hdr(skb);
+
+ pudp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_UDP,
+ skb->csum);
+ }
+}
+
+static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
struct net_device *dev = port->vp->dev;
+ unsigned int len = desc->size;
unsigned int copy_len;
struct sk_buff *skb;
int err;
err = -EMSGSIZE;
- if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
+ if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
dev->stats.rx_length_errors++;
goto out_dropped;
}
@@ -224,7 +370,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_put(skb, copy_len);
err = ldc_copy(port->vio.lp, LDC_COPY_IN,
skb->data, copy_len, 0,
- cookies, ncookies);
+ desc->cookies, desc->ncookies);
if (unlikely(err < 0)) {
dev->stats.rx_frame_errors++;
goto out_free_skb;
@@ -234,11 +380,33 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
skb_trim(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ if (vio_version_after_eq(&port->vio, 1, 8)) {
+ struct vio_net_dext *dext = vio_net_ext(desc);
- netif_rx(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
+ if (skb->protocol == ETH_P_IP) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ iph->check = 0;
+ ip_send_check(iph);
+ }
+ }
+ if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
+ skb->ip_summed == CHECKSUM_NONE)
+ vnet_fullcsum(skb);
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_level = 0;
+ if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
+ skb->csum_level = 1;
+ }
+ }
+
+ skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ napi_gro_receive(&port->napi, skb);
return 0;
out_free_skb:
@@ -283,28 +451,19 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
port->raddr[0], port->raddr[1],
port->raddr[2], port->raddr[3],
port->raddr[4], port->raddr[5]);
- err = -ECONNRESET;
+ break;
}
} while (err == -EAGAIN);
- return err;
-}
-
-static u32 next_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (++idx == dr->num_entries)
- idx = 0;
- return idx;
-}
-
-static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
-{
- if (idx == 0)
- idx = dr->num_entries - 1;
- else
- idx--;
+ if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
+ port->stop_rx_idx = end;
+ port->stop_rx = true;
+ } else {
+ port->stop_rx_idx = 0;
+ port->stop_rx = false;
+ }
- return idx;
+ return err;
}
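
[Note: the open-coded next_idx()/prev_idx() removed above are replaced by shared helpers from asm/vio.h, presumably carrying the same wrap-around arithmetic keyed off dr->num_entries instead of a driver-local constant. A sketch of the assumed shape:

	static inline u32 vio_dring_next(struct vio_dring_state *dr, u32 index)
	{
		if (++index == dr->num_entries)
			index = 0;
		return index;
	}

	static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
	{
		if (index == 0)
			index = dr->num_entries - 1;
		else
			index--;
		return index;
	}
]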
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
@@ -347,6 +506,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
struct vio_driver_state *vio = &port->vio;
int err;
+ BUG_ON(desc == NULL);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -361,7 +521,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
desc->cookies[0].cookie_addr,
desc->cookies[0].cookie_size);
- err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
+ err = vnet_rx_one(port, desc);
if (err == -ECONNRESET)
return err;
desc->hdr.state = VIO_DESC_DONE;
@@ -373,12 +533,14 @@ static int vnet_walk_rx_one(struct vnet_port *port,
}
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
- u32 start, u32 end)
+ u32 start, u32 end, int *npkts, int budget)
{
struct vio_driver_state *vio = &port->vio;
int ack_start = -1, ack_end = -1;
+ bool send_ack = true;
- end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+ end = (end == (u32) -1) ? vio_dring_prev(dr, start)
+ : vio_dring_next(dr, end);
viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
@@ -388,10 +550,11 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
if (err != 0)
break;
+ (*npkts)++;
if (ack_start == -1)
ack_start = start;
ack_end = start;
- start = next_idx(start, dr);
+ start = vio_dring_next(dr, start);
if (ack && start != end) {
err = vnet_send_ack(port, dr, ack_start, ack_end,
VIO_DRING_ACTIVE);
@@ -399,13 +562,26 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
return err;
ack_start = -1;
}
+ if ((*npkts) >= budget) {
+ send_ack = false;
+ break;
+ }
}
if (unlikely(ack_start == -1))
- ack_start = ack_end = prev_idx(start, dr);
- return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+ ack_start = ack_end = vio_dring_prev(dr, start);
+ if (send_ack) {
+ port->napi_resume = false;
+ return vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_STOPPED);
+ } else {
+ port->napi_resume = true;
+ port->napi_stop_idx = ack_end;
+ return 1;
+ }
}
-static int vnet_rx(struct vnet_port *port, void *msgbuf)
+static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
+ int budget)
{
struct vio_dring_data *pkt = msgbuf;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
@@ -422,11 +598,13 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
return 0;
}
- dr->rcv_nxt++;
+ if (!port->napi_resume)
+ dr->rcv_nxt++;
/* XXX Validate pkt->start_idx and pkt->end_idx XXX */
- return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
+ npkts, budget);
}
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
@@ -439,7 +617,7 @@ static int idx_is_pending(struct vio_dring_state *dr, u32 end)
found = 1;
break;
}
- idx = next_idx(idx, dr);
+ idx = vio_dring_next(dr, idx);
}
return found;
}
@@ -451,19 +629,42 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
struct net_device *dev;
struct vnet *vp;
u32 end;
+ struct vio_net_desc *desc;
+ struct netdev_queue *txq;
if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
return 0;
end = pkt->end_idx;
- if (unlikely(!idx_is_pending(dr, end)))
+ vp = port->vp;
+ dev = vp->dev;
+ netif_tx_lock(dev);
+ if (unlikely(!idx_is_pending(dr, end))) {
+ netif_tx_unlock(dev);
return 0;
+ }
- dr->cons = next_idx(end, dr);
+ /* sync for race conditions with vnet_start_xmit() and tell xmit it
+ * is time to send a trigger.
+ */
+ dr->cons = vio_dring_next(dr, end);
+ desc = vio_dring_entry(dr, dr->cons);
+ if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
+ /* vnet_start_xmit() just populated this dring but missed
+ * sending the "start" LDC message to the consumer.
+ * Send a "start" trigger on its behalf.
+ */
+ if (__vnet_tx_trigger(port, dr->cons) > 0)
+ port->start_cons = false;
+ else
+ port->start_cons = true;
+ } else {
+ port->start_cons = true;
+ }
+ netif_tx_unlock(dev);
- vp = port->vp;
- dev = vp->dev;
- if (unlikely(netif_queue_stopped(dev) &&
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ if (unlikely(netif_tx_queue_stopped(txq) &&
vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
return 1;
@@ -491,56 +692,64 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
return 0;
}
-static void maybe_tx_wakeup(unsigned long param)
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
{
- struct vnet *vp = (struct vnet *)param;
- struct net_device *dev = vp->dev;
-
- netif_tx_lock(dev);
- if (likely(netif_queue_stopped(dev))) {
- struct vnet_port *port;
- int wake = 1;
+ struct netdev_queue *txq;
- list_for_each_entry(port, &vp->port_list, list) {
- struct vio_dring_state *dr;
+ txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
+ __netif_tx_lock(txq, smp_processor_id());
+ if (likely(netif_tx_queue_stopped(txq))) {
+ struct vio_dring_state *dr;
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (vnet_tx_dring_avail(dr) <
- VNET_TX_WAKEUP_THRESH(dr)) {
- wake = 0;
- break;
- }
- }
- if (wake)
- netif_wake_queue(dev);
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ netif_tx_wake_queue(txq);
}
- netif_tx_unlock(dev);
+ __netif_tx_unlock(txq);
}
-static void vnet_event(void *arg, int event)
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+ struct vio_driver_state *vio = &vnet->vio;
+
+ return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
+static int vnet_event_napi(struct vnet_port *port, int budget)
{
- struct vnet_port *port = arg;
struct vio_driver_state *vio = &port->vio;
- unsigned long flags;
int tx_wakeup, err;
+ int npkts = 0;
+ int event = (port->rx_event & LDC_EVENT_RESET);
- spin_lock_irqsave(&vio->lock, flags);
-
+ldc_ctrl:
if (unlikely(event == LDC_EVENT_RESET ||
event == LDC_EVENT_UP)) {
vio_link_state_change(vio, event);
- spin_unlock_irqrestore(&vio->lock, flags);
- if (event == LDC_EVENT_RESET)
+ if (event == LDC_EVENT_RESET) {
+ port->rmtu = 0;
+ port->tso = true;
+ port->tsolen = 0;
vio_port_up(vio);
- return;
+ }
+ port->rx_event = 0;
+ return 0;
}
+ /* We may have multiple LDC events in rx_event. Unroll send_events() */
+ event = (port->rx_event & LDC_EVENT_UP);
+ port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
+ if (event == LDC_EVENT_UP)
+ goto ldc_ctrl;
+ event = port->rx_event;
+ if (!(event & LDC_EVENT_DATA_READY))
+ return 0;
- if (unlikely(event != LDC_EVENT_DATA_READY)) {
- pr_warning("Unexpected LDC event %d\n", event);
- spin_unlock_irqrestore(&vio->lock, flags);
- return;
- }
+ /* we dont expect any other bits than RESET, UP, DATA_READY */
+ BUG_ON(event != LDC_EVENT_DATA_READY);
tx_wakeup = err = 0;
while (1) {
@@ -549,6 +758,20 @@ static void vnet_event(void *arg, int event)
u64 raw[8];
} msgbuf;
+ if (port->napi_resume) {
+ struct vio_dring_data *pkt =
+ (struct vio_dring_data *)&msgbuf;
+ struct vio_dring_state *dr =
+ &port->vio.drings[VIO_DRIVER_RX_RING];
+
+ pkt->tag.type = VIO_TYPE_DATA;
+ pkt->tag.stype = VIO_SUBTYPE_INFO;
+ pkt->tag.stype_env = VIO_DRING_DATA;
+ pkt->seq = dr->rcv_nxt;
+ pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
+ pkt->end_idx = -1;
+ goto napi_resume;
+ }
err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
if (unlikely(err < 0)) {
if (err == -ECONNRESET)
@@ -565,10 +788,22 @@ static void vnet_event(void *arg, int event)
err = vio_validate_sid(vio, &msgbuf.tag);
if (err < 0)
break;
-
+napi_resume:
if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
- err = vnet_rx(port, &msgbuf);
+ if (!port_is_up(port)) {
+ /* failures like handshake_failure()
+ * may have cleaned up dring, but
+ * NAPI polling may bring us here.
+ */
+ err = -ECONNRESET;
+ break;
+ }
+ err = vnet_rx(port, &msgbuf, &npkts, budget);
+ if (npkts >= budget)
+ break;
+ if (npkts == 0)
+ break;
} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
err = vnet_ack(port, &msgbuf);
if (err > 0)
@@ -589,18 +824,37 @@ static void vnet_event(void *arg, int event)
if (err == -ECONNRESET)
break;
}
- spin_unlock(&vio->lock);
- /* Kick off a tasklet to wake the queue. We cannot call
- * maybe_tx_wakeup directly here because we could deadlock on
- * netif_tx_lock() with dev_watchdog()
- */
if (unlikely(tx_wakeup && err != -ECONNRESET))
- tasklet_schedule(&port->vp->vnet_tx_wakeup);
+ maybe_tx_wakeup(port);
+ return npkts;
+}
- local_irq_restore(flags);
+static int vnet_poll(struct napi_struct *napi, int budget)
+{
+ struct vnet_port *port = container_of(napi, struct vnet_port, napi);
+ struct vio_driver_state *vio = &port->vio;
+ int processed = vnet_event_napi(port, budget);
+
+ if (processed < budget) {
+ napi_complete(napi);
+ port->rx_event &= ~LDC_EVENT_DATA_READY;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+ }
+ return processed;
}
-static int __vnet_tx_trigger(struct vnet_port *port)
+static void vnet_event(void *arg, int event)
+{
+ struct vnet_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+
+ port->rx_event |= event;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
+ napi_schedule(&port->napi);
+
+}
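
[Note: with the tasklet gone, receive processing follows the usual NAPI shape: the LDC interrupt only records the event and masks itself, and all work happens in the poll loop. Schematically (a summary of the hunks above, not new source):

	LDC interrupt -> vnet_event():
		port->rx_event |= event;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
		napi_schedule(&port->napi);

	NAPI softirq -> vnet_poll():
		processed = vnet_event_napi(port, budget);
		if (processed < budget) {
			napi_complete(napi);
			vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
		}

Leaving the interrupt masked while work remains is what lets the next poll resume from port->napi_stop_idx without waiting for a fresh LDC message.]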
+
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct vio_dring_data hdr = {
@@ -611,12 +865,21 @@ static int __vnet_tx_trigger(struct vnet_port *port)
.sid = vio_send_sid(&port->vio),
},
.dring_ident = dr->ident,
- .start_idx = dr->prod,
+ .start_idx = start,
.end_idx = (u32) -1,
};
int err, delay;
int retries = 0;
+ if (port->stop_rx) {
+ err = vnet_send_ack(port,
+ &port->vio.drings[VIO_DRIVER_RX_RING],
+ port->stop_rx_idx, -1,
+ VIO_DRING_STOPPED);
+ if (err <= 0)
+ return err;
+ }
+
hdr.seq = dr->snd_nxt;
delay = 1;
do {
@@ -635,26 +898,19 @@ static int __vnet_tx_trigger(struct vnet_port *port)
return err;
}
-static inline bool port_is_up(struct vnet_port *vnet)
-{
- struct vio_driver_state *vio = &vnet->vio;
-
- return !!(vio->hs_state & VIO_HS_COMPLETE);
-}
-
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
struct hlist_head *hp = &vp->port_hash[hash];
struct vnet_port *port;
- hlist_for_each_entry(port, hp, hash) {
+ hlist_for_each_entry_rcu(port, hp, hash) {
if (!port_is_up(port))
continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
- list_for_each_entry(port, &vp->port_list, list) {
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (!port->switch_port)
continue;
if (!port_is_up(port))
@@ -664,58 +920,432 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
return NULL;
}
-struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
+static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
+ unsigned *pending)
{
- struct vnet_port *ret;
- unsigned long flags;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct sk_buff *skb = NULL;
+ int i, txi;
- spin_lock_irqsave(&vp->lock, flags);
- ret = __tx_port_find(vp, skb);
- spin_unlock_irqrestore(&vp->lock, flags);
+ *pending = 0;
+
+ txi = dr->prod-1;
+ if (txi < 0)
+ txi = VNET_TX_RING_SIZE-1;
+
+ for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
+ struct vio_net_desc *d;
+
+ d = vio_dring_entry(dr, txi);
+
+ if (d->hdr.state == VIO_DESC_DONE) {
+ if (port->tx_bufs[txi].skb) {
+ BUG_ON(port->tx_bufs[txi].skb->next);
+
+ port->tx_bufs[txi].skb->next = skb;
+ skb = port->tx_bufs[txi].skb;
+ port->tx_bufs[txi].skb = NULL;
+
+ ldc_unmap(port->vio.lp,
+ port->tx_bufs[txi].cookies,
+ port->tx_bufs[txi].ncookies);
+ }
+ d->hdr.state = VIO_DESC_FREE;
+ } else if (d->hdr.state == VIO_DESC_READY) {
+ (*pending)++;
+ } else if (d->hdr.state == VIO_DESC_FREE) {
+ break;
+ }
+ --txi;
+ if (txi < 0)
+ txi = VNET_TX_RING_SIZE-1;
+ }
+ return skb;
+}
+
+static inline void vnet_free_skbs(struct sk_buff *skb)
+{
+ struct sk_buff *next;
+
+ while (skb) {
+ next = skb->next;
+ skb->next = NULL;
+ dev_kfree_skb(skb);
+ skb = next;
+ }
+}
+
+static void vnet_clean_timer_expire(unsigned long port0)
+{
+ struct vnet_port *port = (struct vnet_port *)port0;
+ struct sk_buff *freeskbs;
+ unsigned pending;
+
+ netif_tx_lock(port->vp->dev);
+ freeskbs = vnet_clean_tx_ring(port, &pending);
+ netif_tx_unlock(port->vp->dev);
+
+ vnet_free_skbs(freeskbs);
- return ret;
+ if (pending)
+ (void)mod_timer(&port->clean_timer,
+ jiffies + VNET_CLEAN_TIMEOUT);
+ else
+ del_timer(&port->clean_timer);
+}
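
[Note: the reclaim model introduced here never frees TX skbs from the ACK path. vnet_start_xmit() calls vnet_clean_tx_ring() opportunistically on every transmit, and clean_timer covers the idle case. An illustrative timeline:

	/* xmit #1   -> descriptors marked READY, clean_timer armed
	 * peer runs -> consumed descriptors flip to VIO_DESC_DONE
	 * xmit #2   -> vnet_clean_tx_ring() frees the DONE skbs inline
	 * link idle -> clean_timer fires after VNET_CLEAN_TIMEOUT and
	 *              frees whatever the peer has finished with */
]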
+
+static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
+ struct ldc_trans_cookie *cookies, int ncookies,
+ unsigned int map_perm)
+{
+ int i, nc, err, blen;
+
+ /* header */
+ blen = skb_headlen(skb);
+ if (blen < ETH_ZLEN)
+ blen = ETH_ZLEN;
+ blen += VNET_PACKET_SKIP;
+ blen += 8 - (blen & 7);
+
+ err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies,
+ ncookies, map_perm);
+ if (err < 0)
+ return err;
+ nc = err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ if (nc < ncookies) {
+ vaddr = kmap_atomic(skb_frag_page(f));
+ blen = skb_frag_size(f);
+ blen += 8 - (blen & 7);
+ err = ldc_map_single(lp, vaddr + f->page_offset,
+ blen, cookies + nc, ncookies - nc,
+ map_perm);
+ kunmap_atomic(vaddr);
+ } else {
+ err = -EMSGSIZE;
+ }
+
+ if (err < 0) {
+ ldc_unmap(lp, cookies, nc);
+ return err;
+ }
+ nc += err;
+ }
+ return nc;
+}
+
+static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
+{
+ struct sk_buff *nskb;
+ int i, len, pad, docopy;
+
+ len = skb->len;
+ pad = 0;
+ if (len < ETH_ZLEN) {
+ pad += ETH_ZLEN - skb->len;
+ len += pad;
+ }
+ len += VNET_PACKET_SKIP;
+ pad += 8 - (len & 7);
+
+ /* make sure we have enough cookies and alignment in every frag */
+ docopy = skb_shinfo(skb)->nr_frags >= ncookies;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+ docopy |= f->page_offset & 7;
+ }
+ if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
+ skb_tailroom(skb) < pad ||
+ skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
+ int start = 0, offset;
+ __wsum csum;
+
+ len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+ nskb = alloc_and_align_skb(skb->dev, len);
+ if (nskb == NULL) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ skb_reserve(nskb, VNET_PACKET_SKIP);
+
+ nskb->protocol = skb->protocol;
+ offset = skb_mac_header(skb) - skb->data;
+ skb_set_mac_header(nskb, offset);
+ offset = skb_network_header(skb) - skb->data;
+ skb_set_network_header(nskb, offset);
+ offset = skb_transport_header(skb) - skb->data;
+ skb_set_transport_header(nskb, offset);
+
+ offset = 0;
+ nskb->csum_offset = skb->csum_offset;
+ nskb->ip_summed = skb->ip_summed;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ start = skb_checksum_start_offset(skb);
+ if (start) {
+ struct iphdr *iph = ip_hdr(nskb);
+ int offset = start + nskb->csum_offset;
+
+ if (skb_copy_bits(skb, 0, nskb->data, start)) {
+ dev_kfree_skb(nskb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ *(__sum16 *)(skb->data + offset) = 0;
+ csum = skb_copy_and_csum_bits(skb, start,
+ nskb->data + start,
+ skb->len - start, 0);
+ if (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP) {
+ csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - start,
+ iph->protocol, csum);
+ }
+ *(__sum16 *)(nskb->data + offset) = csum;
+
+ nskb->ip_summed = CHECKSUM_NONE;
+ } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+ dev_kfree_skb(nskb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ (void)skb_put(nskb, skb->len);
+ if (skb_is_gso(skb)) {
+ skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+ }
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+ return skb;
+}
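
[Note: the copy-avoidance test in vnet_skb_shape() is easier to follow with numbers. A worked example for a full-size frame, assuming VNET_PACKET_SKIP is 6 (the VIO frame's leading pad):

	/* skb->len = 1514:
	 *   len  = 1514                 (>= ETH_ZLEN, so pad starts at 0)
	 *   len += VNET_PACKET_SKIP     -> 1520
	 *   pad += 8 - (1520 & 7) = 8   -> pad = 8
	 * The skb is mapped in place only if it has >= 8 bytes of tailroom,
	 * >= 6 bytes of headroom, (skb->data & 7) == VNET_PACKET_SKIP (so
	 * that skb->data - 6 is 8-byte aligned for ldc_map_single()), and
	 * every frag offset is 8-byte aligned; otherwise it is copied into
	 * a freshly aligned skb. */
]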
+
+static u16
+vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port = __tx_port_find(vp, skb);
+
+ if (port == NULL)
+ return 0;
+ return port->q_index;
+}
+
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
+{
+ struct net_device *dev = port->vp->dev;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct sk_buff *segs;
+ int maclen, datalen;
+ int status;
+ int gso_size, gso_type, gso_segs;
+ int hlen = skb_transport_header(skb) - skb_mac_header(skb);
+ int proto = IPPROTO_IP;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = ipv6_hdr(skb)->nexthdr;
+
+ if (proto == IPPROTO_TCP)
+ hlen += tcp_hdr(skb)->doff * 4;
+ else if (proto == IPPROTO_UDP)
+ hlen += sizeof(struct udphdr);
+ else {
+ pr_err("vnet_handle_offloads GSO with unknown transport "
+ "protocol %d tproto %d\n", skb->protocol, proto);
+ hlen = 128; /* XXX */
+ }
+ datalen = port->tsolen - hlen;
+
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_type = skb_shinfo(skb)->gso_type;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (port->tso && gso_size < datalen)
+ gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
+
+ if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ netif_tx_stop_queue(txq);
+ if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
+ return NETDEV_TX_BUSY;
+ netif_tx_wake_queue(txq);
+ }
+
+ maclen = skb_network_header(skb) - skb_mac_header(skb);
+ skb_pull(skb, maclen);
+
+ if (port->tso && gso_size < datalen) {
+ /* segment to TSO size */
+ skb_shinfo(skb)->gso_size = datalen;
+ skb_shinfo(skb)->gso_segs = gso_segs;
+
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+
+ /* restore gso_size & gso_segs */
+ skb_shinfo(skb)->gso_size = gso_size;
+ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen,
+ gso_size);
+ } else
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+ if (IS_ERR(segs)) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ skb_push(skb, maclen);
+ skb_reset_mac_header(skb);
+
+ status = 0;
+ while (segs) {
+ struct sk_buff *curr = segs;
+
+ segs = segs->next;
+ curr->next = NULL;
+ if (port->tso && curr->len > dev->mtu) {
+ skb_shinfo(curr)->gso_size = gso_size;
+ skb_shinfo(curr)->gso_type = gso_type;
+ skb_shinfo(curr)->gso_segs =
+ DIV_ROUND_UP(curr->len - hlen, gso_size);
+ } else
+ skb_shinfo(curr)->gso_size = 0;
+
+ skb_push(curr, maclen);
+ skb_reset_mac_header(curr);
+ memcpy(skb_mac_header(curr), skb_mac_header(skb),
+ maclen);
+ curr->csum_start = skb_transport_header(curr) - curr->head;
+ if (ip_hdr(curr)->protocol == IPPROTO_TCP)
+ curr->csum_offset = offsetof(struct tcphdr, check);
+ else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
+ curr->csum_offset = offsetof(struct udphdr, check);
+
+ if (!(status & NETDEV_TX_MASK))
+ status = vnet_start_xmit(curr, dev);
+ if (status & NETDEV_TX_MASK)
+ dev_kfree_skb_any(curr);
+ }
+
+ if (!(status & NETDEV_TX_MASK))
+ dev_kfree_skb_any(skb);
+ return status;
}
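
[Note: a worked example of the re-segmentation arithmetic in vnet_handle_offloads(), with hypothetical sizes; only the formulas come from the code above:

	/* IPv4/TCP GSO skb, gso_size 1448 (below datalen, so the
	 * resegment branch is taken):
	 *   hlen    = 14 (eth) + 20 (ip) + 20 (tcp) = 54
	 *   tsolen  = 8192  ->  datalen = 8192 - 54 = 8138
	 *   skb->len = 65534 -> payload = 65480
	 *   gso_segs = DIV_ROUND_UP(65480, 8138) = 9
	 * so the skb is cut into 9 chunks that each fit the peer's
	 * advertised LSO limit before re-entering vnet_start_xmit(). */
]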
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port = tx_port_find(vp, skb);
+ struct vnet_port *port = NULL;
struct vio_dring_state *dr;
struct vio_net_desc *d;
- unsigned long flags;
unsigned int len;
- void *tx_buf;
- int i, err;
+ struct sk_buff *freeskbs = NULL;
+ int i, err, txi;
+ unsigned pending = 0;
+ struct netdev_queue *txq;
+
+ rcu_read_lock();
+ port = __tx_port_find(vp, skb);
+ if (unlikely(!port)) {
+ rcu_read_unlock();
+ goto out_dropped;
+ }
+
+ if (skb_is_gso(skb) && skb->len > port->tsolen) {
+ err = vnet_handle_offloads(port, skb);
+ rcu_read_unlock();
+ return err;
+ }
+
+ if (!skb_is_gso(skb) && skb->len > port->rmtu) {
+ unsigned long localmtu = port->rmtu - ETH_HLEN;
+
+ if (vio_version_after_eq(&port->vio, 1, 3))
+ localmtu -= VLAN_HLEN;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct flowi4 fl4;
+ struct rtable *rt = NULL;
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_oif = dev->ifindex;
+ fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+ fl4.daddr = ip_hdr(skb)->daddr;
+ fl4.saddr = ip_hdr(skb)->saddr;
+
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ rcu_read_unlock();
+ if (!IS_ERR(rt)) {
+ skb_dst_set(skb, &rt->dst);
+ icmp_send(skb, ICMP_DEST_UNREACH,
+ ICMP_FRAG_NEEDED,
+ htonl(localmtu));
+ }
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+#endif
+ goto out_dropped;
+ }
+
+ skb = vnet_skb_shape(skb, 2);
- if (unlikely(!port))
+ if (unlikely(!skb))
goto out_dropped;
- spin_lock_irqsave(&port->vio.lock, flags);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vnet_fullcsum(skb);
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ i = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, i);
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
dev->stats.tx_errors++;
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ rcu_read_unlock();
return NETDEV_TX_BUSY;
}
d = vio_dring_cur(dr);
- tx_buf = port->tx_bufs[dr->prod].buf;
- skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);
+ txi = dr->prod;
+
+ freeskbs = vnet_clean_tx_ring(port, &pending);
+
+ BUG_ON(port->tx_bufs[txi].skb);
len = skb->len;
- if (len < ETH_ZLEN) {
+ if (len < ETH_ZLEN)
len = ETH_ZLEN;
- memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
+
+ err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
+ (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
+ if (err < 0) {
+ netdev_info(dev, "tx buffer map error %d\n", err);
+ goto out_dropped;
}
+ port->tx_bufs[txi].skb = skb;
+ skb = NULL;
+ port->tx_bufs[txi].ncookies = err;
+
/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
* thus it is safe to not set VIO_ACK_ENABLE for each transmission:
* the protocol itself does not require it as long as the peer
@@ -726,9 +1356,24 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
d->hdr.ack = VIO_ACK_DISABLE;
d->size = len;
- d->ncookies = port->tx_bufs[dr->prod].ncookies;
+ d->ncookies = port->tx_bufs[txi].ncookies;
for (i = 0; i < d->ncookies; i++)
- d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];
+ d->cookies[i] = port->tx_bufs[txi].cookies[i];
+ if (vio_version_after_eq(&port->vio, 1, 7)) {
+ struct vio_net_dext *dext = vio_net_ext(d);
+
+ memset(dext, 0, sizeof(*dext));
+ if (skb_is_gso(port->tx_bufs[txi].skb)) {
+ dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
+ ->gso_size;
+ dext->flags |= VNET_PKT_IPV4_LSO;
+ }
+ if (vio_version_after_eq(&port->vio, 1, 8) &&
+ !port->switch_port) {
+ dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
+ dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
+ }
+ }
/* This has to be a non-SMP write barrier because we are writing
* to memory which is shared with the peer LDOM.
@@ -737,35 +1382,68 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
d->hdr.state = VIO_DESC_READY;
- err = __vnet_tx_trigger(port);
+ /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
+ * to notify the consumer that some descriptors are READY.
+ * After that "start" trigger, no additional triggers are needed until
+ * a DRING_STOPPED is received from the consumer. The dr->cons field
+ * (set up by vnet_ack()) has the value of the next dring index
+ * that has not yet been ack-ed. We send a "start" trigger here
+ * if, and only if, start_cons is true (reset it afterward). Conversely,
+ * vnet_ack() should check if the dring corresponding to cons
+ * is marked READY, but start_cons was false.
+ * If so, vnet_ack() should send out the missed "start" trigger.
+ *
+ * Note that the wmb() above makes sure the cookies et al. are
+ * not globally visible before the VIO_DESC_READY, and that the
+ * stores are ordered correctly by the compiler. The consumer will
+ * not proceed until the VIO_DESC_READY is visible assuring that
+ * the consumer does not observe anything related to descriptors
+ * out of order. The HV trap from the LDC start trigger is the
+ * producer to consumer announcement that work is available to the
+ * consumer
+ */
+ if (!port->start_cons)
+ goto ldc_start_done; /* previous trigger suffices */
+
+ err = __vnet_tx_trigger(port, dr->cons);
if (unlikely(err < 0)) {
netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
dev->stats.tx_carrier_errors++;
- goto out_dropped_unlock;
+ goto out_dropped;
}
+ldc_start_done:
+ port->start_cons = false;
+
dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
- if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
- netif_stop_queue(dev);
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
+ netif_tx_stop_queue(txq);
if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
- netif_wake_queue(dev);
+ netif_tx_wake_queue(txq);
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
+ rcu_read_unlock();
- dev_kfree_skb(skb);
+ vnet_free_skbs(freeskbs);
return NETDEV_TX_OK;
-out_dropped_unlock:
- spin_unlock_irqrestore(&port->vio.lock, flags);
-
out_dropped:
- dev_kfree_skb(skb);
+ if (pending)
+ (void)mod_timer(&port->clean_timer,
+ jiffies + VNET_CLEAN_TIMEOUT);
+ else if (port)
+ del_timer(&port->clean_timer);
+ if (port)
+ rcu_read_unlock();
+ if (skb)
+ dev_kfree_skb(skb);
+ vnet_free_skbs(freeskbs);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
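
[Note: the start_cons protocol described in the long comment above amounts to the following exchange (a schematic, not source):

	/* producer (this driver)                consumer (peer LDOM)
	 * ----------------------                --------------------
	 * mark desc[i] VIO_DESC_READY
	 * start_cons? send DRING start  ---->   begin consuming at cons
	 * mark desc[i+1..] READY (no trigger)   ... keeps consuming ...
	 *                               <----   DRING_STOPPED ack
	 * vnet_ack(): if desc[cons] is READY but start_cons was false,
	 * send the missed start trigger on xmit's behalf. */
]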
@@ -778,14 +1456,14 @@ static void vnet_tx_timeout(struct net_device *dev)
static int vnet_open(struct net_device *dev)
{
netif_carrier_on(dev);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
static int vnet_close(struct net_device *dev)
{
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
return 0;
@@ -895,23 +1573,22 @@ static void vnet_set_rx_mode(struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port;
- unsigned long flags;
- spin_lock_irqsave(&vp->lock, flags);
- if (!list_empty(&vp->port_list)) {
- port = list_entry(vp->port_list.next, struct vnet_port, list);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
if (port->switch_port) {
__update_mc_list(vp, dev);
__send_mc_list(vp, port);
+ break;
}
}
- spin_unlock_irqrestore(&vp->lock, flags);
+ rcu_read_unlock();
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
- if (new_mtu != ETH_DATA_LEN)
+ if (new_mtu < 68 || new_mtu > 65535)
return -EINVAL;
dev->mtu = new_mtu;
@@ -967,60 +1644,39 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
}
for (i = 0; i < VNET_TX_RING_SIZE; i++) {
- void *buf = port->tx_bufs[i].buf;
+ struct vio_net_desc *d;
+ void *skb = port->tx_bufs[i].skb;
- if (!buf)
+ if (!skb)
continue;
+ d = vio_dring_entry(dr, i);
+ if (d->hdr.state == VIO_DESC_READY)
+ pr_warn("active transmit buffers freed\n");
+
ldc_unmap(port->vio.lp,
port->tx_bufs[i].cookies,
port->tx_bufs[i].ncookies);
-
- kfree(buf);
- port->tx_bufs[i].buf = NULL;
+ dev_kfree_skb(skb);
+ port->tx_bufs[i].skb = NULL;
+ d->hdr.state = VIO_DESC_FREE;
}
}
-static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
+static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
struct vio_dring_state *dr;
- unsigned long len;
+ unsigned long len, elen;
int i, err, ncookies;
void *dring;
- for (i = 0; i < VNET_TX_RING_SIZE; i++) {
- void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
- int map_len = (ETH_FRAME_LEN + 7) & ~7;
-
- err = -ENOMEM;
- if (!buf)
- goto err_out;
-
- err = -EFAULT;
- if ((unsigned long)buf & (8UL - 1)) {
- pr_err("TX buffer misaligned\n");
- kfree(buf);
- goto err_out;
- }
-
- err = ldc_map_single(port->vio.lp, buf, map_len,
- port->tx_bufs[i].cookies, 2,
- (LDC_MAP_SHADOW |
- LDC_MAP_DIRECT |
- LDC_MAP_RW));
- if (err < 0) {
- kfree(buf);
- goto err_out;
- }
- port->tx_bufs[i].buf = buf;
- port->tx_bufs[i].ncookies = err;
- }
-
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- len = (VNET_TX_RING_SIZE *
- (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2)));
+ elen = sizeof(struct vio_net_desc) +
+ sizeof(struct ldc_trans_cookie) * 2;
+ if (vio_version_after_eq(&port->vio, 1, 7))
+ elen += sizeof(struct vio_net_dext);
+ len = VNET_TX_RING_SIZE * elen;
ncookies = VIO_MAX_RING_COOKIES;
dring = ldc_alloc_exp_dring(port->vio.lp, len,
@@ -1034,13 +1690,19 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
}
dr->base = dring;
- dr->entry_size = (sizeof(struct vio_net_desc) +
- (sizeof(struct ldc_trans_cookie) * 2));
+ dr->entry_size = elen;
dr->num_entries = VNET_TX_RING_SIZE;
dr->prod = dr->cons = 0;
+ port->start_cons = true; /* need an initial trigger */
dr->pending = VNET_TX_RING_SIZE;
dr->ncookies = ncookies;
+ for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
+ struct vio_net_desc *d;
+
+ d = vio_dring_entry(dr, i);
+ d->hdr.state = VIO_DESC_FREE;
+ }
return 0;
err_out:
@@ -1049,6 +1711,21 @@ err_out:
return err;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vnet_poll_controller(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+ napi_schedule(&port->napi);
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
+}
+#endif
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);
@@ -1061,6 +1738,10 @@ static const struct net_device_ops vnet_ops = {
.ndo_tx_timeout = vnet_tx_timeout,
.ndo_change_mtu = vnet_change_mtu,
.ndo_start_xmit = vnet_start_xmit,
+ .ndo_select_queue = vnet_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vnet_poll_controller,
+#endif
};
static struct vnet *vnet_new(const u64 *local_mac)
@@ -1069,9 +1750,11 @@ static struct vnet *vnet_new(const u64 *local_mac)
struct vnet *vp;
int err, i;
- dev = alloc_etherdev(sizeof(*vp));
+ dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
if (!dev)
return ERR_PTR(-ENOMEM);
+ dev->needed_headroom = VNET_PACKET_SKIP + 8;
+ dev->needed_tailroom = 8;
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
@@ -1079,7 +1762,6 @@ static struct vnet *vnet_new(const u64 *local_mac)
vp = netdev_priv(dev);
spin_lock_init(&vp->lock);
- tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
vp->dev = dev;
INIT_LIST_HEAD(&vp->port_list);
@@ -1092,6 +1774,10 @@ static struct vnet *vnet_new(const u64 *local_mac)
dev->ethtool_ops = &vnet_ethtool_ops;
dev->watchdog_timeo = VNET_TX_TIMEOUT;
+ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->features = dev->hw_features;
+
err = register_netdev(dev);
if (err) {
pr_err("Cannot register net device, aborting\n");
@@ -1139,7 +1825,6 @@ static void vnet_cleanup(void)
vp = list_first_entry(&vnet_list, struct vnet, list);
list_del(&vp->list);
dev = vp->dev;
- tasklet_kill(&vp->vnet_tx_wakeup);
/* vio_unregister_driver() should have cleaned up port_list */
BUG_ON(!list_empty(&vp->port_list));
unregister_netdev(dev);
@@ -1194,6 +1879,25 @@ static void print_version(void)
const char *remote_macaddr_prop = "remote-mac-address";
+static void
+vnet_port_add_txq(struct vnet_port *port)
+{
+ struct vnet *vp = port->vp;
+ int n;
+
+ n = vp->nports++;
+ n = n & (VNET_MAX_TXQS - 1);
+ port->q_index = n;
+ netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
+}
+
+static void
+vnet_port_rm_txq(struct vnet_port *port)
+{
+ port->vp->nports--;
+ netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
+}
+
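
[Note: vnet_port_add_txq() hands out queue indices modulo VNET_MAX_TXQS, so ports beyond the sixteenth share queues. For example:

	/* With VNET_MAX_TXQS = 16, n & (VNET_MAX_TXQS - 1) wraps:
	 *   port #0  -> q_index 0
	 *   port #15 -> q_index 15
	 *   port #16 -> q_index 0   (shares a txq with port #0)
	 * The mask form relies on VNET_MAX_TXQS being a power of 2. */
]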
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -1241,9 +1945,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- err = vnet_port_alloc_tx_bufs(port);
- if (err)
- goto err_out_free_ldc;
+ netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
@@ -1252,13 +1954,17 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
switch_port = 1;
port->switch_port = switch_port;
+ port->tso = true;
+ port->tsolen = 0;
spin_lock_irqsave(&vp->lock, flags);
if (switch_port)
- list_add(&port->list, &vp->port_list);
+ list_add_rcu(&port->list, &vp->port_list);
else
- list_add_tail(&port->list, &vp->port_list);
- hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+ list_add_tail_rcu(&port->list, &vp->port_list);
+ hlist_add_head_rcu(&port->hash,
+ &vp->port_hash[vnet_hashfn(port->raddr)]);
+ vnet_port_add_txq(port);
spin_unlock_irqrestore(&vp->lock, flags);
dev_set_drvdata(&vdev->dev, port);
@@ -1266,15 +1972,16 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
pr_info("%s: PORT ( remote-mac %pM%s )\n",
vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
+ setup_timer(&port->clean_timer, vnet_clean_timer_expire,
+ (unsigned long)port);
+
+ napi_enable(&port->napi);
vio_port_up(&port->vio);
mdesc_release(hp);
return 0;
-err_out_free_ldc:
- vio_ldc_free(&port->vio);
-
err_out_free_port:
kfree(port);
@@ -1288,16 +1995,18 @@ static int vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- struct vnet *vp = port->vp;
- unsigned long flags;
del_timer_sync(&port->vio.timer);
- spin_lock_irqsave(&vp->lock, flags);
- list_del(&port->list);
- hlist_del(&port->hash);
- spin_unlock_irqrestore(&vp->lock, flags);
+ napi_disable(&port->napi);
+
+ list_del_rcu(&port->list);
+ hlist_del_rcu(&port->hash);
+ synchronize_rcu();
+ del_timer_sync(&port->clean_timer);
+ vnet_port_rm_txq(port);
+ netif_napi_del(&port->napi);
vnet_port_free_tx_bufs(port);
vio_ldc_free(&port->vio);