Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c | 169
1 file changed, 136 insertions(+), 33 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 555b70c8b86..4452306d532 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -41,9 +41,19 @@ struct virtnet_info
struct net_device *dev;
struct napi_struct napi;
+ /* The skb we couldn't send because buffers were full. */
+ struct sk_buff *last_xmit_skb;
+
+ /* If we need to free in a timer, this is it. */
+ struct timer_list xmit_free_timer;
+
/* Number of input buffers, and max we've ever had. */
unsigned int num, max;
+ /* For cleaning up after transmission. */
+ struct tasklet_struct tasklet;
+ bool free_in_tasklet;
+
/* Receive & send queues. */
struct sk_buff_head recv;
struct sk_buff_head send;
@@ -65,8 +75,13 @@ static void skb_xmit_done(struct virtqueue *svq)
/* Suppress further interrupts. */
svq->vq_ops->disable_cb(svq);
- /* We were waiting for more output buffers. */
+
+ /* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
+
+ /* Make sure we re-xmit last_xmit_skb: if there are no more packets
+ * queued, start_xmit won't be called. */
+ tasklet_schedule(&vi->tasklet);
}
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
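skb_xmit_done() is called from the virtqueue's interrupt callback, so it only suppresses further callbacks, wakes the queue and defers the real work (re-sending last_xmit_skb and reaping completed buffers) to the tasklet. A minimal standalone sketch of that defer-to-tasklet pattern, using the tasklet_init()/tasklet_schedule() API of this era and hypothetical demo_* names rather than the driver's own:

#include <linux/interrupt.h>

struct demo_ctx {
	struct tasklet_struct tasklet;
};

/* Softirq context: a safe place to retry a held-back skb and to reap
 * buffers the host has finished with. */
static void demo_tasklet_fn(unsigned long data)
{
	struct demo_ctx *ctx = (struct demo_ctx *)data;

	(void)ctx;	/* real work would go here */
}

static void demo_setup(struct demo_ctx *ctx)
{
	tasklet_init(&ctx->tasklet, demo_tasklet_fn, (unsigned long)ctx);
}

/* Interrupt-context callback: keep it short and punt to the tasklet. */
static void demo_vq_callback(struct demo_ctx *ctx)
{
	tasklet_schedule(&ctx->tasklet);
}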
@@ -83,9 +98,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
BUG_ON(len > MAX_PACKET_LEN);
skb_trim(skb, len);
- skb->protocol = eth_type_trans(skb, dev);
- pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
- ntohs(skb->protocol), skb->len, skb->pkt_type);
+
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
@@ -95,6 +108,10 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
goto frame_err;
}
+ skb->protocol = eth_type_trans(skb, dev);
+ pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
+ ntohs(skb->protocol), skb->len, skb->pkt_type);
+
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -142,10 +159,10 @@ drop:
static void try_fill_recv(struct virtnet_info *vi)
{
struct sk_buff *skb;
- struct scatterlist sg[1+MAX_SKB_FRAGS];
+ struct scatterlist sg[2+MAX_SKB_FRAGS];
int num, err;
- sg_init_table(sg, 1+MAX_SKB_FRAGS);
+ sg_init_table(sg, 2+MAX_SKB_FRAGS);
for (;;) {
skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
@@ -221,23 +238,38 @@ static void free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
__skb_unlink(skb, &vi->send);
- vi->dev->stats.tx_bytes += len;
+ vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
kfree_skb(skb);
}
}
-static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+/* If the virtio transport doesn't always notify us when all in-flight packets
+ * are consumed, we fall back to using this function on a timer to free them. */
+static void xmit_free(unsigned long data)
+{
+ struct virtnet_info *vi = (void *)data;
+
+ netif_tx_lock(vi->dev);
+
+ free_old_xmit_skbs(vi);
+
+ if (!skb_queue_empty(&vi->send))
+ mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
+
+ netif_tx_unlock(vi->dev);
+}
+
+static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
- struct virtnet_info *vi = netdev_priv(dev);
int num, err;
- struct scatterlist sg[1+MAX_SKB_FRAGS];
+ struct scatterlist sg[2+MAX_SKB_FRAGS];
struct virtio_net_hdr *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
- sg_init_table(sg, 1+MAX_SKB_FRAGS);
+ sg_init_table(sg, 2+MAX_SKB_FRAGS);
- pr_debug("%s: xmit %p " MAC_FMT "\n", dev->name, skb,
+ pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb,
dest[0], dest[1], dest[2],
dest[3], dest[4], dest[5]);
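xmit_free() is the fallback for transports that never signal when the ring has drained: every HZ/10 it takes the tx lock, frees completed skbs and re-arms itself while packets are still in flight. A stripped-down sketch of that arm/re-arm pattern with the setup_timer()/mod_timer() API, using hypothetical demo_* names:

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_tx {
	struct timer_list free_timer;
	bool work_pending;	/* stands in for !skb_queue_empty(&vi->send) */
};

static void demo_free_fn(unsigned long data)
{
	struct demo_tx *tx = (struct demo_tx *)data;

	/* Reap completed buffers here, then re-arm while work remains. */
	if (tx->work_pending)
		mod_timer(&tx->free_timer, jiffies + HZ / 10);
}

static void demo_arm(struct demo_tx *tx)
{
	setup_timer(&tx->free_timer, demo_free_fn, (unsigned long)tx);
	mod_timer(&tx->free_timer, jiffies + HZ / 10);
}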
@@ -272,30 +304,73 @@ static int start_xmit(struct sk_buff *skb, struct net_device *dev)
vnet_hdr_to_sg(sg, skb);
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- __skb_queue_head(&vi->send, skb);
+
+ err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
+ if (!err && !vi->free_in_tasklet)
+ mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
+
+ return err;
+}
+
+static void xmit_tasklet(unsigned long data)
+{
+ struct virtnet_info *vi = (void *)data;
+
+ netif_tx_lock_bh(vi->dev);
+ if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
+ vi->svq->vq_ops->kick(vi->svq);
+ vi->last_xmit_skb = NULL;
+ }
+ if (vi->free_in_tasklet)
+ free_old_xmit_skbs(vi);
+ netif_tx_unlock_bh(vi->dev);
+}
+
+static int start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);
- err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
- if (err) {
- pr_debug("%s: virtio not prepared to send\n", dev->name);
- netif_stop_queue(dev);
-
- /* Activate callback for using skbs: if this returns false it
- * means some were used in the meantime. */
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- vi->svq->vq_ops->disable_cb(vi->svq);
- netif_start_queue(dev);
- goto again;
+
+ /* If we have a buffer left over from last time, send it now. */
+ if (unlikely(vi->last_xmit_skb)) {
+ if (xmit_skb(vi, vi->last_xmit_skb) != 0) {
+ /* Drop this skb: we only queue one. */
+ vi->dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ skb = NULL;
+ goto stop_queue;
}
- __skb_unlink(skb, &vi->send);
+ vi->last_xmit_skb = NULL;
+ }
- return NETDEV_TX_BUSY;
+ /* Put new one in send queue and do transmit */
+ if (likely(skb)) {
+ __skb_queue_head(&vi->send, skb);
+ if (xmit_skb(vi, skb) != 0) {
+ vi->last_xmit_skb = skb;
+ skb = NULL;
+ goto stop_queue;
+ }
}
+done:
vi->svq->vq_ops->kick(vi->svq);
-
- return 0;
+ return NETDEV_TX_OK;
+
+stop_queue:
+ pr_debug("%s: virtio not prepared to send\n", dev->name);
+ netif_stop_queue(dev);
+
+ /* Activate callback for using skbs: if this returns false it
+ * means some were used in the meantime. */
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ vi->svq->vq_ops->disable_cb(vi->svq);
+ netif_start_queue(dev);
+ goto again;
+ }
+ goto done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
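The stop_queue path is the heart of the reworked start_xmit(): instead of returning NETDEV_TX_BUSY it stops the queue, re-enables the send-completion callback, and if enable_cb() reports that buffers were consumed in the meantime it undoes the stop and retries. Roughly, in isolation (the demo_* helpers only stand in for the virtqueue ops here and are not real API):

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Stand-in for svq->vq_ops->enable_cb(): returns false when buffers
 * were already consumed before the callback could be re-armed. */
static bool demo_enable_cb(void)
{
	return true;
}

/* Stand-in for svq->vq_ops->disable_cb(). */
static void demo_disable_cb(void)
{
}

static int demo_stop_queue(struct net_device *dev)
{
	netif_stop_queue(dev);

	if (unlikely(!demo_enable_cb())) {
		/* Raced with the host: room appeared, so resume and let
		 * the caller loop back to its "again:" label. */
		demo_disable_cb();
		netif_start_queue(dev);
		return -EAGAIN;
	}
	return 0;	/* queue stays stopped until skb_xmit_done() */
}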
@@ -355,17 +430,26 @@ static int virtnet_probe(struct virtio_device *vdev)
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
- if (csum && vdev->config->feature(vdev, VIRTIO_NET_F_CSUM)) {
+ if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
- if (gso && vdev->config->feature(vdev, VIRTIO_NET_F_GSO)) {
+ if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
+ /* Individual feature bits: what can host handle? */
+ if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
+ dev->features |= NETIF_F_TSO;
+ if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
+ dev->features |= NETIF_F_TSO6;
+ if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ dev->features |= NETIF_F_TSO_ECN;
+ if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+ dev->features |= NETIF_F_UFO;
}
/* Configuration may specify what MAC to use. Otherwise random. */
- if (vdev->config->feature(vdev, VIRTIO_NET_F_MAC)) {
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
vdev->config->get(vdev,
offsetof(struct virtio_net_config, mac),
dev->dev_addr, dev->addr_len);
@@ -379,6 +463,10 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->vdev = vdev;
vdev->priv = vi;
+ /* If they give us a callback when all buffers are done, we don't need
+ * the timer. */
+ vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);
+
/* We expect two virtqueues, receive then send. */
vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
if (IS_ERR(vi->rvq)) {
@@ -396,6 +484,11 @@ static int virtnet_probe(struct virtio_device *vdev)
skb_queue_head_init(&vi->recv);
skb_queue_head_init(&vi->send);
+ tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
+
+ if (!vi->free_in_tasklet)
+ setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
+
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -433,13 +526,15 @@ static void virtnet_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
+ if (!vi->free_in_tasklet)
+ del_timer_sync(&vi->xmit_free_timer);
+
/* Free our skbs in send and recv queues, if any. */
while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
kfree_skb(skb);
vi->num--;
}
- while ((skb = __skb_dequeue(&vi->send)) != NULL)
- kfree_skb(skb);
+ __skb_queue_purge(&vi->send);
BUG_ON(vi->num != 0);
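__skb_queue_purge() replaces the open-coded dequeue loop with no change in behaviour; spelled out, it is equivalent to:

#include <linux/skbuff.h>

static void demo_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	/* Drop every queued skb, same as __skb_queue_purge(list). */
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}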
@@ -454,7 +549,15 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
+static unsigned int features[] = {
+ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
+ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
+ VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
+};
+
static struct virtio_driver virtio_net = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
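The feature_table tells the virtio core which feature bits this driver understands, and the probe routine then keys its behaviour off virtio_has_feature(). A condensed sketch of that pattern as virtnet_probe() applies it above (demo_apply_features() is illustrative, not part of the driver):

#include <linux/netdevice.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_net.h>

static void demo_apply_features(struct virtio_device *vdev,
				struct net_device *dev,
				bool *need_free_timer)
{
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
		dev->features |= NETIF_F_TSO;

	/* Only fall back to the xmit_free timer when the transport will
	 * not interrupt us once the ring drains. */
	*need_free_timer = !virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);
}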