author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 09:38:14 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 09:38:14 -0700
commit    ae045e2455429c418a418a3376301a9e5753a0a8 (patch)
tree      b445bdeecd3f38aa0d0a29c9585cee49e4ccb0f1 /drivers/net/xen-netback
parent    f4f142ed4ef835709c7e6d12eaca10d190bcebed (diff)
parent    d247b6ab3ce6dd43665780865ec5fa145d9ab6bd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Steady transitioning of the BPF infrastructure to a generic spot so all kernel subsystems can make use of it, from Alexei Starovoitov.

  2) SFC driver supports busy polling, from Alexandre Rames.

  3) Take advantage of hash table in UDP multicast delivery, from David Held.

  4) Lighten locking, in particular by getting rid of the LRU lists, in inet frag handling.  From Florian Westphal.

  5) Add support for various RFC6458 control messages in SCTP, from Geir Ola Vaagland.

  6) Allow to filter bridge forwarding database dumps by device, from Jamal Hadi Salim.

  7) virtio-net also now supports busy polling, from Jason Wang.

  8) Some low level optimization tweaks in pktgen from Jesper Dangaard Brouer.

  9) Add support for ipv6 address generation modes, so that userland can have some input into the process.  From Jiri Pirko.

 10) Consolidate common TCP connection request code in ipv4 and ipv6, from Octavian Purdila.

 11) New ARP packet logger in netfilter, from Pablo Neira Ayuso.

 12) Generic resizable RCU hash table, with initial users in netlink and nftables.  From Thomas Graf.

 13) Maintain a name assignment type so that userspace can see where a network device name came from (enumerated by kernel, assigned explicitly by userspace, etc.)  From Tom Gundersen.

 14) Automatic flow label generation on transmit in ipv6, from Tom Herbert.

 15) New packet timestamping facilities from Willem de Bruijn, meant to assist in measuring latencies going into/out-of the packet scheduler, latency from TCP data transmission to ACK, etc"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1536 commits)
  cxgb4 : Disable recursive mailbox commands when enabling vi
  net: reduce USB network driver config options.
  tg3: Modify tg3_tso_bug() to handle multiple TX rings
  amd-xgbe: Perform phy connect/disconnect at dev open/stop
  amd-xgbe: Use dma_set_mask_and_coherent to set DMA mask
  net: sun4i-emac: fix memory leak on bad packet
  sctp: fix possible seqlock seadlock in sctp_packet_transmit()
  Revert "net: phy: Set the driver when registering an MDIO bus device"
  cxgb4vf: Turn off SGE RX/TX Callback Timers and interrupts in PCI shutdown routine
  team: Simplify return path of team_newlink
  bridge: Update outdated comment on promiscuous mode
  net-timestamp: ACK timestamp for bytestreams
  net-timestamp: TCP timestamping
  net-timestamp: SCHED timestamp on entering packet scheduler
  net-timestamp: add key to disambiguate concurrent datagrams
  net-timestamp: move timestamp flags out of sk_flags
  net-timestamp: extend SCM_TIMESTAMPING ancillary data struct
  cxgb4i : Move stray CPL definitions to cxgb4 driver
  tcp: reduce spurious retransmits due to transient SACK reneging
  qlcnic: Initialize dcbnl_ops before register_netdev
  ...
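The net-timestamp commits listed above (highlight 15) extend the existing SO_TIMESTAMPING socket option with SCHED and ACK transmit stages plus an ID to tell concurrent sends apart. A minimal userspace sketch, assuming a connected socket fd and uapi headers from this cycle; the helper name is illustrative and not part of the series:

	#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* flags */
	#include <sys/socket.h>		/* setsockopt(), SOL_SOCKET, SO_TIMESTAMPING */

	/* Illustrative helper: request software timestamps when a packet enters
	 * the packet scheduler and when TCP data is acknowledged, tagged with an
	 * ID so concurrent sends can be matched up on the error queue.
	 */
	static int enable_tx_timestamps(int fd)
	{
		int flags = SOF_TIMESTAMPING_TX_SCHED |
			    SOF_TIMESTAMPING_TX_ACK |
			    SOF_TIMESTAMPING_SOFTWARE |
			    SOF_TIMESTAMPING_OPT_ID;

		return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
				  &flags, sizeof(flags));
	}

Completed timestamps are then read back with recvmsg(fd, &msg, MSG_ERRQUEUE) as SCM_TIMESTAMPING control messages.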
Diffstat (limited to 'drivers/net/xen-netback')
-rw-r--r--  drivers/net/xen-netback/common.h     |  30
-rw-r--r--  drivers/net/xen-netback/interface.c  |  74
-rw-r--r--  drivers/net/xen-netback/netback.c    | 110
-rw-r--r--  drivers/net/xen-netback/xenbus.c     | 178
4 files changed, 342 insertions, 50 deletions
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 2532ce85d71..ef3026f46a3 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -44,6 +44,7 @@
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
+#include <linux/debugfs.h>
typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)
@@ -175,9 +176,9 @@ struct xenvif_queue { /* Per-queue data for xenvif */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
RING_IDX rx_last_skb_slots;
- bool rx_queue_purge;
+ unsigned long status;
- struct timer_list wake_queue;
+ struct timer_list rx_stalled;
struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
@@ -197,6 +198,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
struct xenvif_stats stats;
};
+enum state_bit_shift {
+ /* This bit marks that the vif is connected */
+ VIF_STATUS_CONNECTED,
+ /* This bit signals the RX thread that queuing was stopped (in
+ * start_xmit), and either the timer fired or an RX interrupt came
+ */
+ QUEUE_STATUS_RX_PURGE_EVENT,
+ /* This bit tells the interrupt handler that this queue was the reason
+ * for the carrier off, so it should kick the thread. Only queues which
+ * brought it down can turn on the carrier.
+ */
+ QUEUE_STATUS_RX_STALLED
+};
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -219,11 +234,16 @@ struct xenvif {
* frontend is rogue.
*/
bool disabled;
+ unsigned long status;
/* Queues */
struct xenvif_queue *queues;
unsigned int num_queues; /* active queues, resource allocated */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *xenvif_dbg_root;
+#endif
+
/* Miscellaneous private stuff. */
struct net_device *dev;
};
@@ -297,10 +317,16 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+irqreturn_t xenvif_interrupt(int irq, void *dev_id);
+
extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_drain_timeout_jiffies;
extern unsigned int xenvif_max_queues;
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *xen_netback_dbg_root;
+#endif
+
#endif /* __XEN_NETBACK__COMMON_H__ */
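The status words added above are plain unsigned longs driven by the atomic bitops: xenvif_start_xmit stops the netdev queue and arms the rx_stalled timer, then either the timer or the RX interrupt records a purge event for the queue's kthread to consume. A rough sketch of that hand-off, with hypothetical example_* helpers standing in for the real call sites in the interface.c and netback.c hunks below:

	#include <linux/bitops.h>

	/* Hypothetical helpers; the real producers are xenvif_rx_stalled() and
	 * xenvif_rx_interrupt(), the consumer is xenvif_kthread_guest_rx().
	 */
	static void example_signal_purge(struct xenvif_queue *queue)
	{
		/* Producer side: record the event atomically, wake the kthread. */
		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
		xenvif_kick_thread(queue);
	}

	static bool example_consume_purge(struct xenvif_queue *queue)
	{
		/* Consumer side: take the event exactly once. */
		return test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
					  &queue->status);
	}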
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 9e97c7ca0dd..48a55cda979 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -55,7 +55,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
int xenvif_schedulable(struct xenvif *vif)
{
- return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+ return netif_running(vif->dev) &&
+ test_bit(VIF_STATUS_CONNECTED, &vif->status);
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -77,8 +78,12 @@ int xenvif_poll(struct napi_struct *napi, int budget)
/* This vif is rogue, we pretend we've there is nothing to do
* for this vif to deschedule it from NAPI. But this interface
* will be turned off in thread context later.
+ * Also, if a guest doesn't post enough slots to receive data on one of
+ * its queues, the carrier goes down and NAPI is descheduled here so
+ * the guest can't send more packets until it's ready to receive.
*/
- if (unlikely(queue->vif->disabled)) {
+ if (unlikely(queue->vif->disabled ||
+ !netif_carrier_ok(queue->vif->dev))) {
napi_complete(napi);
return 0;
}
@@ -96,13 +101,22 @@ int xenvif_poll(struct napi_struct *napi, int budget)
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
+ struct netdev_queue *net_queue =
+ netdev_get_tx_queue(queue->vif->dev, queue->id);
+ /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+ * the carrier went down and this queue was previously blocked
+ */
+ if (unlikely(netif_tx_queue_stopped(net_queue) ||
+ (!netif_carrier_ok(queue->vif->dev) &&
+ test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+ set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
xenvif_kick_thread(queue);
return IRQ_HANDLED;
}
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
xenvif_tx_interrupt(irq, dev_id);
xenvif_rx_interrupt(irq, dev_id);
@@ -124,16 +138,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
{
struct xenvif_queue *queue = (struct xenvif_queue *)data;
if (xenvif_queue_stopped(queue)) {
- netdev_err(queue->vif->dev, "draining TX queue\n");
- queue->rx_queue_purge = true;
+ set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
xenvif_kick_thread(queue);
- xenvif_wake_queue(queue);
}
}
@@ -182,11 +194,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
* drain.
*/
if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
- queue->wake_queue.function = xenvif_wake_queue_callback;
- queue->wake_queue.data = (unsigned long)queue;
+ queue->rx_stalled.function = xenvif_rx_stalled;
+ queue->rx_stalled.data = (unsigned long)queue;
xenvif_stop_queue(queue);
- mod_timer(&queue->wake_queue,
- jiffies + rx_drain_timeout_jiffies);
+ mod_timer(&queue->rx_stalled,
+ jiffies + rx_drain_timeout_jiffies);
}
skb_queue_tail(&queue->rx_queue, skb);
@@ -267,7 +279,7 @@ static void xenvif_down(struct xenvif *vif)
static int xenvif_open(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
- if (netif_carrier_ok(dev))
+ if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
xenvif_up(vif);
netif_tx_start_all_queues(dev);
return 0;
@@ -276,7 +288,7 @@ static int xenvif_open(struct net_device *dev)
static int xenvif_close(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
- if (netif_carrier_ok(dev))
+ if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
xenvif_down(vif);
netif_tx_stop_all_queues(dev);
return 0;
@@ -418,8 +430,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
* When the guest selects the desired number, it will be updated
* via netif_set_real_num_*_queues().
*/
- dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
- xenvif_max_queues);
+ dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+ ether_setup, xenvif_max_queues);
if (dev == NULL) {
pr_warn("Could not allocate netdev for %s\n", name);
return ERR_PTR(-ENOMEM);
@@ -514,7 +526,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
}
- init_timer(&queue->wake_queue);
+ init_timer(&queue->rx_stalled);
netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
XENVIF_NAPI_WEIGHT);
@@ -528,6 +540,7 @@ void xenvif_carrier_on(struct xenvif *vif)
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
+ set_bit(VIF_STATUS_CONNECTED, &vif->status);
netif_carrier_on(vif->dev);
if (netif_running(vif->dev))
xenvif_up(vif);
@@ -625,9 +638,11 @@ void xenvif_carrier_off(struct xenvif *vif)
struct net_device *dev = vif->dev;
rtnl_lock();
- netif_carrier_off(dev); /* discard queued packets */
- if (netif_running(dev))
- xenvif_down(vif);
+ if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ }
rtnl_unlock();
}
@@ -656,14 +671,13 @@ void xenvif_disconnect(struct xenvif *vif)
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
- if (netif_carrier_ok(vif->dev))
- xenvif_carrier_off(vif);
+ xenvif_carrier_off(vif);
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
if (queue->task) {
- del_timer_sync(&queue->wake_queue);
+ del_timer_sync(&queue->rx_stalled);
kthread_stop(queue->task);
queue->task = NULL;
}
@@ -705,16 +719,12 @@ void xenvif_free(struct xenvif *vif)
/* Here we want to avoid timeout messages if an skb can be legitimately
* stuck somewhere else. Realistically this could be an another vif's
* internal or QDisc queue. That another vif also has this
- * rx_drain_timeout_msecs timeout, but the timer only ditches the
- * internal queue. After that, the QDisc queue can put in worst case
- * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
- * internal queue, so we need several rounds of such timeouts until we
- * can be sure that no another vif should have skb's from us. We are
- * not sending more skb's, so newly stuck packets are not interesting
- * for us here.
+ * rx_drain_timeout_msecs timeout, so give it time to drain out.
+ * Although if that other guest wakes up just before its timeout happens
+ * and takes only one skb from QDisc, it can hold onto other skbs for a
+ * longer period.
*/
- unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
- DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+ unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
unregister_netdev(vif->dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c65b636bcab..aa2093325be 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1869,8 +1869,7 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
static inline int rx_work_todo(struct xenvif_queue *queue)
{
return (!skb_queue_empty(&queue->rx_queue) &&
- xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots)) ||
- queue->rx_queue_purge;
+ xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
}
static inline int tx_work_todo(struct xenvif_queue *queue)
@@ -1935,6 +1934,75 @@ static void xenvif_start_queue(struct xenvif_queue *queue)
xenvif_wake_queue(queue);
}
+/* Only called from the queue's thread, it handles the situation when the guest
+ * doesn't post enough requests on the receiving ring.
+ * First xenvif_start_xmit disables QDisc and starts a timer, and then either the
+ * timer fires, or the guest sends an interrupt after posting new requests. If it
+ * is the timer, the carrier is turned off here.
+ */
+static void xenvif_rx_purge_event(struct xenvif_queue *queue)
+{
+ /* Either the last unsuccessful skb or at least 1 slot should fit */
+ int needed = queue->rx_last_skb_slots ?
+ queue->rx_last_skb_slots : 1;
+
+ /* It is assumed that if the guest posts new slots after this, the RX
+ * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up
+ * the thread again
+ */
+ set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
+ if (!xenvif_rx_ring_slots_available(queue, needed)) {
+ rtnl_lock();
+ if (netif_carrier_ok(queue->vif->dev)) {
+ /* Timer fired and there are still no slots. Turn off
+ * everything except the interrupts
+ */
+ netif_carrier_off(queue->vif->dev);
+ skb_queue_purge(&queue->rx_queue);
+ queue->rx_last_skb_slots = 0;
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
+ } else {
+ /* Probably another queue already turned the carrier
+ * off, make sure nothing is stuck in the internal
+ * queue of this queue
+ */
+ skb_queue_purge(&queue->rx_queue);
+ queue->rx_last_skb_slots = 0;
+ }
+ rtnl_unlock();
+ } else if (!netif_carrier_ok(queue->vif->dev)) {
+ unsigned int num_queues = queue->vif->num_queues;
+ unsigned int i;
+ /* The carrier was down, but an interrupt kicked
+ * the thread again after new requests were
+ * posted
+ */
+ clear_bit(QUEUE_STATUS_RX_STALLED,
+ &queue->status);
+ rtnl_lock();
+ netif_carrier_on(queue->vif->dev);
+ netif_tx_wake_all_queues(queue->vif->dev);
+ rtnl_unlock();
+
+ for (i = 0; i < num_queues; i++) {
+ struct xenvif_queue *temp = &queue->vif->queues[i];
+
+ xenvif_napi_schedule_or_enable_events(temp);
+ }
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev, "Carrier on again\n");
+ } else {
+ /* Queuing was stopped, but the guest posted
+ * new requests and sent an interrupt
+ */
+ clear_bit(QUEUE_STATUS_RX_STALLED,
+ &queue->status);
+ del_timer_sync(&queue->rx_stalled);
+ xenvif_start_queue(queue);
+ }
+}
+
int xenvif_kthread_guest_rx(void *data)
{
struct xenvif_queue *queue = data;
@@ -1944,8 +2012,12 @@ int xenvif_kthread_guest_rx(void *data)
wait_event_interruptible(queue->wq,
rx_work_todo(queue) ||
queue->vif->disabled ||
+ test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
/* This frontend is found to be rogue, disable it in
* kthread context. Currently this is only set when
* netback finds out frontend sends malformed packet,
@@ -1953,26 +2025,23 @@ int xenvif_kthread_guest_rx(void *data)
* context so we defer it here, if this thread is
* associated with queue 0.
*/
- if (unlikely(queue->vif->disabled && netif_carrier_ok(queue->vif->dev) && queue->id == 0))
+ if (unlikely(queue->vif->disabled && queue->id == 0))
xenvif_carrier_off(queue->vif);
-
- if (kthread_should_stop())
- break;
-
- if (queue->rx_queue_purge) {
+ else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
+ &queue->status))) {
+ xenvif_rx_purge_event(queue);
+ } else if (!netif_carrier_ok(queue->vif->dev)) {
+ /* Another queue stalled and turned the carrier off, so
+ * purge the internal queue of queues which were not
+ * blocked
+ */
skb_queue_purge(&queue->rx_queue);
- queue->rx_queue_purge = false;
+ queue->rx_last_skb_slots = 0;
}
if (!skb_queue_empty(&queue->rx_queue))
xenvif_rx_action(queue);
- if (skb_queue_empty(&queue->rx_queue) &&
- xenvif_queue_stopped(queue)) {
- del_timer_sync(&queue->wake_queue);
- xenvif_start_queue(queue);
- }
-
cond_resched();
}
@@ -2027,6 +2096,13 @@ static int __init netback_init(void)
rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+#ifdef CONFIG_DEBUG_FS
+ xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
+ if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+ pr_warn("Init of debugfs returned %ld!\n",
+ PTR_ERR(xen_netback_dbg_root));
+#endif /* CONFIG_DEBUG_FS */
+
return 0;
failed_init:
@@ -2037,6 +2113,10 @@ module_init(netback_init);
static void __exit netback_fini(void)
{
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
+ debugfs_remove_recursive(xen_netback_dbg_root);
+#endif /* CONFIG_DEBUG_FS */
xenvif_xenbus_fini();
}
module_exit(netback_fini);
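With the module-wide xen-netback debugfs directory created in netback_init() above, the xenbus.c hunk below adds one io_ring_q<N> file per queue under a per-vif subdirectory. A hedged userspace sketch of how such a file might be exercised; the vif name "vif1.0" and the standard /sys/kernel/debug mount point are assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumed path: <debugfs>/xen-netback/<netdev name>/io_ring_q<id> */
		const char *path = "/sys/kernel/debug/xen-netback/vif1.0/io_ring_q0";
		char buf[4096];
		ssize_t n;
		int fd;

		/* Reading dumps the TX/RX ring indices, pending/dealloc state and
		 * credit scheduler values produced by xenvif_read_io_ring().
		 */
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);

		/* Writing the string "kick" makes xenvif_write_io_ring() call
		 * xenvif_interrupt() on that queue, simulating a guest event.
		 */
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return 1;
		(void)write(fd, "kick", 4);
		close(fd);
		return 0;
	}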
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3d85acd84ba..580517d857b 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -44,6 +44,175 @@ static void unregister_hotplug_status_watch(struct backend_info *be);
static void set_backend_state(struct backend_info *be,
enum xenbus_state state);
+#ifdef CONFIG_DEBUG_FS
+struct dentry *xen_netback_dbg_root = NULL;
+
+static int xenvif_read_io_ring(struct seq_file *m, void *v)
+{
+ struct xenvif_queue *queue = m->private;
+ struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
+ struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
+
+ if (tx_ring->sring) {
+ struct xen_netif_tx_sring *sring = tx_ring->sring;
+
+ seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
+ tx_ring->nr_ents);
+ seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
+ sring->req_prod,
+ sring->req_prod - sring->rsp_prod,
+ tx_ring->req_cons,
+ tx_ring->req_cons - sring->rsp_prod,
+ sring->req_event,
+ sring->req_event - sring->rsp_prod);
+ seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
+ sring->rsp_prod,
+ tx_ring->rsp_prod_pvt,
+ tx_ring->rsp_prod_pvt - sring->rsp_prod,
+ sring->rsp_event,
+ sring->rsp_event - sring->rsp_prod);
+ seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
+ queue->pending_prod,
+ queue->pending_cons,
+ nr_pending_reqs(queue));
+ seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
+ queue->dealloc_prod,
+ queue->dealloc_cons,
+ queue->dealloc_prod - queue->dealloc_cons);
+ }
+
+ if (rx_ring->sring) {
+ struct xen_netif_rx_sring *sring = rx_ring->sring;
+
+ seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
+ seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
+ sring->req_prod,
+ sring->req_prod - sring->rsp_prod,
+ rx_ring->req_cons,
+ rx_ring->req_cons - sring->rsp_prod,
+ sring->req_event,
+ sring->req_event - sring->rsp_prod);
+ seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
+ sring->rsp_prod,
+ rx_ring->rsp_prod_pvt,
+ rx_ring->rsp_prod_pvt - sring->rsp_prod,
+ sring->rsp_event,
+ sring->rsp_event - sring->rsp_prod);
+ }
+
+ seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
+ "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
+ "remaining: %lu, expires: %lu, now: %lu\n",
+ queue->napi.state, queue->napi.weight,
+ skb_queue_len(&queue->tx_queue),
+ timer_pending(&queue->credit_timeout),
+ queue->credit_bytes,
+ queue->credit_usec,
+ queue->remaining_credit,
+ queue->credit_timeout.expires,
+ jiffies);
+
+ return 0;
+}
+
+#define XENVIF_KICK_STR "kick"
+
+static ssize_t
+xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct xenvif_queue *queue =
+ ((struct seq_file *)filp->private_data)->private;
+ int len;
+ char write[sizeof(XENVIF_KICK_STR)];
+
+ /* don't allow partial writes and check the length */
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(XENVIF_KICK_STR) - 1)
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(write,
+ sizeof(write),
+ ppos,
+ buf,
+ count);
+ if (len < 0)
+ return len;
+
+ if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
+ xenvif_interrupt(0, (void *)queue);
+ else {
+ pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
+ queue->id);
+ count = -EINVAL;
+ }
+ return count;
+}
+
+static int xenvif_dump_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ void *queue = NULL;
+
+ if (inode->i_private)
+ queue = inode->i_private;
+ ret = single_open(filp, xenvif_read_io_ring, queue);
+ filp->f_mode |= FMODE_PWRITE;
+ return ret;
+}
+
+static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = xenvif_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = xenvif_write_io_ring,
+};
+
+static void xenvif_debugfs_addif(struct xenvif_queue *queue)
+{
+ struct dentry *pfile;
+ struct xenvif *vif = queue->vif;
+ int i;
+
+ if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+ return;
+
+ vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
+ xen_netback_dbg_root);
+ if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
+ for (i = 0; i < vif->num_queues; ++i) {
+ char filename[sizeof("io_ring_q") + 4];
+
+ snprintf(filename, sizeof(filename), "io_ring_q%d", i);
+ pfile = debugfs_create_file(filename,
+ S_IRUSR | S_IWUSR,
+ vif->xenvif_dbg_root,
+ &vif->queues[i],
+ &xenvif_dbg_io_ring_ops_fops);
+ if (IS_ERR_OR_NULL(pfile))
+ pr_warn("Creation of io_ring file returned %ld!\n",
+ PTR_ERR(pfile));
+ }
+ } else
+ netdev_warn(vif->dev,
+ "Creation of vif debugfs dir returned %ld!\n",
+ PTR_ERR(vif->xenvif_dbg_root));
+}
+
+static void xenvif_debugfs_delif(struct xenvif *vif)
+{
+ if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+ return;
+
+ if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root))
+ debugfs_remove_recursive(vif->xenvif_dbg_root);
+ vif->xenvif_dbg_root = NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
static int netback_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -246,8 +415,12 @@ static void backend_create_xenvif(struct backend_info *be)
static void backend_disconnect(struct backend_info *be)
{
- if (be->vif)
+ if (be->vif) {
+#ifdef CONFIG_DEBUG_FS
+ xenvif_debugfs_delif(be->vif);
+#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect(be->vif);
+ }
}
static void backend_connect(struct backend_info *be)
@@ -560,6 +733,9 @@ static void connect(struct backend_info *be)
be->vif->num_queues = queue_index;
goto err;
}
+#ifdef CONFIG_DEBUG_FS
+ xenvif_debugfs_addif(queue);
+#endif /* CONFIG_DEBUG_FS */
}
/* Initialisation completed, tell core driver the number of