author	Eric Dumazet <edumazet@google.com>	2014-03-10 17:11:43 -0700
committer	David S. Miller <davem@davemloft.net>	2014-03-11 23:54:23 -0400
commit	fba373d2bb267eaeba85579dd04b91435df8c83b (patch)
tree	64becbfd442aed1f89f4058609eaa042c8e6f417 /net/sched
parent	15dc36ebbbea7da35fff2c51b620c8333fc87528 (diff)
pkt_sched: add cond_resched() to class and qdisc dump
We have seen delays of more than 50ms in class or qdisc dumps, in case the device is under high TX stress, even with the prior 4KB per skb limit.

Add cond_resched() to give a chance to higher prio tasks to get cpu.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
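The fix relies on the standard kernel pattern for long-running work in process context: call cond_resched() inside a routine that can be invoked many times back to back, so a runnable higher-priority task never has to wait for the whole dump to finish. A minimal sketch of that shape follows; the function name example_fill_one() and its argument list are illustrative only, not the actual tc_fill_qdisc()/tc_fill_tclass() code.

#include <linux/sched.h>	/* cond_resched() */
#include <net/netlink.h>	/* nlmsg_put(), nlmsg_end() */

/* Illustrative only: a per-entry netlink fill routine that may run
 * thousands of times in a row while a dump fills an skb.  Because it
 * executes in process context, calling cond_resched() at the top lets
 * the scheduler switch to a higher-priority runnable task between
 * entries, bounding the latency a long dump can impose.
 */
static int example_fill_one(struct sk_buff *skb, u32 portid, u32 seq,
			    u16 flags, int event)
{
	struct nlmsghdr *nlh;

	cond_resched();		/* yield if a higher-prio task is runnable */

	nlh = nlmsg_put(skb, portid, seq, event, 0, flags);
	if (!nlh)
		return -EMSGSIZE;	/* skb full: caller stops this pass */

	/* ... put message payload / attributes here ... */

	nlmsg_end(skb, nlh);
	return 0;
}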
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_api.c	2
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 272292efa7f..0a99d7ced71 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1303,6 +1303,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	struct gnet_dump d;
 	struct qdisc_size_table *stab;
 
+	cond_resched();
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
 	if (!nlh)
 		goto out_nlmsg_trim;
@@ -1615,6 +1616,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 	struct gnet_dump d;
 	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
 
+	cond_resched();
 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
 	if (!nlh)
 		goto out_nlmsg_trim;
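For context on why one cond_resched() per fill call is enough: these helpers sit inside the rtnetlink dump loops, so the call happens once per dumped qdisc or class and a long dump yields at a steady rate rather than only when the skb fills up. A rough sketch of that calling shape, reusing the hypothetical example_fill_one() from the sketch above (example_dump(), NUM_ENTRIES, and the flat loop are simplified illustrations, not the real tc_dump_qdisc()/tc_dump_tclass() logic):

#include <linux/rtnetlink.h>	/* RTM_NEWQDISC, NLM_F_MULTI */

#define NUM_ENTRIES 4096	/* hypothetical table size for illustration */

/* Illustrative dump callback: the fill helper (and with it
 * cond_resched()) runs once per entry, so the CPU is yielded
 * throughout the dump instead of only between netlink messages.
 */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;

	for (idx = cb->args[0]; idx < NUM_ENTRIES; idx++) {
		if (example_fill_one(skb, NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq, NLM_F_MULTI,
				     RTM_NEWQDISC) < 0)
			break;	/* skb full: resume from idx on next call */
	}
	cb->args[0] = idx;	/* remember progress for the next dump pass */

	return skb->len;
}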