Diffstat (limited to 'net/sched/sch_choke.c')
-rw-r--r--  net/sched/sch_choke.c  29
1 file changed, 16 insertions(+), 13 deletions(-)
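
For context, the pattern this diff applies is the usual RCU-annotated pointer handling for a qdisc's classifier chain: filter_list gains the __rcu annotation, the softirq fast path reads it with rcu_dereference_bh(), plain NULL checks use rcu_access_pointer(), and the open-coded stats updates move to the qdisc_qstats_*() helpers. Below is a minimal sketch of the RCU side of that pattern; the demo_* names are hypothetical and only illustrate the access rules, they are not part of this patch.

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct tcf_proto;			/* only used via pointers here */

struct demo_sched_data {
	struct tcf_proto __rcu *filter_list;	/* written under RTNL, read in BH */
};

/* Fast path (BH/softirq context): a pure NULL test needs no dereference,
 * so rcu_access_pointer() is sufficient, as in choke_match_random().
 */
static bool demo_has_filters(const struct demo_sched_data *q)
{
	return rcu_access_pointer(q->filter_list) != NULL;
}

/* Fast path: take one snapshot of the chain and use only that snapshot,
 * matching the rcu_dereference_bh() call added to choke_classify().
 */
static struct tcf_proto *demo_get_filters(struct demo_sched_data *q)
{
	return rcu_dereference_bh(q->filter_list);
}

/* Slow path (RTNL held): publish a new chain so concurrent readers see
 * either the old or the new pointer, never a partial update.
 */
static void demo_set_filters(struct demo_sched_data *q, struct tcf_proto *tp)
{
	ASSERT_RTNL();
	rcu_assign_pointer(q->filter_list, tp);
}

The _bh flavor is used because the qdisc enqueue/dequeue paths already run with bottom halves disabled, so no separate rcu_read_lock() section is needed on the read side.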
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index fb666d1e4de..c009eb9045c 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -57,7 +57,7 @@ struct choke_sched_data {
/* Variables */
struct red_vars vars;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
struct {
u32 prob_drop; /* Early probability drops */
u32 prob_mark; /* Early probability marks */
@@ -127,7 +127,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
if (idx == q->tail)
choke_zap_tail_holes(q);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen;
@@ -203,9 +203,11 @@ static bool choke_classify(struct sk_buff *skb,
{
struct choke_sched_data *q = qdisc_priv(sch);
struct tcf_result res;
+ struct tcf_proto *fl;
int result;
- result = tc_classify(skb, q->filter_list, &res);
+ fl = rcu_dereference_bh(q->filter_list);
+ result = tc_classify(skb, fl, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -259,7 +261,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
return false;
oskb = choke_peek_random(q, pidx);
- if (q->filter_list)
+ if (rcu_access_pointer(q->filter_list))
return choke_get_classid(nskb) == choke_get_classid(oskb);
return choke_match_flow(oskb, nskb);
@@ -267,11 +269,11 @@ static bool choke_match_random(const struct choke_sched_data *q,
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
+ int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
struct choke_sched_data *q = qdisc_priv(sch);
const struct red_parms *p = &q->parms;
- int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- if (q->filter_list) {
+ if (rcu_access_pointer(q->filter_list)) {
/* If using external classifiers, get result and record it. */
if (!choke_classify(skb, sch, &ret))
goto other_drop; /* Packet was eaten by filter */
@@ -300,7 +302,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (q->vars.qavg > p->qth_max) {
q->vars.qcount = -1;
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (use_harddrop(q) || !use_ecn(q) ||
!INET_ECN_set_ce(skb)) {
q->stats.forced_drop++;
@@ -313,7 +315,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->vars.qcount = 0;
q->vars.qR = red_random(p);
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++;
goto congestion_drop;
@@ -330,7 +332,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->tab[q->tail] = skb;
q->tail = (q->tail + 1) & q->tab_mask;
++sch->q.qlen;
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -343,7 +345,7 @@ congestion_drop:
other_drop:
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -363,7 +365,7 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
q->tab[q->head] = NULL;
choke_zap_head_holes(q);
--sch->q.qlen;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
return skb;
@@ -458,7 +460,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ntab[tail++] = skb;
continue;
}
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen;
qdisc_drop(skb, sch);
}
@@ -564,7 +566,8 @@ static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
return 0;
}
-static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct choke_sched_data *q = qdisc_priv(sch);