From: Eric Dumazet <edumazet@google.com>

codel_should_drop() logic allows a packet not to be dropped if the
queue size is under the max packet size.

In fq_codel, we have two possible backlogs: the global qdisc one and
the per-flow one.

The meaningful one for codel_should_drop() is the global backlog, not
the per-flow one, so that thin flows can have a non-zero drop/mark
probability.

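As a stand-alone illustration (a user-space toy sketch, not kernel code;
the names toy_state, flow_backlog, qdisc_backlog, should_drop_per_flow
and should_drop_global are invented for this example), the snippet below
mimics the CoDel threshold check to show why comparing against a
per-flow backlog exempts thin flows from dropping while comparing
against the global qdisc backlog does not:

#include <stdbool.h>
#include <stdio.h>

struct toy_state {
	unsigned int flow_backlog;   /* bytes queued in this one flow */
	unsigned int qdisc_backlog;  /* bytes queued in the whole qdisc */
	unsigned int maxpacket;      /* largest packet size seen so far */
	bool         over_target;    /* sojourn time above the CoDel target? */
};

/* Per-flow threshold: a thin flow rarely holds more than one packet,
 * so it never becomes eligible for drop/mark. */
static bool should_drop_per_flow(const struct toy_state *s)
{
	return s->over_target && s->flow_backlog > s->maxpacket;
}

/* Global threshold: eligibility depends on the whole qdisc backlog. */
static bool should_drop_global(const struct toy_state *s)
{
	return s->over_target && s->qdisc_backlog > s->maxpacket;
}

int main(void)
{
	/* A thin flow holding one 100-byte packet inside a 60 kB qdisc backlog. */
	struct toy_state thin = {
		.flow_backlog  = 100,
		.qdisc_backlog = 60000,
		.maxpacket     = 1514,
		.over_target   = true,
	};

	printf("per-flow check allows drop: %d\n", should_drop_per_flow(&thin)); /* 0 */
	printf("global   check allows drop: %d\n", should_drop_global(&thin));   /* 1 */
	return 0;
}

With the per-flow check the 100-byte flow never exceeds maxpacket, so it
is never dropped or marked; with the global check it is.
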
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
---
 include/net/codel.h      |   15 +++++++--------
 net/sched/sch_codel.c    |    4 ++--
 net/sched/sch_fq_codel.c |    5 +++--
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/net/codel.h b/include/net/codel.h
index 7546517..550debf 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(codel_time_t t,
 
 
 static bool codel_should_drop(const struct sk_buff *skb,
-                             unsigned int *backlog,
+                             struct Qdisc *sch,
                              struct codel_vars *vars,
                              struct codel_params *params,
                              struct codel_stats *stats,
@@ -219,13 +219,13 @@ static bool codel_should_drop(const struct sk_buff *skb,
        }
 
        vars->ldelay = now - codel_get_enqueue_time(skb);
-       *backlog -= qdisc_pkt_len(skb);
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
 
        if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
                stats->maxpacket = qdisc_pkt_len(skb);
 
        if (codel_time_before(vars->ldelay, params->target) ||
-           *backlog <= stats->maxpacket) {
+           sch->qstats.backlog <= stats->maxpacket) {
                /* went below - stay below for at least interval */
                vars->first_above_time = 0;
                return false;
@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                                     struct codel_params *params,
                                     struct codel_vars *vars,
                                     struct codel_stats *stats,
-                                    codel_skb_dequeue_t dequeue_func,
-                                    u32 *backlog)
+                                    codel_skb_dequeue_t dequeue_func)
 {
        struct sk_buff *skb = dequeue_func(vars, sch);
        codel_time_t now;
@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                return skb;
        }
        now = codel_get_time();
-       drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+       drop = codel_should_drop(skb, sch, vars, params, stats, now);
        if (vars->dropping) {
                if (!drop) {
                        /* sojourn time below target - leave dropping state */
@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                                qdisc_drop(skb, sch);
                                stats->drop_count++;
                                skb = dequeue_func(vars, sch);
-                               if (!codel_should_drop(skb, backlog,
+                               if (!codel_should_drop(skb, sch,
                                                       vars, params, stats, now)) {
                                        /* leave dropping state */
                                        vars->dropping = false;
@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                        stats->drop_count++;
 
                        skb = dequeue_func(vars, sch);
-                       drop = codel_should_drop(skb, backlog, vars, params,
+                       drop = codel_should_drop(skb, sch, vars, params,
                                                 stats, now);
                }
                vars->dropping = true;
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 213ef60..2f9ab17 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
        struct codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
 
-       skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
-                           dequeue, &sch->qstats.backlog);
+       skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
        /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
        * or HTB crashes. Defer it for next round.
        */
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 337ff20..9fc1c62 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  */
 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
 {
+       struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;
 
        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                sch->q.qlen--;
        }
        return skb;
@@ -256,7 +257,7 @@ begin:
        prev_ecn_mark = q->cstats.ecn_mark;
 
        skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
-                           dequeue, &q->backlogs[flow - q->flows]);
+                           dequeue);
 
        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;