/*
 * net/sched/sch_gred.c Generic Random Early Detection queue.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on a suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */
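
/*
 * Illustrative userspace setup (not part of this file): GRED is normally
 * configured with the tc utility, first sizing the DP table and then
 * parameterizing each virtual queue.  The option names below follow tc's
 * gred syntax as the author recalls it and are shown only as an assumed
 * example:
 *
 *   tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *   tc qdisc change dev eth0 root gred DP 0 prio 2 limit 60KB min 15KB \
 *      max 45KB burst 20 avpkt 1000 bandwidth 10Mbit probability 0.02
 *
 * Packets are steered to a DP via skb->tc_index (e.g. set by dsmark or a
 * tc filter); the low bits select the virtual queue, see tc_index_to_dp()
 * below.
 */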

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

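/*
 * Virtual queues are addressed through the low bits of skb->tc_index;
 * GRED_VQ_MASK therefore relies on MAX_DPs being a power of two.  The
 * default priority sits in the middle of the range.
 */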
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
        u32             DP;             /* the drop parameters */
        u32             bytesin;        /* bytes seen on virtualQ so far */
        u32             packetsin;      /* packets seen on virtualQ so far */
        u32             backlog;        /* bytes on the virtualQ */
        u8              prio;           /* the prio of this vq */

        struct red_parms parms;
        struct red_stats stats;
};

enum {
        GRED_WRED_MODE = 1,
        GRED_RIO_MODE,
};

struct gred_sched {
        struct gred_sched_data *tab[MAX_DPs];
        unsigned long   flags;
        u32             red_flags;
        u32             DPs;
        u32             def;
        struct red_parms wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
        return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
        __set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
        __clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
        return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
        __set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
        __clear_bit(GRED_RIO_MODE, &table->flags);
}

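/*
 * Returns 1 if any two distinct VQs share a priority.  Callers use this
 * to switch to WRED mode (one shared average queue) whenever the
 * configured priorities do not uniquely identify the VQs.
 */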
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i;

        /* Really ugly O(n^2), but it shouldn't run too frequently. */
        for (i = 0; i < table->DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                int n;

                if (q == NULL)
                        continue;

                for (n = 0; n < table->DPs; n++)
                        if (table->tab[n] && table->tab[n] != q &&
                            table->tab[n]->prio == q->prio)
                                return 1;
        }

        return 0;
}

static inline unsigned int gred_backlog(struct gred_sched *table,
                                        struct gred_sched_data *q,
                                        struct Qdisc *sch)
{
        if (gred_wred_mode(table))
                return sch->qstats.backlog;
        else
                return q->backlog;
}

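/* The low bits of skb->tc_index select the virtual queue (DP). */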
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
        return skb->tc_index & GRED_VQ_MASK;
}

static inline void gred_load_wred_set(struct gred_sched *table,
                                      struct gred_sched_data *q)
{
        q->parms.qavg = table->wred_set.qavg;
        q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
                                       struct gred_sched_data *q)
{
        table->wred_set.qavg = q->parms.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
        return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
        return t->red_flags & TC_RED_HARDDROP;
}

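/*
 * Classify the packet to a VQ via its tc_index (falling back to the
 * default DP), update the shared or per-VQ average queue length, then
 * apply the RED verdict: pass, ECN-mark or drop.
 */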
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct gred_sched_data *q = NULL;
        struct gred_sched *t = qdisc_priv(sch);
        unsigned long qavg = 0;
        u16 dp = tc_index_to_dp(skb);

        if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                dp = t->def;

                if ((q = t->tab[dp]) == NULL) {
                        /* Pass through packets not assigned to a DP
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
                         */
                        if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
                                return qdisc_enqueue_tail(skb, sch);
                        else
                                goto drop;
                }

                /* fix tc_index? --could be controversial but needed for
                   requeueing */
                skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
        }

        /* sum up all the qavgs of prios lower than ours to get the new qavg */
        if (!gred_wred_mode(t) && gred_rio_mode(t)) {
                int i;

                for (i = 0; i < t->DPs; i++) {
                        if (t->tab[i] && t->tab[i]->prio < q->prio &&
                            !red_is_idling(&t->tab[i]->parms))
                                qavg += t->tab[i]->parms.qavg;
                }
        }

        q->packetsin++;
        q->bytesin += qdisc_pkt_len(skb);

        if (gred_wred_mode(t))
                gred_load_wred_set(t, q);

        q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

        if (red_is_idling(&q->parms))
                red_end_of_idle_period(&q->parms);

        if (gred_wred_mode(t))
                gred_store_wred_set(t, q);

        switch (red_action(&q->parms, q->parms.qavg + qavg)) {
        case RED_DONT_MARK:
                break;

        case RED_PROB_MARK:
                sch->qstats.overlimits++;
                if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
                }

                q->stats.prob_mark++;
                break;

        case RED_HARD_MARK:
                sch->qstats.overlimits++;
                if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
                    !INET_ECN_set_ce(skb)) {
                        q->stats.forced_drop++;
                        goto congestion_drop;
                }
                q->stats.forced_mark++;
                break;
        }

        if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
                q->backlog += qdisc_pkt_len(skb);
                return qdisc_enqueue_tail(skb, sch);
        }

        q->stats.pdrop++;
drop:
        return qdisc_drop(skb, sch);

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;
}

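/*
 * On dequeue, credit the owning VQ's backlog.  A VQ that drains empty
 * (or, in WRED mode, the shared set once the whole queue is empty)
 * starts an idle period so the average decays while idle.
 */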
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_head(sch);

        if (skb) {
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "GRED: Unable to relocate "
                                       "VQ 0x%x after dequeue, screwing up "
                                       "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= qdisc_pkt_len(skb);

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
                }

                return skb;
        }

        if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
                red_start_of_idle_period(&t->wred_set);

        return NULL;
}

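/*
 * ->drop removes from the tail of the shared queue; account the loss
 * to the owning VQ and count it as an "other" (non-RED) drop.
 */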
static unsigned int gred_drop(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct gred_sched *t = qdisc_priv(sch);

        skb = qdisc_dequeue_tail(sch);
        if (skb) {
                unsigned int len = qdisc_pkt_len(skb);
                struct gred_sched_data *q;
                u16 dp = tc_index_to_dp(skb);

                if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "GRED: Unable to relocate "
                                       "VQ 0x%x while dropping, screwing up "
                                       "backlog.\n", tc_index_to_dp(skb));
                } else {
                        q->backlog -= len;
                        q->stats.other++;

                        if (!q->backlog && !gred_wred_mode(t))
                                red_start_of_idle_period(&q->parms);
                }

                qdisc_drop(skb, sch);
                return len;
        }

        if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
                red_start_of_idle_period(&t->wred_set);

        return 0;
}

static void gred_reset(struct Qdisc *sch)
{
        int i;
        struct gred_sched *t = qdisc_priv(sch);

        qdisc_reset_queue(sch);

        for (i = 0; i < t->DPs; i++) {
                struct gred_sched_data *q = t->tab[i];

                if (!q)
                        continue;

                red_restart(&q->parms);
                q->backlog = 0;
        }
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
        kfree(q);
}

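/*
 * (Re)size the DP table from a TCA_GRED_DPS attribute.  Shrinking the
 * table destroys any now-shadowed VQs; the rio/wred mode bits are
 * recomputed from the new setup.
 */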
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_sopt *sopt;
        int i;

        if (dps == NULL)
                return -EINVAL;

        sopt = nla_data(dps);

        if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
                return -EINVAL;

        sch_tree_lock(sch);
        table->DPs = sopt->DPs;
        table->def = sopt->def_DP;
        table->red_flags = sopt->flags;

        /*
         * Every entry point to GRED is synchronized with the above code
         * and the DP is checked against DPs, i.e. shadowed VQs can no
         * longer be found so we can unlock right here.
         */
        sch_tree_unlock(sch);

        if (sopt->grio) {
                gred_enable_rio_mode(table);
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        } else {
                gred_disable_rio_mode(table);
                gred_disable_wred_mode(table);
        }

        for (i = table->DPs; i < MAX_DPs; i++) {
                if (table->tab[i]) {
                        printk(KERN_WARNING "GRED: Warning: Destroying "
                               "shadowed VQ 0x%x\n", i);
                        gred_destroy_vq(table->tab[i]);
                        table->tab[i] = NULL;
                }
        }

        return 0;
}

static inline int gred_change_vq(struct Qdisc *sch, int dp,
                                 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q;

        if (table->tab[dp] == NULL) {
                table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
                if (table->tab[dp] == NULL)
                        return -ENOMEM;
        }

        q = table->tab[dp];
        q->DP = dp;
        q->prio = prio;
        q->limit = ctl->limit;

        if (q->backlog == 0)
                red_end_of_idle_period(&q->parms);

        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
                      ctl->Scell_log, stab);

        return 0;
}

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_PARMS]        = { .len = sizeof(struct tc_gred_qopt) },
        [TCA_GRED_STAB]         = { .len = 256 },
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
};

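/*
 * Two kinds of change requests: a bare table setup (no PARMS/STAB,
 * handled by gred_change_table_def()) or per-VQ RED parameters, which
 * require both TCA_GRED_PARMS and the 256-byte TCA_GRED_STAB table.
 */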
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt *ctl;
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err, prio = GRED_DEF_PRIO;
        u8 *stab;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
        if (err < 0)
                return err;

        if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
                return gred_change_table_def(sch, opt);

        if (tb[TCA_GRED_PARMS] == NULL ||
            tb[TCA_GRED_STAB] == NULL)
                return -EINVAL;

        err = -EINVAL;
        ctl = nla_data(tb[TCA_GRED_PARMS]);
        stab = nla_data(tb[TCA_GRED_STAB]);

        if (ctl->DP >= table->DPs)
                goto errout;

        if (gred_rio_mode(table)) {
                if (ctl->prio == 0) {
                        int def_prio = GRED_DEF_PRIO;

                        if (table->tab[table->def])
                                def_prio = table->tab[table->def]->prio;

                        printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
                               "setting default to %d\n", ctl->DP, def_prio);

                        prio = def_prio;
                } else
                        prio = ctl->prio;
        }

        sch_tree_lock(sch);

        err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
        if (err < 0)
                goto errout_locked;

        if (gred_rio_mode(table)) {
                gred_disable_wred_mode(table);
                if (gred_wred_mode_check(sch))
                        gred_enable_wred_mode(table);
        }

        err = 0;

errout_locked:
        sch_tree_unlock(sch);
errout:
        return err;
}

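/*
 * Initial setup accepts only the DP table layout; per-VQ parameters
 * have to be supplied through a subsequent change request.
 */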
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
        if (err < 0)
                return err;

        if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
                return -EINVAL;

        return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

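/*
 * The dump always carries MAX_DPs qopt entries; a DP with no VQ is
 * flagged by an out-of-range DP number (MAX_DPs + i) so tc can tell
 * the slot is empty.
 */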
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct nlattr *parms, *opts = NULL;
        int i;
        struct tc_gred_sopt sopt = {
                .DPs    = table->DPs,
                .def_DP = table->def,
                .grio   = gred_rio_mode(table),
                .flags  = table->red_flags,
        };

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;

        for (i = 0; i < MAX_DPs; i++) {
                struct gred_sched_data *q = table->tab[i];
                struct tc_gred_qopt opt;

                memset(&opt, 0, sizeof(opt));

                if (!q) {
                        /* hack -- fix at some point with a proper message.
                           This is how we indicate to tc that there is no VQ
                           at this DP */

                        opt.DP = MAX_DPs + i;
                        goto append_opt;
                }

                opt.limit       = q->limit;
                opt.DP          = q->DP;
                opt.backlog     = q->backlog;
                opt.prio        = q->prio;
                opt.qth_min     = q->parms.qth_min >> q->parms.Wlog;
                opt.qth_max     = q->parms.qth_max >> q->parms.Wlog;
                opt.Wlog        = q->parms.Wlog;
                opt.Plog        = q->parms.Plog;
                opt.Scell_log   = q->parms.Scell_log;
                opt.other       = q->stats.other;
                opt.early       = q->stats.prob_drop;
                opt.forced      = q->stats.forced_drop;
                opt.pdrop       = q->stats.pdrop;
                opt.packets     = q->packetsin;
                opt.bytesin     = q->bytesin;

                if (gred_wred_mode(table)) {
                        q->parms.qidlestart =
                                table->tab[table->def]->parms.qidlestart;
                        q->parms.qavg = table->tab[table->def]->parms.qavg;
                }

                opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
                if (nla_append(skb, sizeof(opt), &opt) < 0)
                        goto nla_put_failure;
        }

        nla_nest_end(skb, parms);

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
        struct gred_sched *table = qdisc_priv(sch);
        int i;

        for (i = 0; i < table->DPs; i++) {
                if (table->tab[i])
                        gred_destroy_vq(table->tab[i]);
        }
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
        .id             =       "gred",
        .priv_size      =       sizeof(struct gred_sched),
        .enqueue        =       gred_enqueue,
        .dequeue        =       gred_dequeue,
        .peek           =       qdisc_peek_head,
        .drop           =       gred_drop,
        .init           =       gred_init,
        .reset          =       gred_reset,
        .destroy        =       gred_destroy,
        .change         =       gred_change,
        .dump           =       gred_dump,
        .owner          =       THIS_MODULE,
};

static int __init gred_module_init(void)
{
        return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
        unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");