/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic:
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as a fallback, with a 32-bit wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  Each flow is also linked into one Round Robin list ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per-flow list of skbs (fifo).
 *   - Use a special fifo for high prio packets
 *
 *  dequeue(): serves flows in Round Robin
 *  Note: When a flow becomes empty, we do not immediately remove it from
 *  rb trees, for performance reasons (it is expected to send additional packets,
 *  or the SLAB cache will reuse the socket for another flow)
 */
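
/* Example setup from user space (illustrative only; option names are those
 * exposed by the iproute2 'tc fq' front end and may vary between versions):
 *
 *   tc qdisc replace dev eth0 root fq
 *   tc qdisc replace dev eth0 root fq maxrate 1gbit
 */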

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_max_rate;  /* optional max rate per flow */
        u32             flow_plimit;    /* max packets per flow */
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_tcp_retrans;
        u64             stat_throttled;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;
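/* Their addresses are only used as sentinel values stored in f->next,
 * telling whether a flow is currently detached or throttled.
 */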

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = container_of(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        if (unlikely(!sk)) {
                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)(skb_get_hash(skb) | 1L);
        }

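        /* The flow key is the socket pointer (or the pseudo pointer built
         * from the packet hash above); hash it to pick one of the
         * 2^fq_trees_log RB trees.
         */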
        root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = container_of(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb->next = NULL;
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

/* We might add detection of retransmits in the future.
 * For the time being, just return false.
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
        return false;
}

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits.
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *prev, *head = flow->head;

        skb->next = NULL;
        if (!head) {
                flow->head = skb;
                flow->tail = skb;
                return;
        }
        if (likely(!skb_is_retransmit(skb))) {
                flow->tail->next = skb;
                flow->tail = skb;
                return;
        }

        /* This skb is a tcp retransmit,
         * find the last retrans packet in the queue
         */
        prev = NULL;
        while (skb_is_retransmit(head)) {
                prev = head;
                head = head->next;
                if (!head)
                        break;
        }
        if (!prev) { /* no rtx packet in queue, become the new head */
                skb->next = flow->head;
                flow->head = skb;
        } else {
                if (prev == flow->tail)
                        flow->tail = skb;
                else
                        skb->next = prev->next;
                prev->next = skb;
        }
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch);
        }

        f->qlen++;
        if (skb_is_retransmit(skb))
                q->stat_tcp_retrans++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                rb_erase(p, &q->delayed);
                q->throttled_flows--;
                fq_flow_add_tail(&q->old_flows, f);
        }
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        u32 rate;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow,
                                                           false);
                        return NULL;
                }
        }
        f = head->first;

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        if (unlikely(f->head && now < f->time_next_packet)) {
                head->first = f->next;
                fq_flow_set_throttled(q, f);
                goto begin;
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        f->time_next_packet = now;
        f->credit -= qdisc_pkt_len(skb);

        if (f->credit > 0 || !q->rate_enable)
                goto out;

        rate = q->flow_max_rate;
        if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
                rate = min(skb->sk->sk_pacing_rate, rate);

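        /* Pacing: the flow's next packet may be sent no earlier than
         * now + plen * NSEC_PER_SEC / rate (rate is in bytes per second),
         * where plen is at least one quantum.
         */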
        if (rate != ~0U) {
                u32 plen = max(qdisc_pkt_len(skb), q->quantum);
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        do_div(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed!
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }

                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct sk_buff *skb;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        while ((skb = fq_dequeue_head(sch, &q->internal)) != NULL)
                kfree_skb(skb);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = container_of(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        while ((skb = fq_dequeue_head(sch, f)) != NULL)
                                kfree_skb(skb);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}

static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = container_of(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = container_of(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void *fq_alloc_node(size_t sz, int node)
{
        void *ptr;

        ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
        if (!ptr)
                ptr = vmalloc_node(sz, node);
        return ptr;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was set up, we can allocate memory on the right NUMA node */
        array = fq_alloc_node(sizeof(struct rb_root) << log,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0)
                        q->quantum = quantum;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE])
                q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                kfree_skb(skb);
                drop_count++;
        }
        qdisc_tree_decrease_qlen(sch, drop_count);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0U;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        qdisc_watchdog_init(&q->watchdog, sch);

        if (opt)
                err = fq_change(sch, opt);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 now = ktime_get_ns();
        struct tc_fq_qd_stats st = {
                .gc_flows               = q->stat_gc_flows,
                .highprio_packets       = q->stat_internal_packets,
                .tcp_retrans            = q->stat_tcp_retrans,
                .throttled              = q->stat_throttled,
                .flows_plimit           = q->stat_flows_plimit,
                .pkts_too_long          = q->stat_pkts_too_long,
                .allocation_errors      = q->stat_allocation_errors,
                .flows                  = q->flows,
                .inactive_flows         = q->inactive_flows,
                .throttled_flows        = q->throttled_flows,
                .time_next_delayed_flow = q->time_next_delayed_flow - now,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             =       "fq",
        .priv_size      =       sizeof(struct fq_sched_data),

        .enqueue        =       fq_enqueue,
        .dequeue        =       fq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_init,
        .reset          =       fq_reset,
        .destroy        =       fq_destroy,
        .change         =       fq_change,
        .dump           =       fq_dump,
        .dump_stats     =       fq_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");