/*
 * net/sched/sch_generic.c      Generic packet scheduler routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * the qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

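/* Park an skb that could not be sent in q->gso_skb so it is retried first
 * on the next qdisc run.  The queue length still accounts for it, and the
 * qdisc is rescheduled via __netif_schedule().
 */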
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        skb_dst_force(skb);
        q->gso_skb = skb;
        q->qstats.requeues++;
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);

        return 0;
}

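/* Fetch the next skb to transmit.  A previously requeued skb (q->gso_skb)
 * is preferred, but it is only handed back once its tx queue is no longer
 * stopped or frozen; otherwise NULL is returned so the caller backs off.
 * With nothing requeued, fall through to the qdisc's own ->dequeue().
 */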
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
        struct sk_buff *skb = q->gso_skb;

        if (unlikely(skb)) {
                struct net_device *dev = qdisc_dev(q);
                struct netdev_queue *txq;

                /* check the reason for requeuing without taking the tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
                if (!netif_tx_queue_stopped(txq) &&
                    !netif_tx_queue_frozen(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
                        skb = NULL;
        } else {
                skb = q->dequeue(q);
        }

        return skb;
}

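/* Called when the driver returned NETDEV_TX_LOCKED: either this CPU is
 * recursing on its own tx lock (drop the skb and warn) or another CPU
 * holds the lock (count the collision and requeue the skb).
 */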
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /*
                 * Same CPU holding the lock. It may be a transient
                 * configuration error, when hard_start_xmit() recurses. We
                 * detect it by checking the xmit owner and drop the packet
                 * when a dead loop is detected. Return OK to try the next skb.
                 */
                kfree_skb(skb);
                if (net_ratelimit())
                        printk(KERN_WARNING "Dead loop on netdevice %s, "
                               "fix it urgently!\n", dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
                 * Another CPU is holding the lock; requeue the skb and delay
                 * xmits for some time.
                 */
                __get_cpu_var(softnet_data).cpu_collision++;
                ret = dev_requeue_skb(skb, q);
        }

        return ret;
}

/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *                              0  - queue is empty or throttled.
 *                              >0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);

        HARD_TX_UNLOCK(dev, txq);

        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* Driver trylock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                               dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq)))
                ret = 0;

        return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to the device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 *  if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *                              0  - queue is empty or throttled.
 *                              >0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
        struct netdev_queue *txq;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;

        /* Dequeue packet */
        skb = dequeue_skb(q);
        if (unlikely(!skb))
                return 0;
        WARN_ON_ONCE(skb_dst_is_noref(skb));
        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

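/* Run the qdisc until it is empty or throttled.  Processing is handed back
 * to softirq context (via __netif_schedule()) as soon as another task needs
 * the CPU or we have been running for a full jiffy, so one busy queue cannot
 * monopolise the CPU.
 */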
void __qdisc_run(struct Qdisc *q)
{
        unsigned long start_time = jiffies;

        while (qdisc_restart(q)) {
                /*
                 * Postpone processing if
                 * 1. another process needs the CPU;
                 * 2. we've been doing it for too long.
                 */
                if (need_resched() || jiffies != start_time) {
                        __netif_schedule(q);
                        break;
                }
        }

        clear_bit(__QDISC_STATE_RUNNING, &q->state);
}

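/* dev_trans_start - return the timestamp of the most recent transmit start
 * across all tx queues of the device, falling back to dev->trans_start,
 * which old drivers still update directly.  The result is cached back into
 * dev->trans_start.
 */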
unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res = dev->trans_start;
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }
        dev->trans_start = res;
        return res;
}
EXPORT_SYMBOL(dev_trans_start);

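/* Per-device watchdog timer.  If any stopped tx queue has gone longer than
 * dev->watchdog_timeo without a transmit, warn once and call the driver's
 * ndo_tx_timeout(); the timer is re-armed as long as the device is present,
 * running and has carrier.
 */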
static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                /*
                                 * old device drivers set dev->trans_start
                                 */
                                trans_start = txq->trans_start ? : dev->trans_start;
                                if (netif_tx_queue_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        break;
                                }
                        }

                        if (some_queue_timedout) {
                                char drivername[64];
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                       dev->name, netdev_drivername(dev, drivername, 64), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);

        dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}

static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}

/**
 *      netif_carrier_on - set carrier
 *      @dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *      netif_carrier_off - clear carrier
 *      @dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                linkwatch_fire_event(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_off);

330 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
331    under all circumstances. It is difficult to invent anything faster or
332    cheaper.
333  */
334
335 static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
336 {
337         kfree_skb(skb);
338         return NET_XMIT_CN;
339 }
340
341 static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
342 {
343         return NULL;
344 }
345
346 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
347         .id             =       "noop",
348         .priv_size      =       0,
349         .enqueue        =       noop_enqueue,
350         .dequeue        =       noop_dequeue,
351         .peek           =       noop_dequeue,
352         .owner          =       THIS_MODULE,
353 };
354
static struct netdev_queue noop_netdev_queue = {
        .qdisc          =       &noop_qdisc,
        .qdisc_sleeping =       &noop_qdisc,
};

struct Qdisc noop_qdisc = {
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noop_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noop_qdisc.list),
        .q.lock         =       __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue      =       &noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id             =       "noqueue",
        .priv_size      =       0,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .peek           =       noop_dequeue,
        .owner          =       THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
        .qdisc          =       &noqueue_qdisc,
        .qdisc_sleeping =       &noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
        .enqueue        =       NULL,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noqueue_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noqueue_qdisc.list),
        .q.lock         =       __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
        .dev_queue      =       &noqueue_netdev_queue,
};


static const u8 prio2band[TC_PRIO_MAX+1] =
        { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
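/* prio2band maps the TC_PRIO_* value (skb->priority & TC_PRIO_MAX) to one
 * of the three bands; band 0 is served first.  For example TC_PRIO_BESTEFFORT
 * (0) lands in band 1, TC_PRIO_BULK (2) in band 2, and TC_PRIO_INTERACTIVE (6)
 * and TC_PRIO_CONTROL (7) in band 0.
 */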

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *      - queues for the three bands
 *      - bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
        u32 bitmap;
        struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *      bitmap=0 means there are no skbs on any band.
 *      bitmap=1 means there is an skb on band 0.
 *      bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
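/* Example: with skbs queued on bands 1 and 2 the bitmap is 0x6, and
 * bitmap2band[6] == 1, so the higher-priority band 1 is dequeued first.
 */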

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
                                             int band)
{
        return priv->q + band;
}

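/* Enqueue at the tail of the band selected by skb->priority.  The total
 * length across all three bands is bounded by the device's tx_queue_len;
 * beyond that the packet is dropped via qdisc_drop().
 */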
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
                struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                struct sk_buff_head *list = band2list(priv, band);

                priv->bitmap |= (1 << band);
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (likely(band >= 0)) {
                struct sk_buff_head *list = band2list(priv, band);
                struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

                qdisc->q.qlen--;
                if (skb_queue_empty(list))
                        priv->bitmap &= ~(1 << band);

                return skb;
        }

        return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (band >= 0) {
                struct sk_buff_head *list = band2list(priv, band);

                return skb_peek(list);
        }

        return NULL;
}

static void pfifo_fast_reset(struct Qdisc* qdisc)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(qdisc, band2list(priv, prio));

        priv->bitmap = 0;
        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
        NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

nla_put_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                skb_queue_head_init(band2list(priv, prio));

        return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .id             =       "pfifo_fast",
        .priv_size      =       sizeof(struct pfifo_fast_priv),
        .enqueue        =       pfifo_fast_enqueue,
        .dequeue        =       pfifo_fast_dequeue,
        .peek           =       pfifo_fast_peek,
        .init           =       pfifo_fast_init,
        .reset          =       pfifo_fast_reset,
        .dump           =       pfifo_fast_dump,
        .owner          =       THIS_MODULE,
};

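/* Allocate a zeroed Qdisc together with ops->priv_size bytes of private
 * data in one block.  The returned pointer is rounded up to QDISC_ALIGNTO,
 * and sch->padded remembers the offset back to the start of the allocation
 * so qdisc_rcu_free() can free it later.
 */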
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size;
        int err = -ENOBUFS;

        /* ensure that the Qdisc and the private data are 64-byte aligned */
        size = QDISC_ALIGN(sizeof(*sch));
        size += ops->priv_size + (QDISC_ALIGNTO - 1);

        p = kzalloc(size, GFP_KERNEL);
        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        sch->padded = (char *) sch - (char *) p;

        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
        dev_hold(qdisc_dev(sch));
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}

struct Qdisc * qdisc_create_dflt(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops,
                                 unsigned int parentid)
{
        struct Qdisc *sch;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch))
                goto errout;
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
errout:
        return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);

        if (qdisc->gso_skb) {
                kfree_skb(qdisc->gso_skb);
                qdisc->gso_skb = NULL;
                qdisc->q.qlen = 0;
        }
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

        kfree((char *) qdisc - qdisc->padded);
}

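/* Drop a reference to the qdisc.  Built-in qdiscs are never freed; for the
 * rest, once the last reference is gone the qdisc is unlinked, its rate
 * estimator is killed, the ops tear it down, and the memory is released
 * after an RCU grace period via qdisc_rcu_free().
 */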
void qdisc_destroy(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

#ifdef CONFIG_NET_SCHED
        qdisc_list_del(qdisc);

        qdisc_put_stab(qdisc->stab);
#endif
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));

        kfree_skb(qdisc->gso_skb);
        /*
         * gen_estimator est_timer() might access qdisc->q.lock,
         * wait an RCU grace period before freeing qdisc.
         */
        call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc)
{
        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;

        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);

        /* Prune old scheduler */
        if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
                qdisc_reset(oqdisc);

        /* ... and graft new one */
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
        dev_queue->qdisc_sleeping = qdisc;
        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

        spin_unlock_bh(root_lock);

        return oqdisc;
}

static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc;

        if (dev->tx_queue_len) {
                qdisc = qdisc_create_dflt(dev, dev_queue,
                                          &pfifo_fast_ops, TC_H_ROOT);
                if (!qdisc) {
                        printk(KERN_INFO "%s: activation failed\n", dev->name);
                        return;
                }

                /* Can by-pass the queue discipline for default qdisc */
                qdisc->flags |= TCQ_F_CAN_BYPASS;
        } else {
                qdisc = &noqueue_qdisc;
        }
        dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
        struct netdev_queue *txq;
        struct Qdisc *qdisc;

        txq = netdev_get_tx_queue(dev, 0);

        if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                atomic_inc(&dev->qdisc->refcnt);
        } else {
                qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
                if (qdisc) {
                        qdisc->ops->attach(qdisc);
                        dev->qdisc = qdisc;
                }
        }
}

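/* Make the sleeping qdisc the active one for this tx queue and record
 * whether the device watchdog needs to run, i.e. whether something other
 * than the built-in noqueue qdisc was attached.
 */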
static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
{
        struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
        int *need_watchdog_p = _need_watchdog;

        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

        rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
        if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
                dev_queue->trans_start = 0;
                *need_watchdog_p = 1;
        }
}

void dev_activate(struct net_device *dev)
{
        int need_watchdog;

        /* No queueing discipline is attached to the device;
           create a default one, i.e. pfifo_fast for devices
           which need queueing and noqueue_qdisc for virtual
           interfaces.
         */

        if (dev->qdisc == &noop_qdisc)
                attach_default_qdiscs(dev);

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        need_watchdog = 0;
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
        transition_one_qdisc(dev, &dev->rx_queue, NULL);

        if (need_watchdog) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
}

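/* Point the active qdisc of this queue at @_qdisc_default (normally
 * &noop_qdisc) and reset the old one, so no new packets are queued while
 * the device is going down.
 */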
static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;
        struct Qdisc *qdisc;

        qdisc = dev_queue->qdisc;
        if (qdisc) {
                spin_lock_bh(qdisc_lock(qdisc));

                if (!(qdisc->flags & TCQ_F_BUILTIN))
                        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                qdisc_reset(qdisc);

                spin_unlock_bh(qdisc_lock(qdisc));
        }
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                spinlock_t *root_lock;
                struct Qdisc *q;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);

                val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
                       test_bit(__QDISC_STATE_SCHED, &q->state));

                spin_unlock_bh(root_lock);

                if (val)
                        return true;
        }
        return false;
}

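/* Take the device's transmit path out of service: switch every queue over
 * to noop_qdisc, stop the watchdog, and wait until no CPU is still running
 * (or has scheduled) any of the old qdiscs.
 */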
void dev_deactivate(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
        dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);

        dev_watchdog_down(dev);

        /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
        synchronize_rcu();

        /* Wait for outstanding qdisc_run calls. */
        while (some_qdisc_is_busy(dev))
                yield();
}

static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        dev_queue->qdisc = qdisc;
        dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
        dev->qdisc = &noop_qdisc;
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);

        setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        struct Qdisc *qdisc_default = _qdisc_default;

        if (qdisc) {
                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                dev_queue->qdisc_sleeping = qdisc_default;

                qdisc_destroy(qdisc);
        }
}

void dev_shutdown(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
        qdisc_destroy(dev->qdisc);
        dev->qdisc = &noop_qdisc;

        WARN_ON(timer_pending(&dev->watchdog_timer));
}