/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/*	Simple Token Bucket Filter.
	=======================================

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)
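
	For instance, with rate R = 125 Kbytes/sec and depth B = 10 Kbytes,
	at most B + R*1 sec = 135 Kbytes may be sent in any one-second window.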

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
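
	For instance, at R = 125 Kbytes/sec a 1500 byte packet costs
	S/R = 12 msec; it may leave as soon as N(t) >= 12 msec, after
	which N(t) drops by those 12 msec.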

	Actually, QoS requires two TBF to be applied to a data stream.
	One of them controls the steady state burst size, another one
	with rate P (peak rate) and depth M (equal to link MTU)
	limits bursts at a smaller time scale.

	It is easy to see that P>R, and B>M. If P is infinity, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)
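
	For instance, with backlog limit L = 100 Kbytes, B = 10 Kbytes,
	R = 125 Kbytes/sec and no peak rate, lat = (100-10)/125 sec = 0.72 sec.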

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500 and
	HZ=100, P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use an Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
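
/* For illustration only: a typical userspace setup of this qdisc is
 * something like
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 50ms
 *
 * where "rate" and "burst" become q->rate and q->buffer below, and tc
 * derives the byte limit passed to the inner bfifo from "latency".
 */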

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	u32		max_size;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	bool peak_present;

/* Variables */
	s64	tokens;			/* Current number of B tokens */
	s64	ptokens;		/* Current number of P tokens */
	s64	t_c;			/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

/* Time to Length, convert time in ns to length in bytes
 * to determine how many bytes can be sent in given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);
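
	/* An ATM cell carries 48 bytes of payload in 53 bytes on the wire,
	 * hence the 48/53 scaling below for ATM link layers.
	 */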
	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}

/* Return length of individual segments of a gso packet,
 * including all headers (MAC, IP, TCP/UDP)
 */
static unsigned int skb_gso_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		hdr_len += tcp_hdrlen(skb);
	else
		hdr_len += sizeof(struct udphdr);
	return hdr_len + shinfo->gso_size;
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs))
		return qdisc_reshape_fail(skb, sch);

	nb = 0;
	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		qdisc_skb_cb(segs)->pkt_len = segs->len;
		ret = qdisc_enqueue(segs, q->qdisc);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				sch->qstats.drops++;
		} else {
			nb++;
		}
		segs = nskb;
	}
	sch->q.qlen += nb;
	if (nb > 1)
		qdisc_tree_decrease_qlen(sch, 1 - nb);
	consume_skb(skb);
	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
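
/* A packet larger than max_size can never gather enough tokens to be
 * sent in one piece; GSO packets are segmented in software instead of
 * being dropped outright.
 */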
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
			return tbf_segment(skb, sch);
		return qdisc_reshape_fail(skb, sch);
	}
	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);
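
	/* Tokens are accounted in nanoseconds of transmission time at the
	 * configured rate (and peak rate); the head packet may only leave
	 * if charging its length leaves both buckets non-negative.
	 */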
	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_to_ns(ktime_get());
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (q->peak_present) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}
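
		/* Not enough tokens: arm the watchdog for the instant the
		 * more depleted bucket reaches zero again.
		 */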
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC).
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = ktime_to_ns(ktime_get());
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]		= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
};
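
/* TCA_TBF_RATE64/TCA_TBF_PRATE64 carry rates that do not fit in the
 * legacy 32 bit tc_ratespec (>= 2^32 bytes/sec); see tbf_dump().
 */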

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB]));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB]));

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
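
	/* max_size is the largest packet a completely full bucket can pay
	 * for; anything bigger is handled by tbf_enqueue()/tbf_segment().
	 */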

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu !\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	if (qopt->peakrate.rate) {
		memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
		q->peak_present = true;
	} else {
		q->peak_present = false;
	}

	sch_tree_unlock(sch);
	err = 0;

done:
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = ktime_to_ns(ktime_get());
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (q->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
		goto nla_put_failure;
	if (q->peak_present &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");