sch_tbf: segment too big GSO packets
author	Eric Dumazet <edumazet@google.com>
Tue, 21 May 2013 08:16:46 +0000 (08:16 +0000)
committer	David S. Miller <davem@davemloft.net>
Thu, 23 May 2013 07:06:40 +0000 (00:06 -0700)
If a GSO packet has a length above the tbf burst limit, the packet
is currently silently dropped.

The current way to handle this is to set the device in non-GSO/TSO mode,
or to configure a high burst value, and it's suboptimal.
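
Not part of the patch: for context, the GSO/TSO half of that workaround is
what `ethtool -K eth0 gso off tso off` does. A minimal userspace sketch via
the ethtool ioctl (the interface name "eth0" is a placeholder):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ethtool_flag(int fd, struct ifreq *ifr, __u32 cmd, __u32 on)
{
	struct ethtool_value ev = { .cmd = cmd, .data = on };

	ifr->ifr_data = (char *)&ev;
	return ioctl(fd, SIOCETHTOOL, ifr);
}

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */

	if (ethtool_flag(fd, &ifr, ETHTOOL_SGSO, 0) < 0)	/* gso off */
		perror("ETHTOOL_SGSO");
	if (ethtool_flag(fd, &ifr, ETHTOOL_STSO, 0) < 0)	/* tso off */
		perror("ETHTOOL_STSO");
	close(fd);
	return 0;
}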

We can actually segment too-big GSO packets and send the individual
segments as the tbf parameters allow, for better interoperability.
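
For intuition, the queue-length accounting in the new tbf_segment() below can
be modeled in userspace (made-up numbers; mss plays the role of gso_size, and
max_size derives from the configured burst): one skb that the parent qdisc
accounts as a single successful enqueue becomes nb queued segments, so the
patch grows the ancestors' qlen by nb - 1 by passing a negative decrease,
qdisc_tree_decrease_qlen(sch, 1 - nb).

#include <stdio.h>

int main(void)
{
	unsigned int gso_len  = 65160;	/* 45 * 1448 byte TSO packet */
	unsigned int mss      = 1448;	/* gso_size */
	unsigned int max_size = 10240;	/* from tbf burst; < gso_len */
	unsigned int nb = (gso_len + mss - 1) / mss;

	printf("one %u byte GSO skb -> %u segments of %u bytes (<= %u)\n",
	       gso_len, nb, mss, max_size);
	printf("qdisc_tree_decrease_qlen(sch, %d): ancestors' qlen += %u\n",
	       1 - (int)nb, nb - 1);
	return 0;
}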

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Jiri Pirko <jiri@resnulli.us>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Reviewed-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/sch_tbf.c

index c8388f3c3426ab862414d1ce67e5ec3e9d13ecde..38008b0980d9f90d89a84ac217b3a03d44e80442 100644
@@ -116,14 +116,57 @@ struct tbf_sched_data {
        struct qdisc_watchdog watchdog; /* Watchdog timer */
 };
 
+
+/* GSO packet is too big, segment it so that tbf can transmit
+ * each segment in time
+ */
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct tbf_sched_data *q = qdisc_priv(sch);
+       struct sk_buff *segs, *nskb;
+       netdev_features_t features = netif_skb_features(skb);
+       int ret, nb;
+
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+       if (IS_ERR_OR_NULL(segs))
+               return qdisc_reshape_fail(skb, sch);
+
+       nb = 0;
+       while (segs) {
+               nskb = segs->next;
+               segs->next = NULL;
+               if (likely(segs->len <= q->max_size)) {
+                       qdisc_skb_cb(segs)->pkt_len = segs->len;
+                       ret = qdisc_enqueue(segs, q->qdisc);
+               } else {
+                       ret = qdisc_reshape_fail(skb, sch);
+               }
+               if (ret != NET_XMIT_SUCCESS) {
+                       if (net_xmit_drop_count(ret))
+                               sch->qstats.drops++;
+               } else {
+                       nb++;
+               }
+               segs = nskb;
+       }
+       sch->q.qlen += nb;
+       if (nb > 1)
+               qdisc_tree_decrease_qlen(sch, 1 - nb);
+       consume_skb(skb);
+       return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+}
+
 static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
        int ret;
 
-       if (qdisc_pkt_len(skb) > q->max_size)
+       if (qdisc_pkt_len(skb) > q->max_size) {
+               if (skb_is_gso(skb))
+                       return tbf_segment(skb, sch);
                return qdisc_reshape_fail(skb, sch);
-
+       }
        ret = qdisc_enqueue(skb, q->qdisc);
        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
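
Beyond the patch itself: a hedged way to exercise the new path is to attach
tbf to the egress device with a burst smaller than a full-sized TSO frame,
then push one large TCP write through it, so the stack builds GSO packets
bigger than max_size and tbf_segment() runs instead of a silent drop. The
sketch below is one such sender; 192.0.2.1:5001 is a placeholder destination.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in dst;
	static char buf[65536];	/* likely coalesced into big GSO skbs */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(5001);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}
	memset(buf, 'x', sizeof(buf));
	if (send(fd, buf, sizeof(buf), 0) < 0)
		perror("send");
	close(fd);
	return 0;
}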