Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
diff --git a/net/core/dev.c b/net/core/dev.c
index 7e00a7342ee6aa4e9d7ab408423694fecb2966db..c95d664b2b423e24c22596e29e0d81ef082340b0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2145,30 +2145,42 @@ void __netif_schedule(struct Qdisc *q)
 }
 EXPORT_SYMBOL(__netif_schedule);
 
-void dev_kfree_skb_irq(struct sk_buff *skb)
+struct dev_kfree_skb_cb {
+       enum skb_free_reason reason;
+};
+
+static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
 {
-       if (atomic_dec_and_test(&skb->users)) {
-               struct softnet_data *sd;
-               unsigned long flags;
+       return (struct dev_kfree_skb_cb *)skb->cb;
+}
 
-               local_irq_save(flags);
-               sd = &__get_cpu_var(softnet_data);
-               skb->next = sd->completion_queue;
-               sd->completion_queue = skb;
-               raise_softirq_irqoff(NET_TX_SOFTIRQ);
-               local_irq_restore(flags);
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+{
+       unsigned long flags;
+
+       if (likely(atomic_read(&skb->users) == 1)) {
+               smp_rmb();
+               atomic_set(&skb->users, 0);
+       } else if (likely(!atomic_dec_and_test(&skb->users))) {
+               return;
        }
+       get_kfree_skb_cb(skb)->reason = reason;
+       local_irq_save(flags);
+       skb->next = __this_cpu_read(softnet_data.completion_queue);
+       __this_cpu_write(softnet_data.completion_queue, skb);
+       raise_softirq_irqoff(NET_TX_SOFTIRQ);
+       local_irq_restore(flags);
 }
-EXPORT_SYMBOL(dev_kfree_skb_irq);
+EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
-void dev_kfree_skb_any(struct sk_buff *skb)
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
 {
        if (in_irq() || irqs_disabled())
-               dev_kfree_skb_irq(skb);
+               __dev_kfree_skb_irq(skb, reason);
        else
                dev_kfree_skb(skb);
 }
-EXPORT_SYMBOL(dev_kfree_skb_any);
+EXPORT_SYMBOL(__dev_kfree_skb_any);
 
 
 /**
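For context, a minimal sketch of the driver-facing side of this change, assuming the companion include/linux/netdevice.h update from the same series: the historical dev_kfree_skb_irq()/dev_kfree_skb_any() entry points become static inline wrappers around the new reason-carrying helpers, and consume variants are added so net_tx_action() can emit trace_consume_skb() for packets that were transmitted rather than dropped. The exact wrapper declarations below are assumptions, not part of this file:

enum skb_free_reason {
	SKB_REASON_CONSUMED,	/* skb left the host successfully */
	SKB_REASON_DROPPED,	/* skb was dropped; keep trace_kfree_skb() */
};

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);

/* presumed wrappers preserving the old entry points */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
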
@@ -2523,21 +2535,6 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_skb_features);
 
-/*
- * Returns true if either:
- *     1. skb has frag_list and the device doesn't support FRAGLIST, or
- *     2. skb is fragmented and the device does not support SG.
- */
-static inline int skb_needs_linearize(struct sk_buff *skb,
-                                     netdev_features_t features)
-{
-       return skb_is_nonlinear(skb) &&
-                       ((skb_has_frag_list(skb) &&
-                               !(features & NETIF_F_FRAGLIST)) ||
-                       (skb_shinfo(skb)->nr_frags &&
-                               !(features & NETIF_F_SG)));
-}
-
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq, void *accel_priv)
 {
@@ -3306,7 +3303,10 @@ static void net_tx_action(struct softirq_action *h)
                        clist = clist->next;
 
                        WARN_ON(atomic_read(&skb->users));
-                       trace_kfree_skb(skb, net_tx_action);
+                       if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
+                               trace_consume_skb(skb);
+                       else
+                               trace_kfree_skb(skb, net_tx_action);
                        __kfree_skb(skb);
                }
        }
@@ -3752,7 +3752,7 @@ static int napi_gro_complete(struct sk_buff *skb)
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
 
-               err = ptype->callbacks.gro_complete(skb);
+               err = ptype->callbacks.gro_complete(skb, 0);
                break;
        }
        rcu_read_unlock();
@@ -3818,6 +3818,23 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
        }
 }
 
+static void skb_gro_reset_offset(struct sk_buff *skb)
+{
+       const struct skb_shared_info *pinfo = skb_shinfo(skb);
+       const skb_frag_t *frag0 = &pinfo->frags[0];
+
+       NAPI_GRO_CB(skb)->data_offset = 0;
+       NAPI_GRO_CB(skb)->frag0 = NULL;
+       NAPI_GRO_CB(skb)->frag0_len = 0;
+
+       if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
+           pinfo->nr_frags &&
+           !PageHighMem(skb_frag_page(frag0))) {
+               NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+               NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+       }
+}
+
 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct sk_buff **pp = NULL;
@@ -3833,6 +3850,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (skb_is_gso(skb) || skb_has_frag_list(skb))
                goto normal;
 
+       skb_gro_reset_offset(skb);
        gro_list_prepare(napi, skb);
 
        rcu_read_lock();
@@ -3938,27 +3956,8 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
        return ret;
 }
 
-static void skb_gro_reset_offset(struct sk_buff *skb)
-{
-       const struct skb_shared_info *pinfo = skb_shinfo(skb);
-       const skb_frag_t *frag0 = &pinfo->frags[0];
-
-       NAPI_GRO_CB(skb)->data_offset = 0;
-       NAPI_GRO_CB(skb)->frag0 = NULL;
-       NAPI_GRO_CB(skb)->frag0_len = 0;
-
-       if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
-           pinfo->nr_frags &&
-           !PageHighMem(skb_frag_page(frag0))) {
-               NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
-               NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
-       }
-}
-
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
-       skb_gro_reset_offset(skb);
-
        return napi_skb_finish(dev_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
@@ -3981,8 +3980,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 
        if (!skb) {
                skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
-               if (skb)
-                       napi->skb = skb;
+               napi->skb = skb;
        }
        return skb;
 }
@@ -3993,12 +3991,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 {
        switch (ret) {
        case GRO_NORMAL:
-       case GRO_HELD:
-               skb->protocol = eth_type_trans(skb, skb->dev);
-
-               if (ret == GRO_HELD)
-                       skb_gro_pull(skb, -ETH_HLEN);
-               else if (netif_receive_skb(skb))
+               if (netif_receive_skb(skb))
                        ret = GRO_DROP;
                break;
 
@@ -4007,6 +4000,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
                napi_reuse_skb(napi, skb);
                break;
 
+       case GRO_HELD:
        case GRO_MERGED:
                break;
        }
@@ -4017,36 +4011,15 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
        struct sk_buff *skb = napi->skb;
-       struct ethhdr *eth;
-       unsigned int hlen;
-       unsigned int off;
 
        napi->skb = NULL;
 
-       skb_reset_mac_header(skb);
-       skb_gro_reset_offset(skb);
-
-       off = skb_gro_offset(skb);
-       hlen = off + sizeof(*eth);
-       eth = skb_gro_header_fast(skb, off);
-       if (skb_gro_header_hard(skb, hlen)) {
-               eth = skb_gro_header_slow(skb, hlen, off);
-               if (unlikely(!eth)) {
-                       napi_reuse_skb(napi, skb);
-                       skb = NULL;
-                       goto out;
-               }
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
+               napi_reuse_skb(napi, skb);
+               return NULL;
        }
+       skb->protocol = eth_type_trans(skb, skb->dev);
 
-       skb_gro_pull(skb, sizeof(*eth));
-
-       /*
-        * This works because the only protocols we care about don't require
-        * special handling.  We'll fix it up properly at the end.
-        */
-       skb->protocol = eth->h_proto;
-
-out:
        return skb;
 }
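
As a side note on the path this hunk simplifies: napi_frags_skb() is only reached through the napi_get_frags()/napi_gro_frags() driver API, so pulling the Ethernet header with pskb_may_pull() and classifying it with eth_type_trans() here covers every caller. A rough, hypothetical driver RX sketch of that API (the function and the rx_page/rx_len names are illustrative, not from this commit):

static void example_rx_one(struct napi_struct *napi,
			   struct page *rx_page, unsigned int rx_len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;	/* allocation failed; caller recycles rx_page */

	/* Attach the received buffer as frag 0.  The Ethernet header is
	 * now pulled and classified inside napi_frags_skb(), so the
	 * driver no longer touches it. */
	skb_fill_page_desc(skb, 0, rx_page, 0, rx_len);
	skb->len += rx_len;
	skb->data_len += rx_len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);
}
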
 
@@ -4996,7 +4969,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
 
-       if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
+       if (ops->ndo_change_rx_flags)
                ops->ndo_change_rx_flags(dev, flags);
 }