Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 919b6509455cfbaf45ba63c7d8b378fa13a8e758..43341b82649c2451a3ea7378714dc45599541ca9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -39,6 +39,7 @@
 #include <linux/udp.h>
 
 #include <net/tcp.h>
+#include <net/ip6_checksum.h>
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -137,36 +138,26 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
                vif->pending_prod + vif->pending_cons;
 }
 
-static int max_required_rx_slots(struct xenvif *vif)
+bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
-       int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+       RING_IDX prod, cons;
 
-       /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-       if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
-               max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
+       do {
+               prod = vif->rx.sring->req_prod;
+               cons = vif->rx.req_cons;
 
-       return max;
-}
+               if (prod - cons >= needed)
+                       return true;
 
-int xenvif_rx_ring_full(struct xenvif *vif)
-{
-       RING_IDX peek   = vif->rx_req_cons_peek;
-       RING_IDX needed = max_required_rx_slots(vif);
-
-       return ((vif->rx.sring->req_prod - peek) < needed) ||
-              ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
-}
+               vif->rx.sring->req_event = prod + 1;
 
-int xenvif_must_stop_queue(struct xenvif *vif)
-{
-       if (!xenvif_rx_ring_full(vif))
-               return 0;
-
-       vif->rx.sring->req_event = vif->rx_req_cons_peek +
-               max_required_rx_slots(vif);
-       mb(); /* request notification /then/ check the queue */
+               /* Make sure event is visible before we check prod
+                * again.
+                */
+               mb();
+       } while (vif->rx.sring->req_prod != prod);
 
-       return xenvif_rx_ring_full(vif);
+       return false;
 }
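
For reference, the do/while above is the standard Xen shared-ring consumer pattern: check whether enough requests are outstanding, arm req_event so the frontend raises an event when it posts more, then re-read req_prod after a full barrier to close the race with a frontend that produced in between. A minimal userspace model of the same logic (simplified ring struct, __sync_synchronize() standing in for mb(); a sketch, not the driver's types):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t RING_IDX;

    struct shared_ring {
            volatile RING_IDX req_prod;   /* written by the frontend */
            volatile RING_IDX req_event;  /* notify when prod passes this */
    };

    static bool ring_slots_available(struct shared_ring *ring, RING_IDX cons,
                                     unsigned int needed)
    {
            RING_IDX prod;

            do {
                    prod = ring->req_prod;
                    if (prod - cons >= needed)
                            return true;

                    /* Ask the producer to notify us past the current prod. */
                    ring->req_event = prod + 1;

                    /* The event store must be visible before prod is
                     * re-read (mb() in the kernel version).
                     */
                    __sync_synchronize();
            } while (ring->req_prod != prod);

            return false;
    }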
 
 /*
@@ -209,93 +200,6 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
        return false;
 }
 
-struct xenvif_count_slot_state {
-       unsigned long copy_off;
-       bool head;
-};
-
-unsigned int xenvif_count_frag_slots(struct xenvif *vif,
-                                    unsigned long offset, unsigned long size,
-                                    struct xenvif_count_slot_state *state)
-{
-       unsigned count = 0;
-
-       offset &= ~PAGE_MASK;
-
-       while (size > 0) {
-               unsigned long bytes;
-
-               bytes = PAGE_SIZE - offset;
-
-               if (bytes > size)
-                       bytes = size;
-
-               if (start_new_rx_buffer(state->copy_off, bytes, state->head)) {
-                       count++;
-                       state->copy_off = 0;
-               }
-
-               if (state->copy_off + bytes > MAX_BUFFER_OFFSET)
-                       bytes = MAX_BUFFER_OFFSET - state->copy_off;
-
-               state->copy_off += bytes;
-
-               offset += bytes;
-               size -= bytes;
-
-               if (offset == PAGE_SIZE)
-                       offset = 0;
-
-               state->head = false;
-       }
-
-       return count;
-}
-
-/*
- * Figure out how many ring slots we're going to need to send @skb to
- * the guest. This function is essentially a dry run of
- * xenvif_gop_frag_copy.
- */
-unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
-{
-       struct xenvif_count_slot_state state;
-       unsigned int count;
-       unsigned char *data;
-       unsigned i;
-
-       state.head = true;
-       state.copy_off = 0;
-
-       /* Slot for the first (partial) page of data. */
-       count = 1;
-
-       /* Need a slot for the GSO prefix for GSO extra data? */
-       if (skb_shinfo(skb)->gso_size)
-               count++;
-
-       data = skb->data;
-       while (data < skb_tail_pointer(skb)) {
-               unsigned long offset = offset_in_page(data);
-               unsigned long size = PAGE_SIZE - offset;
-
-               if (data + size > skb_tail_pointer(skb))
-                       size = skb_tail_pointer(skb) - data;
-
-               count += xenvif_count_frag_slots(vif, offset, size, &state);
-
-               data += size;
-       }
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
-               unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
-
-               count += xenvif_count_frag_slots(vif, offset, size, &state);
-       }
-       return count;
-}
-
 struct netrx_pending_operations {
        unsigned copy_prod, copy_cons;
        unsigned meta_prod, meta_cons;
@@ -556,12 +460,12 @@ struct skb_cb_overlay {
        int meta_slots_used;
 };
 
-static void xenvif_kick_thread(struct xenvif *vif)
+void xenvif_kick_thread(struct xenvif *vif)
 {
        wake_up(&vif->wq);
 }
 
-void xenvif_rx_action(struct xenvif *vif)
+static void xenvif_rx_action(struct xenvif *vif)
 {
        s8 status;
        u16 flags;
@@ -570,8 +474,6 @@ void xenvif_rx_action(struct xenvif *vif)
        struct sk_buff *skb;
        LIST_HEAD(notify);
        int ret;
-       int nr_frags;
-       int count;
        unsigned long offset;
        struct skb_cb_overlay *sco;
        int need_to_notify = 0;
@@ -583,29 +485,44 @@ void xenvif_rx_action(struct xenvif *vif)
 
        skb_queue_head_init(&rxq);
 
-       count = 0;
-
        while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
-               vif = netdev_priv(skb->dev);
-               nr_frags = skb_shinfo(skb)->nr_frags;
+               int max_slots_needed;
+               int i;
+
+               /* We need a cheap worst case estimate for the number of
+                * slots we'll use.
+                */
+
+               max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
+                                               skb_headlen(skb),
+                                               PAGE_SIZE);
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       unsigned int size;
+                       size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+                       max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
+               }
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+                   skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                       max_slots_needed++;
+
+               /* If the skb may not fit then bail out now */
+               if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
+                       skb_queue_head(&vif->rx_queue, skb);
+                       need_to_notify = 1;
+                       break;
+               }
 
                sco = (struct skb_cb_overlay *)skb->cb;
                sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
-
-               count += nr_frags + 1;
+               BUG_ON(sco->meta_slots_used > max_slots_needed);
 
                __skb_queue_tail(&rxq, skb);
-
-               /* Filled the batch queue? */
-               /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-               if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
-                       break;
        }
 
        BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
 
        if (!npo.copy_prod)
-               return;
+               goto done;
 
        BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
        gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
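
The max_slots_needed computation above deliberately overshoots: it replaces the removed xenvif_count_skb_slots() dry run with simple per-page rounding, and the BUG_ON() asserts that the slots actually consumed never exceed the estimate. A worked example of the arithmetic (hypothetical skb layout, 4 KiB pages):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long head_off = 64;    /* offset_in_page(skb->data) */
            unsigned long head_len = 6000;  /* skb_headlen(skb) */
            unsigned long frag_size[2] = { 5000, 5000 };
            unsigned int i, slots;

            /* Linear area: 2 slots (6064 bytes spans two pages). */
            slots = DIV_ROUND_UP(head_off + head_len, PAGE_SIZE);

            /* Each frag rounded up separately: 2 slots apiece. */
            for (i = 0; i < 2; i++)
                    slots += DIV_ROUND_UP(frag_size[i], PAGE_SIZE);

            /* One extra slot for the GSO extra_info segment. */
            slots++;

            printf("max_slots_needed = %u\n", slots);  /* 7 */
            return 0;
    }
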
@@ -613,8 +530,6 @@ void xenvif_rx_action(struct xenvif *vif)
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
                sco = (struct skb_cb_overlay *)skb->cb;
 
-               vif = netdev_priv(skb->dev);
-
                if ((1 << vif->meta[npo.meta_cons].gso_type) &
                    vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&vif->rx,
@@ -680,25 +595,13 @@ void xenvif_rx_action(struct xenvif *vif)
                if (ret)
                        need_to_notify = 1;
 
-               xenvif_notify_tx_completion(vif);
-
                npo.meta_cons += sco->meta_slots_used;
                dev_kfree_skb(skb);
        }
 
+done:
        if (need_to_notify)
                notify_remote_via_irq(vif->rx_irq);
-
-       /* More work to do? */
-       if (!skb_queue_empty(&vif->rx_queue))
-               xenvif_kick_thread(vif);
-}
-
-void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
-       skb_queue_tail(&vif->rx_queue, skb);
-
-       xenvif_kick_thread(vif);
 }
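
The ret/need_to_notify logic above follows the shared-ring response convention: responses are pushed and the frontend is interrupted only if it requested an event in the pushed range. The call site producing ret is elided by this hunk, but the usual sequence (macro from xen/interface/io/ring.h) looks like:

    int notify;

    /* Publish responses; set notify only if the frontend's rsp_event
     * falls inside the newly pushed range, avoiding spurious IRQs.
     */
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, notify);
    if (notify)
            notify_remote_via_irq(vif->rx_irq);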
 
 void xenvif_check_rx_xenvif(struct xenvif *vif)
@@ -1148,49 +1051,72 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
        return 0;
 }
 
-static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+                                 unsigned int max)
 {
-       if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
-               /* If we need to pullup then pullup to the max, so we
-                * won't need to do it again.
-                */
-               int target = min_t(int, skb->len, MAX_TCP_HEADER);
-               __pskb_pull_tail(skb, target - skb_headlen(skb));
-       }
+       if (skb_headlen(skb) >= len)
+               return 0;
+
+       /* If we need to pullup then pullup to the max, so we
+        * won't need to do it again.
+        */
+       if (max > skb->len)
+               max = skb->len;
+
+       if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+               return -ENOMEM;
+
+       if (skb_headlen(skb) < len)
+               return -EPROTO;
+
+       return 0;
 }
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
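
The rewritten maybe_pull_tail() distinguishes allocation failure (-ENOMEM) from a genuinely truncated packet (-EPROTO), so callers dereference header pointers only on success. A hypothetical caller following the same pattern as the checksum code below (a sketch; kernel context assumed):

    /* Ensure the TCP header at @off is linear before parsing it;
     * mirrors the maybe_pull_tail() calls in checksum_setup_ip().
     */
    static int example_linearize_tcp(struct sk_buff *skb, unsigned int off)
    {
            int err;

            err = maybe_pull_tail(skb, off + sizeof(struct tcphdr),
                                  MAX_IP_HDR_LEN);
            if (err < 0)
                    return err;     /* -ENOMEM or -EPROTO: do not parse */

            /* Safe: the full TCP header is now in the linear area. */
            return tcp_hdr(skb)->doff * 4;
    }
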
 static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                             int recalculate_partial_csum)
 {
-       struct iphdr *iph = (void *)skb->data;
-       unsigned int header_size;
        unsigned int off;
-       int err = -EPROTO;
+       bool fragment;
+       int err;
+
+       fragment = false;
+
+       err = maybe_pull_tail(skb,
+                             sizeof(struct iphdr),
+                             MAX_IP_HDR_LEN);
+       if (err < 0)
+               goto out;
 
-       off = sizeof(struct iphdr);
+       if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+               fragment = true;
 
-       header_size = skb->network_header + off + MAX_IPOPTLEN;
-       maybe_pull_tail(skb, header_size);
+       off = ip_hdrlen(skb);
 
-       off = iph->ihl * 4;
+       err = -EPROTO;
 
-       switch (iph->protocol) {
+       switch (ip_hdr(skb)->protocol) {
        case IPPROTO_TCP:
                if (!skb_partial_csum_set(skb, off,
                                          offsetof(struct tcphdr, check)))
                        goto out;
 
                if (recalculate_partial_csum) {
-                       struct tcphdr *tcph = tcp_hdr(skb);
-
-                       header_size = skb->network_header +
-                               off +
-                               sizeof(struct tcphdr);
-                       maybe_pull_tail(skb, header_size);
-
-                       tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                        skb->len - off,
-                                                        IPPROTO_TCP, 0);
+                       err = maybe_pull_tail(skb,
+                                             off + sizeof(struct tcphdr),
+                                             MAX_IP_HDR_LEN);
+                       if (err < 0)
+                               goto out;
+
+                       tcp_hdr(skb)->check =
+                               ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                                  ip_hdr(skb)->daddr,
+                                                  skb->len - off,
+                                                  IPPROTO_TCP, 0);
                }
                break;
        case IPPROTO_UDP:
@@ -1199,24 +1125,20 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (recalculate_partial_csum) {
-                       struct udphdr *udph = udp_hdr(skb);
-
-                       header_size = skb->network_header +
-                               off +
-                               sizeof(struct udphdr);
-                       maybe_pull_tail(skb, header_size);
-
-                       udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                        skb->len - off,
-                                                        IPPROTO_UDP, 0);
+                       err = maybe_pull_tail(skb,
+                                             off + sizeof(struct udphdr),
+                                             MAX_IP_HDR_LEN);
+                       if (err < 0)
+                               goto out;
+
+                       udp_hdr(skb)->check =
+                               ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                                  ip_hdr(skb)->daddr,
+                                                  skb->len - off,
+                                                  IPPROTO_UDP, 0);
                }
                break;
        default:
-               if (net_ratelimit())
-                       netdev_err(vif->dev,
-                                  "Attempting to checksum a non-TCP/UDP packet, "
-                                  "dropping a protocol %d packet\n",
-                                  iph->protocol);
                goto out;
        }
 
@@ -1226,75 +1148,99 @@ out:
        return err;
 }
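
For CHECKSUM_PARTIAL, the code above seeds the check field with the folded pseudo-header sum so that hardware (or skb_checksum_help()) can fold in the payload later. csum_tcpudp_magic() itself returns the complement of that folded sum, which the outer ~ undoes. A plain-C model of the arithmetic (host-order constants for clarity; byte-order details glossed over):

    #include <stdint.h>
    #include <stdio.h>

    /* One's-complement fold of the TCP/UDP pseudo-header: source and
     * destination address, protocol, and L4 length.
     */
    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                                   uint16_t len, uint8_t proto)
    {
            uint64_t sum = 0;

            sum += saddr >> 16;
            sum += saddr & 0xffff;
            sum += daddr >> 16;
            sum += daddr & 0xffff;
            sum += proto;
            sum += len;

            while (sum >> 16)               /* fold carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);

            return (uint16_t)sum;
    }

    int main(void)
    {
            uint32_t saddr = 0xc0a80001;    /* 192.168.0.1 */
            uint32_t daddr = 0xc0a80002;    /* 192.168.0.2 */

            /* The seed stored in tcph->check, i.e. the uninverted folded
             * sum (~csum_tcpudp_magic(); IPPROTO_TCP == 6).
             */
            printf("seed = 0x%04x\n", pseudo_hdr_sum(saddr, daddr, 1460, 6));
            return 0;
    }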
 
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+       (type *)(skb_network_header(skb) + (off))
+
 static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                               int recalculate_partial_csum)
 {
-       int err = -EPROTO;
-       struct ipv6hdr *ipv6h = (void *)skb->data;
+       int err;
        u8 nexthdr;
-       unsigned int header_size;
        unsigned int off;
+       unsigned int len;
        bool fragment;
        bool done;
 
+       fragment = false;
        done = false;
 
        off = sizeof(struct ipv6hdr);
 
-       header_size = skb->network_header + off;
-       maybe_pull_tail(skb, header_size);
+       err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+       if (err < 0)
+               goto out;
 
-       nexthdr = ipv6h->nexthdr;
+       nexthdr = ipv6_hdr(skb)->nexthdr;
 
-       while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
-              !done) {
+       len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+       while (off <= len && !done) {
                switch (nexthdr) {
                case IPPROTO_DSTOPTS:
                case IPPROTO_HOPOPTS:
                case IPPROTO_ROUTING: {
-                       struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+                       struct ipv6_opt_hdr *hp;
 
-                       header_size = skb->network_header +
-                               off +
-                               sizeof(struct ipv6_opt_hdr);
-                       maybe_pull_tail(skb, header_size);
+                       err = maybe_pull_tail(skb,
+                                             off +
+                                             sizeof(struct ipv6_opt_hdr),
+                                             MAX_IPV6_HDR_LEN);
+                       if (err < 0)
+                               goto out;
 
+                       hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
                        nexthdr = hp->nexthdr;
                        off += ipv6_optlen(hp);
                        break;
                }
                case IPPROTO_AH: {
-                       struct ip_auth_hdr *hp = (void *)(skb->data + off);
+                       struct ip_auth_hdr *hp;
+
+                       err = maybe_pull_tail(skb,
+                                             off +
+                                             sizeof(struct ip_auth_hdr),
+                                             MAX_IPV6_HDR_LEN);
+                       if (err < 0)
+                               goto out;
+
+                       hp = OPT_HDR(struct ip_auth_hdr, skb, off);
+                       nexthdr = hp->nexthdr;
+                       off += ipv6_authlen(hp);
+                       break;
+               }
+               case IPPROTO_FRAGMENT: {
+                       struct frag_hdr *hp;
 
-                       header_size = skb->network_header +
-                               off +
-                               sizeof(struct ip_auth_hdr);
-                       maybe_pull_tail(skb, header_size);
+                       err = maybe_pull_tail(skb,
+                                             off +
+                                             sizeof(struct frag_hdr),
+                                             MAX_IPV6_HDR_LEN);
+                       if (err < 0)
+                               goto out;
+
+                       hp = OPT_HDR(struct frag_hdr, skb, off);
+
+                       if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+                               fragment = true;
 
                        nexthdr = hp->nexthdr;
-                       off += (hp->hdrlen+2)<<2;
+                       off += sizeof(struct frag_hdr);
                        break;
                }
-               case IPPROTO_FRAGMENT:
-                       fragment = true;
-                       /* fall through */
                default:
                        done = true;
                        break;
                }
        }
 
-       if (!done) {
-               if (net_ratelimit())
-                       netdev_err(vif->dev, "Failed to parse packet header\n");
-               goto out;
-       }
+       err = -EPROTO;
 
-       if (fragment) {
-               if (net_ratelimit())
-                       netdev_err(vif->dev, "Packet is a fragment!\n");
+       if (!done || fragment)
                goto out;
-       }
 
        switch (nexthdr) {
        case IPPROTO_TCP:
@@ -1303,17 +1249,17 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (recalculate_partial_csum) {
-                       struct tcphdr *tcph = tcp_hdr(skb);
-
-                       header_size = skb->network_header +
-                               off +
-                               sizeof(struct tcphdr);
-                       maybe_pull_tail(skb, header_size);
-
-                       tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-                                                      &ipv6h->daddr,
-                                                      skb->len - off,
-                                                      IPPROTO_TCP, 0);
+                       err = maybe_pull_tail(skb,
+                                             off + sizeof(struct tcphdr),
+                                             MAX_IPV6_HDR_LEN);
+                       if (err < 0)
+                               goto out;
+
+                       tcp_hdr(skb)->check =
+                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                &ipv6_hdr(skb)->daddr,
+                                                skb->len - off,
+                                                IPPROTO_TCP, 0);
                }
                break;
        case IPPROTO_UDP:
@@ -1322,25 +1268,20 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
                        goto out;
 
                if (recalculate_partial_csum) {
-                       struct udphdr *udph = udp_hdr(skb);
-
-                       header_size = skb->network_header +
-                               off +
-                               sizeof(struct udphdr);
-                       maybe_pull_tail(skb, header_size);
-
-                       udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
-                                                      &ipv6h->daddr,
-                                                      skb->len - off,
-                                                      IPPROTO_UDP, 0);
+                       err = maybe_pull_tail(skb,
+                                             off + sizeof(struct udphdr),
+                                             MAX_IPV6_HDR_LEN);
+                       if (err < 0)
+                               goto out;
+
+                       udp_hdr(skb)->check =
+                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                                &ipv6_hdr(skb)->daddr,
+                                                skb->len - off,
+                                                IPPROTO_UDP, 0);
                }
                break;
        default:
-               if (net_ratelimit())
-                       netdev_err(vif->dev,
-                                  "Attempting to checksum a non-TCP/UDP packet, "
-                                  "dropping a protocol %d packet\n",
-                                  nexthdr);
                goto out;
        }
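
Each extension-header family encodes its length differently, which is why the walker above has one rule per case; note also that the old code aborted on any fragment header, while the new code parses it and sets fragment only when the offset or more-fragments bits are set, so atomic fragments still get checksum setup. A standalone model of the three length rules (simplified struct, hypothetical names):

    #include <stdint.h>

    /* Generic extension-header prefix: next header plus length byte. */
    struct ext_hdr {
            uint8_t nexthdr;
            uint8_t hdrlen;
    };

    /* Hop-by-hop, destination-options and routing headers count 8-octet
     * units excluding the first: ipv6_optlen() in the kernel.
     */
    static unsigned int opt_hdr_len(const struct ext_hdr *hp)
    {
            return (hp->hdrlen + 1) << 3;
    }

    /* The authentication header counts 4-octet units minus two:
     * ipv6_authlen(), the old open-coded (hp->hdrlen + 2) << 2.
     */
    static unsigned int auth_hdr_len(const struct ext_hdr *hp)
    {
            return (hp->hdrlen + 2) << 2;
    }

    /* The fragment header is a fixed 8 octets (its length byte is
     * reserved), hence the fixed sizeof(struct frag_hdr) above.
     */
    static unsigned int frag_hdr_len(void)
    {
            return 8;
    }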
 
@@ -1765,7 +1706,7 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
 
 static inline int rx_work_todo(struct xenvif *vif)
 {
-       return !skb_queue_empty(&vif->rx_queue);
+       return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
@@ -1815,8 +1756,6 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
        rxs = (struct xen_netif_rx_sring *)addr;
        BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
 
-       vif->rx_req_cons_peek = 0;
-
        return 0;
 
 err:
@@ -1824,9 +1763,24 @@ err:
        return err;
 }
 
+void xenvif_stop_queue(struct xenvif *vif)
+{
+       if (!vif->can_queue)
+               return;
+
+       netif_stop_queue(vif->dev);
+}
+
+static void xenvif_start_queue(struct xenvif *vif)
+{
+       if (xenvif_schedulable(vif))
+               netif_wake_queue(vif->dev);
+}
+
 int xenvif_kthread(void *data)
 {
        struct xenvif *vif = data;
+       struct sk_buff *skb;
 
        while (!kthread_should_stop()) {
                wait_event_interruptible(vif->wq,
@@ -1835,12 +1789,22 @@ int xenvif_kthread(void *data)
                if (kthread_should_stop())
                        break;
 
-               if (rx_work_todo(vif))
+               if (!skb_queue_empty(&vif->rx_queue))
                        xenvif_rx_action(vif);
 
+               vif->rx_event = false;
+
+               if (skb_queue_empty(&vif->rx_queue) &&
+                   netif_queue_stopped(vif->dev))
+                       xenvif_start_queue(vif);
+
                cond_resched();
        }
 
+       /* Bin any remaining skbs */
+       while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
+               dev_kfree_skb(skb);
+
        return 0;
 }
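
xenvif_kthread() exits its loop only once kthread_should_stop() returns true, i.e. after the teardown path calls kthread_stop(); the "bin any remaining skbs" loop then drains the queue before the thread returns. A hedged sketch of that lifecycle (hypothetical helper names and an assumed vif->task field; the driver's real create/stop calls live elsewhere):

    #include <linux/err.h>
    #include <linux/kthread.h>

    static int example_rx_thread_start(struct xenvif *vif)
    {
            struct task_struct *task;

            task = kthread_run(xenvif_kthread, vif, "vif-rx");
            if (IS_ERR(task))
                    return PTR_ERR(task);

            vif->task = task;       /* assumed field, kept for stop */
            return 0;
    }

    static void example_rx_thread_stop(struct xenvif *vif)
    {
            /* Wakes the thread and waits for xenvif_kthread() to
             * return; remaining rx_queue skbs are freed on the way out.
             */
            if (vif->task)
                    kthread_stop(vif->task);
    }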