/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define MERGE_BUFFER_LEN (ALIGN(GOOD_PACKET_LEN + \
                                sizeof(struct virtio_net_hdr_mrg_rxbuf), \
                                L1_CACHE_BYTES))
#define GOOD_COPY_LEN   128
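
/* Worked example: GOOD_PACKET_LEN = 14 (ETH_HLEN) + 4 (VLAN_HLEN) +
 * 1500 (ETH_DATA_LEN) = 1518 bytes.  The mergeable rx header is 12
 * bytes, so with 64-byte cache lines (a common L1_CACHE_BYTES value)
 * MERGE_BUFFER_LEN = ALIGN(1518 + 12, 64) = 1536 bytes.
 */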

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
        struct u64_stats_sync tx_syncp;
        struct u64_stats_sync rx_syncp;
        u64 tx_bytes;
        u64 tx_packets;

        u64 rx_bytes;
        u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
        /* Virtqueue associated with this send queue */
        struct virtqueue *vq;

        /* TX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of the send queue: output.$index */
        char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
        /* Virtqueue associated with this receive_queue */
        struct virtqueue *vq;

        struct napi_struct napi;

        /* Number of input buffers, and max we've ever had. */
        unsigned int num, max;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of this receive queue: input.$index */
        char name[40];
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
        struct net_device *dev;
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int status;

        /* Max # of queue pairs supported by the device */
        u16 max_queue_pairs;

        /* # of queue pairs currently used by the driver */
        u16 curr_queue_pairs;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Has control virtqueue */
        bool has_cvq;

        /* Host can handle any s/g split between our header and packet data */
        bool any_header_sg;

        /* enable config space updates */
        bool config_enable;

        /* Active statistics */
        struct virtnet_stats __percpu *stats;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Lock for config space updates */
        struct mutex config_lock;

        /* Page_frag for GFP_KERNEL packet buffer allocation when we run
         * low on memory.
         */
        struct page_frag alloc_frag;

        /* Is the affinity hint set for the virtqueues? */
        bool affinity_hint_set;

        /* CPU hot plug notifier */
        struct notifier_block nb;
};

struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
};

struct padded_vnet_hdr {
        struct virtio_net_hdr hdr;
        /*
         * virtio_net_hdr should be in a separate sg buffer because of a
         * QEMU bug, and the data sg buffer shares the same page as this
         * header sg.  This padding makes the next sg 16-byte aligned
         * after virtio_net_hdr.
         */
        char padding[6];
};
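
/* struct virtio_net_hdr is 10 bytes (flags, gso_type, hdr_len, gso_size,
 * csum_start and csum_offset), so the 6 bytes of padding round the whole
 * header up to 16 bytes, which is what aligns the data sg that follows.
 */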

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int rxq2vq(int rxq)
{
        return rxq * 2;
}
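
/* Worked example with two queue pairs (N = 1):
 *   vq 0 -> rx0, vq 1 -> tx0, vq 2 -> rx1, vq 3 -> tx1, vq 4 -> cvq
 * so txq2vq(1) = 3, and vq2rxq() of vq 2 is rx queue 1.
 */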

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->rq.pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
        struct virtnet_info *vi = vq->vdev->priv;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(vq);

        /* We were probably waiting for more output buffers. */
        netif_wake_subqueue(vi->dev, vq2txq(vq));
}

/* Called from bottom half context */
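/* Copies the first part of the packet into the skb's linear area (an skb
 * sized from GOOD_COPY_LEN) so small packets release their pages
 * immediately; whatever is left is attached as page fragments instead of
 * being copied.
 */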
static struct sk_buff *page_to_skb(struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
                                   unsigned int len, unsigned int truesize)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        unsigned int copy, hdr_len, hdr_padded_len;
        char *p;

        p = page_address(page) + offset;

        /* copy small packet so we can reuse these pages for small data */
        skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        hdr = skb_vnet_hdr(skb);

        if (vi->mergeable_rx_bufs) {
                hdr_len = sizeof hdr->mhdr;
                hdr_padded_len = sizeof hdr->mhdr;
        } else {
                hdr_len = sizeof hdr->hdr;
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
        }

        memcpy(hdr, p, hdr_len);

        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;

        copy = len;
        if (copy > skb_tailroom(skb))
                copy = skb_tailroom(skb);
        memcpy(skb_put(skb, copy), p, copy);

        len -= copy;
        offset += copy;

        if (vi->mergeable_rx_bufs) {
                if (len)
                        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
                else
                        put_page(page);
                return skb;
        }

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }
        BUG_ON(offset >= PAGE_SIZE);
        while (len) {
                unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(rq, page);

        return skb;
}

static struct sk_buff *receive_small(void *buf, unsigned int len)
{
        struct sk_buff *skb = buf;

        len -= sizeof(struct virtio_net_hdr);
        skb_trim(skb, len);

        return skb;
}

static struct sk_buff *receive_big(struct net_device *dev,
                                   struct receive_queue *rq,
                                   void *buf,
                                   unsigned int len)
{
        struct page *page = buf;
        struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);

        if (unlikely(!skb))
                goto err;

        return skb;

err:
        dev->stats.rx_dropped++;
        give_pages(rq, page);
        return NULL;
}

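/* Reassemble a packet the host split across several mergeable receive
 * buffers.  num_buffers in the header says how many buffers belong to
 * this packet; each extra buffer becomes a page fragment of the head
 * skb, and once MAX_SKB_FRAGS is reached further fragments go into new
 * skbs chained via frag_list.
 */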
static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct receive_queue *rq,
                                         void *buf,
                                         unsigned int len)
{
        struct skb_vnet_hdr *hdr = buf;
        int num_buf = hdr->mhdr.num_buffers;
        struct page *page = virt_to_head_page(buf);
        int offset = buf - page_address(page);
        struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
                                               MERGE_BUFFER_LEN);
        struct sk_buff *curr_skb = head_skb;

        if (unlikely(!curr_skb))
                goto err_skb;

        while (--num_buf) {
                int num_skb_frags;

                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, num_buf, hdr->mhdr.num_buffers);
                        dev->stats.rx_length_errors++;
                        goto err_buf;
                }
                if (unlikely(len > MERGE_BUFFER_LEN)) {
                        pr_debug("%s: rx error: merge buffer too long\n",
                                 dev->name);
                        len = MERGE_BUFFER_LEN;
                }

                page = virt_to_head_page(buf);
                --rq->num;

                num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
                if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
                        struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

                        if (unlikely(!nskb))
                                goto err_skb;
                        if (curr_skb == head_skb)
                                skb_shinfo(curr_skb)->frag_list = nskb;
                        else
                                curr_skb->next = nskb;
                        curr_skb = nskb;
                        head_skb->truesize += nskb->truesize;
                        num_skb_frags = 0;
                }
                if (curr_skb != head_skb) {
                        head_skb->data_len += len;
                        head_skb->len += len;
                        head_skb->truesize += MERGE_BUFFER_LEN;
                }
                offset = buf - page_address(page);
                if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                        put_page(page);
                        skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
                                             len, MERGE_BUFFER_LEN);
                } else {
                        skb_add_rx_frag(curr_skb, num_skb_frags, page,
                                        offset, len, MERGE_BUFFER_LEN);
                }
        }

        return head_skb;

err_skb:
        put_page(page);
        while (--num_buf) {
                buf = virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!buf)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 dev->name, num_buf);
                        dev->stats.rx_length_errors++;
                        break;
                }
                page = virt_to_head_page(buf);
                put_page(page);
                --rq->num;
        }
err_buf:
        dev->stats.rx_dropped++;
        dev_kfree_skb(head_skb);
        return NULL;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct net_device *dev = vi->dev;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                if (vi->big_packets)
                        give_pages(rq, buf);
                else if (vi->mergeable_rx_bufs)
                        put_page(virt_to_head_page(buf));
                else
                        dev_kfree_skb(buf);
                return;
        }

        if (vi->mergeable_rx_bufs)
                skb = receive_mergeable(dev, rq, buf, len);
        else if (vi->big_packets)
                skb = receive_big(dev, rq, buf, len);
        else
                skb = receive_small(buf, len);

        if (unlikely(!skb))
                return;

        hdr = skb_vnet_hdr(skb);

        u64_stats_update_begin(&stats->rx_syncp);
        stats->rx_bytes += skb->len;
        stats->rx_packets++;
        u64_stats_update_end(&stats->rx_syncp);

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        net_warn_ratelimited("%s: bad gso type %u.\n",
                                             dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        net_warn_ratelimited("%s: zero gso size.\n", dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        int err;

        skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
        if (unlikely(!skb))
                return -ENOMEM;

        skb_put(skb, GOOD_PACKET_LEN);

        hdr = skb_vnet_hdr(skb);
        sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

        skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

        err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
        if (err < 0)
                dev_kfree_skb(skb);

        return err;
}

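/* Post one "big packet" receive buffer built from a chain of pages:
 *   sg[0]                     virtio_net_hdr only (see the QEMU bug note
 *                             at struct padded_vnet_hdr)
 *   sg[1]                     rest of the first page, past the padded header
 *   sg[2..MAX_SKB_FRAGS + 1]  one full page each
 * The pages are chained through page->private so the whole buffer can be
 * recycled as a unit via give_pages()/get_a_page().
 */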
static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(rq, gfp);
                if (!first) {
                        if (list)
                                give_pages(rq, list);
                        return -ENOMEM;
                }
                sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

                /* chain new page in list head to match sg */
                first->private = (unsigned long)list;
                list = first;
        }

        first = get_a_page(rq, gfp);
        if (!first) {
                give_pages(rq, list);
                return -ENOMEM;
        }
        p = page_address(first);

        /* rq->sg[0], rq->sg[1] share the same page */
        /* a separate rq->sg[0] for virtio_net_hdr only due to QEMU bug */
        sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

        /* rq->sg[1] for data packet, from offset */
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

        /* chain first in list head */
        first->private = (unsigned long)list;
        err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
                                  first, gfp);
        if (err < 0)
                give_pages(rq, first);

        return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        char *buf = NULL;
        int err;

        if (gfp & __GFP_WAIT) {
                if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
                                         gfp)) {
                        buf = (char *)page_address(vi->alloc_frag.page) +
                              vi->alloc_frag.offset;
                        get_page(vi->alloc_frag.page);
                        vi->alloc_frag.offset += MERGE_BUFFER_LEN;
                }
        } else {
                buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
        }
        if (!buf)
                return -ENOMEM;

        sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
        err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
        if (err < 0)
                put_page(virt_to_head_page(buf));

        return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        int err;
        bool oom;

        do {
                if (vi->mergeable_rx_bufs)
                        err = add_recvbuf_mergeable(rq, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(rq, gfp);
                else
                        err = add_recvbuf_small(rq, gfp);

                oom = err == -ENOMEM;
                if (err)
                        break;
                ++rq->num;
        } while (rq->vq->num_free);
        if (unlikely(rq->num > rq->max))
                rq->max = rq->num;
        if (unlikely(!virtqueue_kick(rq->vq)))
                return false;
        return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
        struct virtnet_info *vi = rvq->vdev->priv;
        struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

        /* Schedule NAPI; suppress further interrupts if successful. */
        if (napi_schedule_prep(&rq->napi)) {
                virtqueue_disable_cb(rvq);
                __napi_schedule(&rq->napi);
        }
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
        napi_enable(&rq->napi);

        /* If all buffers were filled by the other side before we enabled
         * napi, we won't get another interrupt, so process any outstanding
         * packets now.  virtnet_poll wants to re-enable the queue, so we
         * disable it here.  We synchronize against interrupts via
         * NAPI_STATE_SCHED. */
        if (napi_schedule_prep(&rq->napi)) {
                virtqueue_disable_cb(rq->vq);
                local_bh_disable();
                __napi_schedule(&rq->napi);
                local_bh_enable();
        }
}

static void refill_work(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, refill.work);
        bool still_empty;
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++) {
                struct receive_queue *rq = &vi->rq[i];

                napi_disable(&rq->napi);
                still_empty = !try_fill_recv(rq, GFP_KERNEL);
                virtnet_napi_enable(rq);

                /* In theory, this can happen: if we don't get any buffers in
                 * we will *never* try to fill again.
                 */
                if (still_empty)
                        schedule_delayed_work(&vi->refill, HZ/2);
        }
}

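/* NAPI poll: drain up to @budget completed buffers, refill the ring once
 * it falls below half of the largest fill level seen, then re-enable
 * callbacks.  virtqueue_enable_cb_prepare() plus virtqueue_poll() close
 * the race where a buffer arrives between the last virtqueue_get_buf()
 * and napi_complete().
 */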
static int virtnet_poll(struct napi_struct *napi, int budget)
{
        struct receive_queue *rq =
                container_of(napi, struct receive_queue, napi);
        struct virtnet_info *vi = rq->vq->vdev->priv;
        void *buf;
        unsigned int r, len, received = 0;

again:
        while (received < budget &&
               (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                receive_buf(rq, buf, len);
                --rq->num;
                received++;
        }

        if (rq->num < rq->max / 2) {
                if (!try_fill_recv(rq, GFP_ATOMIC))
                        schedule_delayed_work(&vi->refill, 0);
        }

        /* Out of packets? */
        if (received < budget) {
                r = virtqueue_enable_cb_prepare(rq->vq);
                napi_complete(napi);
                if (unlikely(virtqueue_poll(rq->vq, r)) &&
                    napi_schedule_prep(napi)) {
                        virtqueue_disable_cb(rq->vq);
                        __napi_schedule(napi);
                        goto again;
                }
        }

        return received;
}

static int virtnet_open(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int i;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        /* Make sure we have some buffers: if oom use wq. */
                        if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
                                schedule_delayed_work(&vi->refill, 0);
                virtnet_napi_enable(&vi->rq[i]);
        }

        return 0;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
        struct sk_buff *skb;
        unsigned int len;
        struct virtnet_info *vi = sq->vq->vdev->priv;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

        while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);

                u64_stats_update_begin(&stats->tx_syncp);
                stats->tx_bytes += skb->len;
                stats->tx_packets++;
                u64_stats_update_end(&stats->tx_syncp);

                dev_kfree_skb_any(skb);
        }
}

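/* Build the scatterlist for one outgoing skb.  If the device accepts any
 * header/data split (any_header_sg) and the skb has suitably aligned
 * headroom, the virtio header is pushed in front of the packet data so
 * that header plus packet need one less sg entry; otherwise the header
 * lives in skb->cb (see skb_vnet_hdr) and gets its own sg entry.
 */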
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
        struct skb_vnet_hdr *hdr;
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
        struct virtnet_info *vi = sq->vq->vdev->priv;
        unsigned num_sg;
        unsigned hdr_len;
        bool can_push;

        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
        if (vi->mergeable_rx_bufs)
                hdr_len = sizeof hdr->mhdr;
        else
                hdr_len = sizeof hdr->hdr;

        can_push = vi->any_header_sg &&
                !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
                !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
        /* Even if we can, don't push here yet as this would skew
         * csum_start offset below. */
        if (can_push)
                hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
        else
                hdr = skb_vnet_hdr(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->hdr.csum_start = skb_checksum_start_offset(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
                hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
        }

        if (skb_is_gso(skb)) {
                hdr->hdr.hdr_len = skb_headlen(skb);
                hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
                        hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else {
                hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
                hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
        }

        if (vi->mergeable_rx_bufs)
                hdr->mhdr.num_buffers = 0;

        if (can_push) {
                __skb_push(skb, hdr_len);
                num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
                /* Pull header back to avoid skew in tx bytes calculations. */
                __skb_pull(skb, hdr_len);
        } else {
                sg_set_buf(sq->sg, hdr, hdr_len);
                num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
        }
        return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

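/* Transmit path.  In the worst case an skb needs MAX_SKB_FRAGS + 2
 * descriptors (fragments + linear part + virtio header), so the queue is
 * stopped as soon as fewer than that many entries remain free, rather
 * than ever returning NETDEV_TX_BUSY.
 */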
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int qnum = skb_get_queue_mapping(skb);
        struct send_queue *sq = &vi->sq[qnum];
        int err;

        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(sq);

        /* Try to transmit */
        err = xmit_skb(sq, skb);

        /* This should not happen! */
        if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
                dev->stats.tx_fifo_errors++;
                if (net_ratelimit())
                        dev_warn(&dev->dev,
                                 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
        nf_reset(skb);

        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                netif_stop_subqueue(dev, qnum);
                if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
                        free_old_xmit_skbs(sq);
                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                netif_start_subqueue(dev, qnum);
                                virtqueue_disable_cb(sq->vq);
                        }
                }
        }

        return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
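/* Example (sketch): enabling promiscuous mode, as virtnet_set_rx_mode()
 * below does:
 *
 *   u8 promisc = 1;
 *   struct scatterlist sg;
 *
 *   sg_init_one(&sg, &promisc, sizeof(promisc));
 *   virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 *                        VIRTIO_NET_CTRL_RX_PROMISC, &sg, NULL);
 */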
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *out,
                                 struct scatterlist *in)
{
        struct scatterlist *sgs[4], hdr, stat;
        struct virtio_net_ctrl_hdr ctrl;
        virtio_net_ctrl_ack status = ~0;
        unsigned out_num = 0, in_num = 0, tmp;

        /* Caller should know better */
        BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

        ctrl.class = class;
        ctrl.cmd = cmd;
        /* Add header */
        sg_init_one(&hdr, &ctrl, sizeof(ctrl));
        sgs[out_num++] = &hdr;

        if (out)
                sgs[out_num++] = out;
        if (in)
                sgs[out_num + in_num++] = in;

        /* Add return status. */
        sg_init_one(&stat, &status, sizeof(status));
        sgs[out_num + in_num++] = &stat;

        BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
        BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
               < 0);

        if (unlikely(!virtqueue_kick(vi->cvq)))
                return status == VIRTIO_NET_OK;

        /* Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!virtqueue_get_buf(vi->cvq, &tmp) &&
               !virtqueue_is_broken(vi->cvq))
                cpu_relax();

        return status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;
        int ret;
        struct sockaddr *addr = p;
        struct scatterlist sg;

        ret = eth_prepare_mac_addr_change(dev, p);
        if (ret)
                return ret;

        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
                sg_init_one(&sg, addr->sa_data, dev->addr_len);
                if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                          VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                          &sg, NULL)) {
                        dev_warn(&vdev->dev,
                                 "Failed to set mac address by vq command.\n");
                        return -EINVAL;
                }
        } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
                unsigned int i;

                /* Naturally, this has an atomicity problem. */
                for (i = 0; i < dev->addr_len; i++)
                        virtio_cwrite8(vdev,
                                       offsetof(struct virtio_net_config, mac) +
                                       i, addr->sa_data[i]);
        }

        eth_commit_mac_addr_change(dev, p);

        return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
                                               struct rtnl_link_stats64 *tot)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int cpu;
        unsigned int start;

        for_each_possible_cpu(cpu) {
                struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
                u64 tpackets, tbytes, rpackets, rbytes;

                do {
                        start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
                        tpackets = stats->tx_packets;
                        tbytes   = stats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

                do {
                        start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
                        rpackets = stats->rx_packets;
                        rbytes   = stats->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
                tot->rx_bytes   += rbytes;
                tot->tx_bytes   += tbytes;
        }

        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->rx_dropped = dev->stats.rx_dropped;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;

        return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++)
                napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
        rtnl_lock();
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
                                  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
                dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
        rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
        struct scatterlist sg;
        struct virtio_net_ctrl_mq s;
        struct net_device *dev = vi->dev;

        if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
                return 0;

        s.virtqueue_pairs = queue_pairs;
        sg_init_one(&sg, &s, sizeof(s));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
                                  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
                dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
                         queue_pairs);
                return -EINVAL;
        } else {
                vi->curr_queue_pairs = queue_pairs;
                /* virtnet_open() will refill when the device goes up. */
                if (dev->flags & IFF_UP)
                        schedule_delayed_work(&vi->refill, 0);
        }

        return 0;
}

static int virtnet_close(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        int i;

        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);

        for (i = 0; i < vi->max_queue_pairs; i++)
                napi_disable(&vi->rq[i].napi);

        return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
        struct netdev_hw_addr *ha;
        int uc_count;
        int mc_count;
        void *buf;
        int i;

        /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;

        promisc = ((dev->flags & IFF_PROMISC) != 0);
        allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

        sg_init_one(sg, &promisc, sizeof(promisc));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_PROMISC,
                                  sg, NULL))
                dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
                         promisc ? "en" : "dis");

        sg_init_one(sg, &allmulti, sizeof(allmulti));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
                                  VIRTIO_NET_CTRL_RX_ALLMULTI,
                                  sg, NULL))
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");

        uc_count = netdev_uc_count(dev);
        mc_count = netdev_mc_count(dev);
        /* MAC filter - use one buffer for both lists */
        buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
                      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        mac_data = buf;
        if (!buf)
                return;

        sg_init_table(sg, 2);

        /* Store the unicast list and count in the front of the buffer */
        mac_data->entries = uc_count;
        i = 0;
        netdev_for_each_uc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[0], mac_data,
                   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

        /* multicast list and count fill the end */
        mac_data = (void *)&mac_data->macs[uc_count][0];

        mac_data->entries = mc_count;
        i = 0;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                  sg, NULL))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

        kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
        return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
                                    __be16 proto, u16 vid)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;

        sg_init_one(&sg, &vid, sizeof(vid));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
        return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
        int i;

        if (vi->affinity_hint_set) {
                for (i = 0; i < vi->max_queue_pairs; i++) {
                        virtqueue_set_affinity(vi->rq[i].vq, -1);
                        virtqueue_set_affinity(vi->sq[i].vq, -1);
                }

                vi->affinity_hint_set = false;
        }
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
        int i;
        int cpu;

        /* In multiqueue mode, when the number of cpus equals the number of
         * queue pairs, we make each queue pair private to one cpu by
         * setting the affinity hint, eliminating contention.
         */
        if (vi->curr_queue_pairs == 1 ||
            vi->max_queue_pairs != num_online_cpus()) {
                virtnet_clean_affinity(vi, -1);
                return;
        }

        i = 0;
        for_each_online_cpu(cpu) {
                virtqueue_set_affinity(vi->rq[i].vq, cpu);
                virtqueue_set_affinity(vi->sq[i].vq, cpu);
                netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
                i++;
        }

        vi->affinity_hint_set = true;
}

static int virtnet_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_DEAD:
                virtnet_set_affinity(vi);
                break;
        case CPU_DOWN_PREPARE:
                virtnet_clean_affinity(vi, (long)hcpu);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static void virtnet_get_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ring)
{
        struct virtnet_info *vi = netdev_priv(dev);

        ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
        ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
        ring->rx_pending = ring->rx_max_pending;
        ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtio_device *vdev = vi->vdev;

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
                                struct ethtool_channels *channels)
{
        struct virtnet_info *vi = netdev_priv(dev);
        u16 queue_pairs = channels->combined_count;
        int err;

        /* We don't support separate rx/tx channels.
         * We don't allow setting 'other' channels.
         */
        if (channels->rx_count || channels->tx_count || channels->other_count)
                return -EINVAL;

        if (queue_pairs > vi->max_queue_pairs)
                return -EINVAL;

        get_online_cpus();
        err = virtnet_set_queues(vi, queue_pairs);
        if (!err) {
                netif_set_real_num_tx_queues(dev, queue_pairs);
                netif_set_real_num_rx_queues(dev, queue_pairs);

                virtnet_set_affinity(vi);
        }
        put_online_cpus();

        return err;
}

static void virtnet_get_channels(struct net_device *dev,
                                 struct ethtool_channels *channels)
{
        struct virtnet_info *vi = netdev_priv(dev);

        channels->combined_count = vi->curr_queue_pairs;
        channels->max_combined = vi->max_queue_pairs;
        channels->max_other = 0;
        channels->rx_count = 0;
        channels->tx_count = 0;
        channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
        .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = virtnet_get_ringparam,
        .set_channels = virtnet_set_channels,
        .get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

static const struct net_device_ops virtnet_netdev = {
        .ndo_open            = virtnet_open,
        .ndo_stop            = virtnet_close,
        .ndo_start_xmit      = start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_config_changed_work(struct work_struct *work)
{
        struct virtnet_info *vi =
                container_of(work, struct virtnet_info, config_work);
        u16 v;

        mutex_lock(&vi->config_lock);
        if (!vi->config_enable)
                goto done;

        if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
                                 struct virtio_net_config, status, &v) < 0)
                goto done;

        if (v & VIRTIO_NET_S_ANNOUNCE) {
                netdev_notify_peers(vi->dev);
                virtnet_ack_link_announce(vi);
        }

        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;

        if (vi->status == v)
                goto done;

        vi->status = v;

        if (vi->status & VIRTIO_NET_S_LINK_UP) {
                netif_carrier_on(vi->dev);
                netif_tx_wake_all_queues(vi->dev);
        } else {
                netif_carrier_off(vi->dev);
                netif_tx_stop_all_queues(vi->dev);
        }
done:
        mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
        kfree(vi->rq);
        kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
        int i;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                while (vi->rq[i].pages)
                        __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
        }
}

static void free_unused_bufs(struct virtnet_info *vi)
{
        void *buf;
        int i;

        for (i = 0; i < vi->max_queue_pairs; i++) {
                struct virtqueue *vq = vi->sq[i].vq;
                while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
                        dev_kfree_skb(buf);
        }

        for (i = 0; i < vi->max_queue_pairs; i++) {
                struct virtqueue *vq = vi->rq[i].vq;

                while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
                        if (vi->big_packets)
                                give_pages(&vi->rq[i], buf);
                        else if (vi->mergeable_rx_bufs)
                                put_page(virt_to_head_page(buf));
                        else
                                dev_kfree_skb(buf);
                        --vi->rq[i].num;
                }
                BUG_ON(vi->rq[i].num != 0);
        }
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
        struct virtio_device *vdev = vi->vdev;

        virtnet_clean_affinity(vi, -1);

        vdev->config->del_vqs(vdev);

        virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
        vq_callback_t **callbacks;
        struct virtqueue **vqs;
        int ret = -ENOMEM;
        int i, total_vqs;
        const char **names;

        /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
         * possibly N-1 more RX/TX queue pairs used in multiqueue mode,
         * followed by a possible control virtqueue.
         */
1434         total_vqs = vi->max_queue_pairs * 2 +
1435                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1436
1437         /* Allocate space for find_vqs parameters */
1438         vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1439         if (!vqs)
1440                 goto err_vq;
1441         callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1442         if (!callbacks)
1443                 goto err_callback;
1444         names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1445         if (!names)
1446                 goto err_names;
1447
1448         /* Parameters for control virtqueue, if any */
1449         if (vi->has_cvq) {
1450                 callbacks[total_vqs - 1] = NULL;
1451                 names[total_vqs - 1] = "control";
1452         }
1453
1454         /* Allocate/initialize parameters for send/receive virtqueues */
1455         for (i = 0; i < vi->max_queue_pairs; i++) {
1456                 callbacks[rxq2vq(i)] = skb_recv_done;
1457                 callbacks[txq2vq(i)] = skb_xmit_done;
1458                 sprintf(vi->rq[i].name, "input.%d", i);
1459                 sprintf(vi->sq[i].name, "output.%d", i);
1460                 names[rxq2vq(i)] = vi->rq[i].name;
1461                 names[txq2vq(i)] = vi->sq[i].name;
1462         }
1463
1464         ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
1465                                          names);
1466         if (ret)
1467                 goto err_find;
1468
1469         if (vi->has_cvq) {
1470                 vi->cvq = vqs[total_vqs - 1];
1471                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1472                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1473         }
1474
1475         for (i = 0; i < vi->max_queue_pairs; i++) {
1476                 vi->rq[i].vq = vqs[rxq2vq(i)];
1477                 vi->sq[i].vq = vqs[txq2vq(i)];
1478         }
1479
1480         kfree(names);
1481         kfree(callbacks);
1482         kfree(vqs);
1483
1484         return 0;
1485
1486 err_find:
1487         kfree(names);
1488 err_names:
1489         kfree(callbacks);
1490 err_callback:
1491         kfree(vqs);
1492 err_vq:
1493         return ret;
1494 }
1495
1496 static int virtnet_alloc_queues(struct virtnet_info *vi)
1497 {
1498         int i;
1499
1500         vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
1501         if (!vi->sq)
1502                 goto err_sq;
1503         vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1504         if (!vi->rq)
1505                 goto err_rq;
1506
1507         INIT_DELAYED_WORK(&vi->refill, refill_work);
1508         for (i = 0; i < vi->max_queue_pairs; i++) {
1509                 vi->rq[i].pages = NULL;
1510                 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1511                                napi_weight);
1512
1513                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1514                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
1515         }
1516
1517         return 0;
1518
1519 err_rq:
1520         kfree(vi->sq);
1521 err_sq:
1522         return -ENOMEM;
1523 }
1524
static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

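/* Device probe: size the queue pairs from the negotiated features,
 * allocate the net_device and per-cpu stats, create the virtqueues,
 * register with the networking core and finally prime the receive
 * queues with buffers.
 */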
static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;

	/* Find out whether the host supports a multiqueue virtio_net device */
	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
				   struct virtio_net_config,
				   max_virtqueue_pairs, &max_queue_pairs);

	/* Multiqueue needs at least 2 queue pairs (and a control vq);
	 * fall back to a single queue pair otherwise.
	 */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

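	/* Initialize each per-cpu syncp; u64_stats_init() sets up the
	 * seqcount used on 32-bit SMP machines and is a no-op where
	 * 64-bit loads are already atomic.
	 */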
	for_each_possible_cpu(i) {
		struct virtnet_stats *virtnet_stats;
		virtnet_stats = per_cpu_ptr(vi->stats, i);
		u64_stats_init(&virtnet_stats->tx_syncp);
		u64_stats_init(&virtnet_stats->rx_syncp);
	}

	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	/* Use a single tx/rx queue pair as the default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].num == 0) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	virtnet_del_vqs(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

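/* Teardown shared by remove and freeze: reset the device first so it
 * stops touching guest memory, then reclaim any buffers still queued
 * in the virtqueues before deleting them.
 */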
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	virtnet_del_vqs(vi);
}

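/* Driver removal: quiesce the config-change worker, unregister the
 * netdev, then tear down the virtqueues and free everything that
 * probe allocated.
 */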
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);

	flush_work(&vi->config_work);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM_SLEEP
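/* Suspend: the device will lose all virtqueue state, so detach the
 * netdev, quiesce NAPI and tear the virtqueues down completely;
 * virtnet_restore() rebuilds them from scratch.
 */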
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

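/* Resume: recreate the virtqueues, restart NAPI, refill the receive
 * rings and, where the device supports multiqueue, renegotiate the
 * number of active queue pairs.
 */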
static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);

	netif_device_attach(vi->dev);

	for (i = 0; i < vi->curr_queue_pairs; i++)
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	err = register_hotcpu_notifier(&vi->nb);
	if (err)
		return err;

	return 0;
}
#endif

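/* Match any virtio device that identifies itself as a network device. */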
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

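/* Feature bits this driver understands; the virtio core will only
 * accept features listed here during negotiation.
 */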
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
	VIRTIO_F_ANY_LAYOUT,
};

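/* Glue to the virtio core: probe/remove, config-change callback and,
 * when PM sleep is enabled, the freeze/restore hooks.
 */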
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");