Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 20 Dec 2012 16:37:04 +0000 (08:37 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 20 Dec 2012 16:37:05 +0000 (08:37 -0800)
Pull virtio update from Rusty Russell:
 "Some nice cleanups, and even a patch my wife did as a "live" demo for
  Latinoware 2012.

  There's a slightly non-trivial merge in virtio-net, as we cleaned up
  the virtio add_buf interface while DaveM accepted the mq virtio-net
  patches."

* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (27 commits)
  virtio_console: Add support for remoteproc serial
  virtio_console: Merge struct buffer_token into struct port_buffer
  virtio: add drv_to_virtio to make code clearly
  virtio: use dev_to_virtio wrapper in virtio
  virtio-mmio: Fix irq parsing in command line parameter
  virtio_console: Free buffers from out-queue upon close
  virtio: Convert dev_printk(KERN_<LEVEL> to dev_<level>(
  virtio_console: Use kmalloc instead of kzalloc
  virtio_console: Free buffer if splice fails
  virtio: tools: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: scsi: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: rpmsg: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: net: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: console: make it clear that virtqueue_add_buf() no longer returns > 0
  virtio: make virtqueue_add_buf() returning 0 on success, not capacity.
  virtio: console: don't rely on virtqueue_add_buf() returning capacity.
  virtio_net: don't rely on virtqueue_add_buf() returning capacity.
  virtio-net: remove unused skb_vnet_hdr->num_sg field
  virtio-net: correct capacity math on ring full
  virtio: move queue_index and num_free fields into core struct virtqueue.
  ...

drivers/char/virtio_console.c
drivers/net/virtio_net.c
drivers/scsi/virtio_scsi.c
drivers/virtio/virtio.c
drivers/virtio/virtio_balloon.c
mm/highmem.c
tools/virtio/virtio_test.c

Simple merge
index 68d64f0313eaa38d3324213f536e820365f16f93,62898910708a6ed7451cbbb81e9874f83cd87c8c..a6fcf15adc4ff3d36d928f44cc0ac1f49d249820
@@@ -523,20 -464,21 +522,20 @@@ static bool try_fill_recv(struct receiv
  
        do {
                if (vi->mergeable_rx_bufs)
 -                      err = add_recvbuf_mergeable(vi, gfp);
 +                      err = add_recvbuf_mergeable(rq, gfp);
                else if (vi->big_packets)
 -                      err = add_recvbuf_big(vi, gfp);
 +                      err = add_recvbuf_big(rq, gfp);
                else
 -                      err = add_recvbuf_small(vi, gfp);
 +                      err = add_recvbuf_small(rq, gfp);
  
                oom = err == -ENOMEM;
-               if (err < 0)
+               if (err)
                        break;
 -              ++vi->num;
 -      } while (vi->rvq->num_free);
 -
 -      if (unlikely(vi->num > vi->max))
 -              vi->max = vi->num;
 -      virtqueue_kick(vi->rvq);
 +              ++rq->num;
-       } while (err > 0);
++      } while (rq->vq->num_free);
 +      if (unlikely(rq->num > rq->max))
 +              rq->max = rq->num;
 +      virtqueue_kick(rq->vq);
        return !oom;
  }
  
@@@ -625,29 -557,13 +624,29 @@@ again
        return received;
  }
  
 -static void free_old_xmit_skbs(struct virtnet_info *vi)
 +static int virtnet_open(struct net_device *dev)
 +{
 +      struct virtnet_info *vi = netdev_priv(dev);
 +      int i;
 +
 +      for (i = 0; i < vi->max_queue_pairs; i++) {
 +              /* Make sure we have some buffers: if oom use wq. */
 +              if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 +                      schedule_delayed_work(&vi->refill, 0);
 +              virtnet_napi_enable(&vi->rq[i]);
 +      }
 +
 +      return 0;
 +}
 +
- static unsigned int free_old_xmit_skbs(struct send_queue *sq)
++static void free_old_xmit_skbs(struct send_queue *sq)
  {
        struct sk_buff *skb;
-       unsigned int len, tot_sgs = 0;
+       unsigned int len;
 +      struct virtnet_info *vi = sq->vq->vdev->priv;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
  
 -      while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
 +      while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
  
                u64_stats_update_begin(&stats->tx_syncp);
                stats->tx_packets++;
                u64_stats_update_end(&stats->tx_syncp);
  
-               tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
-       return tot_sgs;
  }
  
 -static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 +static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
  {
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 +      struct virtnet_info *vi = sq->vq->vdev->priv;
+       unsigned num_sg;
  
        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
  
  
        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
 -              sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
 +              sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
        else
 -              sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
 +              sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
  
-       hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
-       return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
 -      num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
 -      return virtqueue_add_buf(vi->svq, vi->tx_sg, num_sg,
++      num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
++      return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
                                 0, skb, GFP_ATOMIC);
  }
  
  static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
  {
        struct virtnet_info *vi = netdev_priv(dev);
-       int capacity;
 +      int qnum = skb_get_queue_mapping(skb);
 +      struct send_queue *sq = &vi->sq[qnum];
+       int err;
  
        /* Free up any pending old buffers before queueing new ones. */
 -      free_old_xmit_skbs(vi);
 +      free_old_xmit_skbs(sq);
  
        /* Try to transmit */
-       capacity = xmit_skb(sq, skb);
-       /* This can happen with OOM and indirect buffers. */
-       if (unlikely(capacity < 0)) {
-               if (likely(capacity == -ENOMEM)) {
-                       if (net_ratelimit())
-                               dev_warn(&dev->dev,
-                                        "TXQ (%d) failure: out of memory\n",
-                                        qnum);
-               } else {
-                       dev->stats.tx_fifo_errors++;
-                       if (net_ratelimit())
-                               dev_warn(&dev->dev,
-                                        "Unexpected TXQ (%d) failure: %d\n",
-                                        qnum, capacity);
-               }
 -      err = xmit_skb(vi, skb);
++      err = xmit_skb(sq, skb);
+       /* This should not happen! */
+       if (unlikely(err)) {
+               dev->stats.tx_fifo_errors++;
+               if (net_ratelimit())
+                       dev_warn(&dev->dev,
 -                               "Unexpected TX queue failure: %d\n", err);
++                               "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return NETDEV_TX_OK;
  
        /* Apparently nice girls don't return TX_BUSY; stop the queue
         * before it gets out of hand.  Naturally, this wastes entries. */
-       if (capacity < 2+MAX_SKB_FRAGS) {
 -      if (vi->svq->num_free < 2+MAX_SKB_FRAGS) {
 -              netif_stop_queue(dev);
 -              if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
++      if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 +              netif_stop_subqueue(dev, qnum);
 +              if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
-                       capacity += free_old_xmit_skbs(sq);
-                       if (capacity >= 2+MAX_SKB_FRAGS) {
 -                      free_old_xmit_skbs(vi);
 -                      if (vi->svq->num_free >= 2+MAX_SKB_FRAGS) {
 -                              netif_start_queue(dev);
 -                              virtqueue_disable_cb(vi->svq);
++                      free_old_xmit_skbs(sq);
++                      if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 +                              netif_start_subqueue(dev, qnum);
 +                              virtqueue_disable_cb(sq->vq);
                        }
                }
        }
index dd8dc27fa32c9e8bf39b1c8a9dd3c853aeabf951,d5f9f4516d887f0e31cc94a2a35057c84651ddea..74ab67a169ec2b170cd9c92137852cfdb56e6fea
@@@ -467,10 -469,8 +469,10 @@@ static int virtscsi_queuecommand(struc
  
        if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
                              sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
-                             GFP_ATOMIC) >= 0)
+                             GFP_ATOMIC) == 0)
                ret = 0;
 +      else
 +              mempool_free(cmd, virtscsi_cmd_pool);
  
  out:
        return ret;
Simple merge
index 2a70558b36ea6c031f48027e6eb503811d3540ce,586395cca5fe129b4e2ecd4706e395ee567f5adb..d19fe3e323b4dd17ad3cf43b099bdac7cf31aa5c
@@@ -133,16 -125,14 +133,15 @@@ static void fill_balloon(struct virtio_
        /* We can only do one array worth at a time. */
        num = min(num, ARRAY_SIZE(vb->pfns));
  
 +      mutex_lock(&vb->balloon_lock);
        for (vb->num_pfns = 0; vb->num_pfns < num;
             vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
 -              struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
 -                                      __GFP_NOMEMALLOC | __GFP_NOWARN);
 +              struct page *page = balloon_page_enqueue(vb_dev_info);
 +
                if (!page) {
-                       if (printk_ratelimit())
-                               dev_printk(KERN_INFO, &vb->vdev->dev,
-                                          "Out of puff! Can't get %u pages\n",
-                                          VIRTIO_BALLOON_PAGES_PER_PAGE);
+                       dev_info_ratelimited(&vb->vdev->dev,
 -                                           "Out of puff! Can't get %zu pages\n",
 -                                           num);
++                                           "Out of puff! Can't get %u pages\n",
++                                           VIRTIO_BALLOON_PAGES_PER_PAGE);
                        /* Sleep for at least 1/5 of a second before retry. */
                        msleep(200);
                        break;
diff --cc mm/highmem.c
Simple merge
Simple merge