Merge git://git.infradead.org/users/willy/linux-nvme
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Apr 2014 23:45:59 +0000 (16:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Apr 2014 23:45:59 +0000 (16:45 -0700)
Pull NVMe driver updates from Matthew Wilcox:
 "Various updates to the NVMe driver.  The most user-visible change is
  that drive hotplugging now works, and CPU hotplug while an NVMe drive
  is installed should also work better"

* git://git.infradead.org/users/willy/linux-nvme:
  NVMe: Retry failed commands with non-fatal errors
  NVMe: Add getgeo to block ops
  NVMe: Start-stop nvme_thread during device add-remove.
  NVMe: Make I/O timeout a module parameter
  NVMe: CPU hot plug notification
  NVMe: per-cpu io queues
  NVMe: Replace DEFINE_PCI_DEVICE_TABLE
  NVMe: Fix divide-by-zero in nvme_trans_io_get_num_cmds
  NVMe: IOCTL path RCU protect queue access
  NVMe: RCU protected access to io queues
  NVMe: Initialize device reference count earlier
  NVMe: Add CONFIG_PM_SLEEP to suspend/resume functions

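The two user-visible changes can be exercised directly. A quick usage sketch (example values; standard module-parameter and CPU-hotplug sysfs paths assumed): the new I/O timeout is a byte module parameter registered with mode 0644, so it can be given at load time or adjusted on a running system, and the per-cpu queue assignment is redone when a CPU goes on- or offline:

    # set a 60-second I/O timeout at module load time
    modprobe nvme io_timeout=60

    # or adjust it at runtime (applies to subsequently issued I/O)
    echo 60 > /sys/module/nvme/parameters/io_timeout

    # offline/online a CPU (cpu1 is just an example) to trigger the
    # driver's hotplug notifier and per-cpu I/O queue reassignment
    echo 0 > /sys/devices/system/cpu/cpu1/online
    echo 1 > /sys/devices/system/cpu/cpu1/online
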
drivers/block/nvme-core.c
include/linux/nvme.h

index da085ff10d25159b7233c98771fe1964adb7cbde,efa9c8f4a7a76b320840250c6f73b3a0bb9705c9..7c64fa756cced628807e70fb873b532964ea39d9
@@@ -1,6 -1,6 +1,6 @@@
  /*
   * NVM Express device driver
-  * Copyright (c) 2011, Intel Corporation.
+  * Copyright (c) 2011-2014, Intel Corporation.
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms and conditions of the GNU General Public License,
  #include <linux/bio.h>
  #include <linux/bitops.h>
  #include <linux/blkdev.h>
+ #include <linux/cpu.h>
  #include <linux/delay.h>
  #include <linux/errno.h>
  #include <linux/fs.h>
  #include <linux/genhd.h>
+ #include <linux/hdreg.h>
  #include <linux/idr.h>
  #include <linux/init.h>
  #include <linux/interrupt.h>
@@@ -35,6 -37,7 +37,7 @@@
  #include <linux/module.h>
  #include <linux/moduleparam.h>
  #include <linux/pci.h>
+ #include <linux/percpu.h>
  #include <linux/poison.h>
  #include <linux/ptrace.h>
  #include <linux/sched.h>
  #define SQ_SIZE(depth)                (depth * sizeof(struct nvme_command))
  #define CQ_SIZE(depth)                (depth * sizeof(struct nvme_completion))
  #define ADMIN_TIMEOUT (60 * HZ)
+ #define IOD_TIMEOUT   (4 * NVME_IO_TIMEOUT)
+ unsigned char io_timeout = 30;
+ module_param(io_timeout, byte, 0644);
+ MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
  
  static int nvme_major;
  module_param(nvme_major, int, 0);
@@@ -58,6 -66,7 +66,7 @@@ static DEFINE_SPINLOCK(dev_list_lock)
  static LIST_HEAD(dev_list);
  static struct task_struct *nvme_thread;
  static struct workqueue_struct *nvme_workq;
+ static wait_queue_head_t nvme_kthread_wait;
  
  static void nvme_reset_failed_dev(struct work_struct *ws);
  
@@@ -74,6 -83,7 +83,7 @@@ struct async_cmd_info 
   * commands and one for I/O commands).
   */
  struct nvme_queue {
+       struct rcu_head r_head;
        struct device *q_dmadev;
        struct nvme_dev *dev;
        char irqname[24];       /* nvme4294967295-65535\0 */
@@@ -85,6 -95,7 +95,7 @@@
        wait_queue_head_t sq_full;
        wait_queue_t sq_cong_wait;
        struct bio_list sq_cong;
+       struct list_head iod_bio;
        u32 __iomem *q_db;
        u16 q_depth;
        u16 cq_vector;
        u8 cq_phase;
        u8 cqe_seen;
        u8 q_suspended;
+       cpumask_var_t cpu_mask;
        struct async_cmd_info cmdinfo;
        unsigned long cmdid_data[];
  };
@@@ -118,7 -130,7 +130,7 @@@ static inline void _nvme_check_size(voi
        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
  }
  
- typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+ typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
                                                struct nvme_completion *);
  
  struct nvme_cmd_info {
@@@ -190,7 -202,7 +202,7 @@@ static int alloc_cmdid_killable(struct 
  #define CMD_CTX_FLUSH         (0x318 + CMD_CTX_BASE)
  #define CMD_CTX_ABORT         (0x31C + CMD_CTX_BASE)
  
- static void special_completion(struct nvme_dev *dev, void *ctx,
+ static void special_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
  {
        if (ctx == CMD_CTX_CANCELLED)
        if (ctx == CMD_CTX_FLUSH)
                return;
        if (ctx == CMD_CTX_ABORT) {
-               ++dev->abort_limit;
+               ++nvmeq->dev->abort_limit;
                return;
        }
        if (ctx == CMD_CTX_COMPLETED) {
-               dev_warn(&dev->pci_dev->dev,
+               dev_warn(nvmeq->q_dmadev,
                                "completed id %d twice on queue %d\n",
                                cqe->command_id, le16_to_cpup(&cqe->sq_id));
                return;
        }
        if (ctx == CMD_CTX_INVALID) {
-               dev_warn(&dev->pci_dev->dev,
+               dev_warn(nvmeq->q_dmadev,
                                "invalid id %d completed on queue %d\n",
                                cqe->command_id, le16_to_cpup(&cqe->sq_id));
                return;
        }
  
-       dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+       dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
  }
  
- static void async_completion(struct nvme_dev *dev, void *ctx,
+ static void async_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
  {
        struct async_cmd_info *cmdinfo = ctx;
@@@ -262,14 -274,34 +274,34 @@@ static void *cancel_cmdid(struct nvme_q
        return ctx;
  }
  
- struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+ static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
+ {
+       return rcu_dereference_raw(dev->queues[qid]);
+ }
+ static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
+ {
+       unsigned queue_id = get_cpu_var(*dev->io_queue);
+       rcu_read_lock();
+       return rcu_dereference(dev->queues[queue_id]);
+ }
+ static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
  {
-       return dev->queues[get_cpu() + 1];
+       rcu_read_unlock();
+       put_cpu_var(nvmeq->dev->io_queue);
  }
  
- void put_nvmeq(struct nvme_queue *nvmeq)
+ static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
+                                                       __acquires(RCU)
  {
-       put_cpu();
+       rcu_read_lock();
+       return rcu_dereference(dev->queues[q_idx]);
+ }
+ static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+ {
+       rcu_read_unlock();
  }
  
  /**
@@@ -284,6 -316,10 +316,10 @@@ static int nvme_submit_cmd(struct nvme_
        unsigned long flags;
        u16 tail;
        spin_lock_irqsave(&nvmeq->q_lock, flags);
+       if (nvmeq->q_suspended) {
+               spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+               return -EBUSY;
+       }
        tail = nvmeq->sq_tail;
        memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
        if (++tail == nvmeq->q_depth)
@@@ -323,6 -359,7 +359,7 @@@ nvme_alloc_iod(unsigned nseg, unsigned 
                iod->npages = -1;
                iod->length = nbytes;
                iod->nents = 0;
+               iod->first_dma = 0ULL;
                iod->start_time = jiffies;
        }
  
@@@ -371,19 -408,31 +408,31 @@@ static void nvme_end_io_acct(struct bi
        part_stat_unlock();
  }
  
- static void bio_completion(struct nvme_dev *dev, void *ctx,
+ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
  {
        struct nvme_iod *iod = ctx;
        struct bio *bio = iod->private;
        u16 status = le16_to_cpup(&cqe->status) >> 1;
  
+       if (unlikely(status)) {
+               if (!(status & NVME_SC_DNR ||
+                               bio->bi_rw & REQ_FAILFAST_MASK) &&
+                               (jiffies - iod->start_time) < IOD_TIMEOUT) {
+                       if (!waitqueue_active(&nvmeq->sq_full))
+                               add_wait_queue(&nvmeq->sq_full,
+                                                       &nvmeq->sq_cong_wait);
+                       list_add_tail(&iod->node, &nvmeq->iod_bio);
+                       wake_up(&nvmeq->sq_full);
+                       return;
+               }
+       }
        if (iod->nents) {
-               dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+               dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
                        bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                nvme_end_io_acct(bio, iod->start_time);
        }
-       nvme_free_iod(dev, iod);
+       nvme_free_iod(nvmeq->dev, iod);
        if (status)
                bio_endio(bio, -EIO);
        else
  }
  
  /* length is in bytes.  gfp flags indicates whether we may sleep. */
- int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-                       struct nvme_iod *iod, int total_len, gfp_t gfp)
+ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
+                                                               gfp_t gfp)
  {
        struct dma_pool *pool;
        int length = total_len;
        dma_addr_t prp_dma;
        int nprps, i;
  
-       cmd->prp1 = cpu_to_le64(dma_addr);
        length -= (PAGE_SIZE - offset);
        if (length <= 0)
                return total_len;
        }
  
        if (length <= PAGE_SIZE) {
-               cmd->prp2 = cpu_to_le64(dma_addr);
+               iod->first_dma = dma_addr;
                return total_len;
        }
  
  
        prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
        if (!prp_list) {
-               cmd->prp2 = cpu_to_le64(dma_addr);
+               iod->first_dma = dma_addr;
                iod->npages = -1;
                return (total_len - length) + PAGE_SIZE;
        }
        list[0] = prp_list;
        iod->first_dma = prp_dma;
-       cmd->prp2 = cpu_to_le64(prp_dma);
        i = 0;
        for (;;) {
                if (i == PAGE_SIZE / 8) {
@@@ -480,10 -527,11 +527,11 @@@ static int nvme_split_and_submit(struc
  
        bio_chain(split, bio);
  
-       if (bio_list_empty(&nvmeq->sq_cong))
+       if (!waitqueue_active(&nvmeq->sq_full))
                add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
        bio_list_add(&nvmeq->sq_cong, split);
        bio_list_add(&nvmeq->sq_cong, bio);
+       wake_up(&nvmeq->sq_full);
  
        return 0;
  }
@@@ -536,25 -584,13 +584,13 @@@ static int nvme_map_bio(struct nvme_que
        return length;
  }
  
- /*
-  * We reuse the small pool to allocate the 16-byte range here as it is not
-  * worth having a special pool for these or additional cases to handle freeing
-  * the iod.
-  */
  static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                struct bio *bio, struct nvme_iod *iod, int cmdid)
  {
-       struct nvme_dsm_range *range;
+       struct nvme_dsm_range *range =
+                               (struct nvme_dsm_range *)iod_list(iod)[0];
        struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
  
-       range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
-                                                       &iod->first_dma);
-       if (!range)
-               return -ENOMEM;
-       iod_list(iod)[0] = (__le64 *)range;
-       iod->npages = 0;
        range->cattr = cpu_to_le32(0);
        range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
        range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@@ -601,44 -637,22 +637,22 @@@ int nvme_submit_flush_data(struct nvme_
        return nvme_submit_flush(nvmeq, ns, cmdid);
  }
  
- /*
-  * Called with local interrupts disabled and the q_lock held.  May not sleep.
-  */
- static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-                                                               struct bio *bio)
+ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
  {
+       struct bio *bio = iod->private;
+       struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
        struct nvme_command *cmnd;
-       struct nvme_iod *iod;
-       enum dma_data_direction dma_dir;
-       int cmdid, length, result;
+       int cmdid;
        u16 control;
        u32 dsmgmt;
-       int psegs = bio_phys_segments(ns->queue, bio);
-       if ((bio->bi_rw & REQ_FLUSH) && psegs) {
-               result = nvme_submit_flush_data(nvmeq, ns);
-               if (result)
-                       return result;
-       }
  
-       result = -ENOMEM;
-       iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
-       if (!iod)
-               goto nomem;
-       iod->private = bio;
-       result = -EBUSY;
        cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
        if (unlikely(cmdid < 0))
-               goto free_iod;
+               return cmdid;
  
-       if (bio->bi_rw & REQ_DISCARD) {
-               result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
-               if (result)
-                       goto free_cmdid;
-               return result;
-       }
-       if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+       if (bio->bi_rw & REQ_DISCARD)
+               return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+       if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
                return nvme_submit_flush(nvmeq, ns, cmdid);
  
        control = 0;
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
  
        cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
        memset(cmnd, 0, sizeof(*cmnd));
-       if (bio_data_dir(bio)) {
-               cmnd->rw.opcode = nvme_cmd_write;
-               dma_dir = DMA_TO_DEVICE;
-       } else {
-               cmnd->rw.opcode = nvme_cmd_read;
-               dma_dir = DMA_FROM_DEVICE;
-       }
-       result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
-       if (result <= 0)
-               goto free_cmdid;
-       length = result;
  
+       cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
        cmnd->rw.command_id = cmdid;
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-       length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
-                                                               GFP_ATOMIC);
+       cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+       cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
        cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
-       cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+       cmnd->rw.length =
+               cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
  
-       nvme_start_io_acct(bio);
        if (++nvmeq->sq_tail == nvmeq->q_depth)
                nvmeq->sq_tail = 0;
        writel(nvmeq->sq_tail, nvmeq->q_db);
  
        return 0;
+ }
+ /*
+  * Called with local interrupts disabled and the q_lock held.  May not sleep.
+  */
+ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+                                                               struct bio *bio)
+ {
+       struct nvme_iod *iod;
+       int psegs = bio_phys_segments(ns->queue, bio);
+       int result;
+       if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+               result = nvme_submit_flush_data(nvmeq, ns);
+               if (result)
+                       return result;
+       }
+       iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
+       if (!iod)
+               return -ENOMEM;
+       iod->private = bio;
+       if (bio->bi_rw & REQ_DISCARD) {
+               void *range;
+               /*
+                * We reuse the small pool to allocate the 16-byte range here
+                * as it is not worth having a special pool for these or
+                * additional cases to handle freeing the iod.
+                */
+               range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
+                                               GFP_ATOMIC,
+                                               &iod->first_dma);
+               if (!range) {
+                       result = -ENOMEM;
+                       goto free_iod;
+               }
+               iod_list(iod)[0] = (__le64 *)range;
+               iod->npages = 0;
+       } else if (psegs) {
+               result = nvme_map_bio(nvmeq, iod, bio,
+                       bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+                       psegs);
+               if (result <= 0)
+                       goto free_iod;
+               if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
+                                                               result) {
+                       result = -ENOMEM;
+                       goto free_iod;
+               }
+               nvme_start_io_acct(bio);
+       }
+       if (unlikely(nvme_submit_iod(nvmeq, iod))) {
+               if (!waitqueue_active(&nvmeq->sq_full))
+                       add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+               list_add_tail(&iod->node, &nvmeq->iod_bio);
+       }
+       return 0;
  
-  free_cmdid:
-       free_cmdid(nvmeq, cmdid, NULL);
   free_iod:
        nvme_free_iod(nvmeq->dev, iod);
-  nomem:
        return result;
  }
  
@@@ -711,7 -768,7 +768,7 @@@ static int nvme_process_cq(struct nvme_
                }
  
                ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
-               fn(nvmeq->dev, ctx, &cqe);
+               fn(nvmeq, ctx, &cqe);
        }
  
        /* If the controller ignores the cq head doorbell and continuously
@@@ -747,7 -804,7 +804,7 @@@ static void nvme_make_request(struct re
        if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
                result = nvme_submit_bio_queue(nvmeq, ns, bio);
        if (unlikely(result)) {
-               if (bio_list_empty(&nvmeq->sq_cong))
+               if (!waitqueue_active(&nvmeq->sq_full))
                        add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
                bio_list_add(&nvmeq->sq_cong, bio);
        }
@@@ -791,7 -848,7 +848,7 @@@ struct sync_cmd_info 
        int status;
  };
  
- static void sync_completion(struct nvme_dev *dev, void *ctx,
+ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
  {
        struct sync_cmd_info *cmdinfo = ctx;
   * Returns 0 on success.  If the result is negative, it's a Linux error code;
   * if the result is positive, it's an NVM Express status code
   */
- int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+ static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
+                                               struct nvme_command *cmd,
                                                u32 *result, unsigned timeout)
  {
-       int cmdid;
+       int cmdid, ret;
        struct sync_cmd_info cmdinfo;
+       struct nvme_queue *nvmeq;
+       nvmeq = lock_nvmeq(dev, q_idx);
+       if (!nvmeq) {
+               unlock_nvmeq(nvmeq);
+               return -ENODEV;
+       }
  
        cmdinfo.task = current;
        cmdinfo.status = -EINTR;
  
-       cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
-                                                               timeout);
-       if (cmdid < 0)
+       cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
+       if (cmdid < 0) {
+               unlock_nvmeq(nvmeq);
                return cmdid;
+       }
        cmd->common.command_id = cmdid;
  
        set_current_state(TASK_KILLABLE);
-       nvme_submit_cmd(nvmeq, cmd);
+       ret = nvme_submit_cmd(nvmeq, cmd);
+       if (ret) {
+               free_cmdid(nvmeq, cmdid, NULL);
+               unlock_nvmeq(nvmeq);
+               set_current_state(TASK_RUNNING);
+               return ret;
+       }
+       unlock_nvmeq(nvmeq);
        schedule_timeout(timeout);
  
        if (cmdinfo.status == -EINTR) {
-               nvme_abort_command(nvmeq, cmdid);
+               nvmeq = lock_nvmeq(dev, q_idx);
+               if (nvmeq)
+                       nvme_abort_command(nvmeq, cmdid);
+               unlock_nvmeq(nvmeq);
                return -EINTR;
        }
  
@@@ -845,20 -921,26 +921,26 @@@ static int nvme_submit_async_cmd(struc
                return cmdid;
        cmdinfo->status = -EINTR;
        cmd->common.command_id = cmdid;
-       nvme_submit_cmd(nvmeq, cmd);
-       return 0;
+       return nvme_submit_cmd(nvmeq, cmd);
  }
  
  int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
                                                                u32 *result)
  {
-       return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+       return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
+ }
+ int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+                                                               u32 *result)
+ {
+       return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
+                                                       NVME_IO_TIMEOUT);
  }
  
  static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
                struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
  {
-       return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+       return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
                                                                ADMIN_TIMEOUT);
  }
  
@@@ -985,6 -1067,7 +1067,7 @@@ static void nvme_abort_cmd(int cmdid, s
        struct nvme_command cmd;
        struct nvme_dev *dev = nvmeq->dev;
        struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       struct nvme_queue *adminq;
  
        if (!nvmeq->qid || info[cmdid].aborted) {
                if (work_busy(&dev->reset_work))
                dev_warn(&dev->pci_dev->dev,
                        "I/O %d QID %d timeout, reset controller\n", cmdid,
                                                                nvmeq->qid);
 -              PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
 +              dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
                return;
        }
        if (!dev->abort_limit)
                return;
  
-       a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+       adminq = rcu_dereference(dev->queues[0]);
+       a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
                                                                ADMIN_TIMEOUT);
        if (a_cmdid < 0)
                return;
  
        dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
                                                        nvmeq->qid);
-       nvme_submit_cmd(dev->queues[0], &cmd);
+       nvme_submit_cmd(adminq, &cmd);
  }
  
  /**
@@@ -1051,23 -1135,38 +1135,38 @@@ static void nvme_cancel_ios(struct nvme
                dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
                                                                nvmeq->qid);
                ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-               fn(nvmeq->dev, ctx, &cqe);
+               fn(nvmeq, ctx, &cqe);
        }
  }
  
- static void nvme_free_queue(struct nvme_queue *nvmeq)
+ static void nvme_free_queue(struct rcu_head *r)
  {
+       struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
        spin_lock_irq(&nvmeq->q_lock);
        while (bio_list_peek(&nvmeq->sq_cong)) {
                struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
                bio_endio(bio, -EIO);
        }
+       while (!list_empty(&nvmeq->iod_bio)) {
+               static struct nvme_completion cqe = {
+                       .status = cpu_to_le16(
+                               (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
+               };
+               struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
+                                                       struct nvme_iod,
+                                                       node);
+               list_del(&iod->node);
+               bio_completion(nvmeq, iod, &cqe);
+       }
        spin_unlock_irq(&nvmeq->q_lock);
  
        dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
                                (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
        dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
                                        nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+       if (nvmeq->qid)
+               free_cpumask_var(nvmeq->cpu_mask);
        kfree(nvmeq);
  }
  
@@@ -1076,9 -1175,10 +1175,10 @@@ static void nvme_free_queues(struct nvm
        int i;
  
        for (i = dev->queue_count - 1; i >= lowest; i--) {
-               nvme_free_queue(dev->queues[i]);
+               struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+               rcu_assign_pointer(dev->queues[i], NULL);
+               call_rcu(&nvmeq->r_head, nvme_free_queue);
                dev->queue_count--;
-               dev->queues[i] = NULL;
        }
  }
  
@@@ -1098,6 -1198,7 +1198,7 @@@ static int nvme_suspend_queue(struct nv
                return 1;
        }
        nvmeq->q_suspended = 1;
+       nvmeq->dev->online_queues--;
        spin_unlock_irq(&nvmeq->q_lock);
  
        irq_set_affinity_hint(vector, NULL);
@@@ -1116,7 -1217,7 +1217,7 @@@ static void nvme_clear_queue(struct nvm
  
  static void nvme_disable_queue(struct nvme_dev *dev, int qid)
  {
-       struct nvme_queue *nvmeq = dev->queues[qid];
+       struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
  
        if (!nvmeq)
                return;
@@@ -1152,6 -1253,9 +1253,9 @@@ static struct nvme_queue *nvme_alloc_qu
        if (!nvmeq->sq_cmds)
                goto free_cqdma;
  
+       if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
+               goto free_sqdma;
        nvmeq->q_dmadev = dmadev;
        nvmeq->dev = dev;
        snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
        init_waitqueue_head(&nvmeq->sq_full);
        init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
        bio_list_init(&nvmeq->sq_cong);
+       INIT_LIST_HEAD(&nvmeq->iod_bio);
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
        nvmeq->cq_vector = vector;
        nvmeq->qid = qid;
        nvmeq->q_suspended = 1;
        dev->queue_count++;
+       rcu_assign_pointer(dev->queues[qid], nvmeq);
  
        return nvmeq;
  
+  free_sqdma:
+       dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
+                                                       nvmeq->sq_dma_addr);
   free_cqdma:
        dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
                                                        nvmeq->cq_dma_addr);
@@@ -1203,6 -1312,7 +1312,7 @@@ static void nvme_init_queue(struct nvme
        memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
        nvme_cancel_ios(nvmeq, false);
        nvmeq->q_suspended = 0;
+       dev->online_queues++;
  }
  
  static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@@ -1311,12 -1421,11 +1421,11 @@@ static int nvme_configure_admin_queue(s
        if (result < 0)
                return result;
  
-       nvmeq = dev->queues[0];
+       nvmeq = raw_nvmeq(dev, 0);
        if (!nvmeq) {
                nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
                if (!nvmeq)
                        return -ENOMEM;
-               dev->queues[0] = nvmeq;
        }
  
        aqa = nvmeq->q_depth - 1;
@@@ -1418,7 -1527,6 +1527,6 @@@ void nvme_unmap_user_pages(struct nvme_
  static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
  {
        struct nvme_dev *dev = ns->dev;
-       struct nvme_queue *nvmeq;
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
                c.rw.metadata = cpu_to_le64(meta_dma_addr);
        }
  
-       length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+       length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+       c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+       c.rw.prp2 = cpu_to_le64(iod->first_dma);
  
-       nvmeq = get_nvmeq(dev);
-       /*
-        * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
-        * disabled.  We may be preempted at any point, and be rescheduled
-        * to a different CPU.  That will cause cacheline bouncing, but no
-        * additional races since q_lock already protects against other CPUs.
-        */
-       put_nvmeq(nvmeq);
        if (length != (io.nblocks + 1) << ns->lba_shift)
                status = -ENOMEM;
-       else if (!nvmeq || nvmeq->q_suspended)
-               status = -EBUSY;
        else
-               status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+               status = nvme_submit_io_cmd(dev, &c, NULL);
  
        if (meta_len) {
                if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@@ -1572,8 -1672,9 +1672,9 @@@ static int nvme_user_admin_cmd(struct n
                                                                length);
                if (IS_ERR(iod))
                        return PTR_ERR(iod);
-               length = nvme_setup_prps(dev, &c.common, iod, length,
-                                                               GFP_KERNEL);
+               length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+               c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+               c.common.prp2 = cpu_to_le64(iod->first_dma);
        }
  
        timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
        if (length != cmd.data_len)
                status = -ENOMEM;
        else
-               status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
-                                                               timeout);
+               status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
  
        if (cmd.data_len) {
                nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@@ -1653,25 -1753,51 +1753,51 @@@ static void nvme_release(struct gendis
        kref_put(&dev->kref, nvme_free_dev);
  }
  
+ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
+ {
+       /* some standard values */
+       geo->heads = 1 << 6;
+       geo->sectors = 1 << 5;
+       geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+       return 0;
+ }
  static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
        .compat_ioctl   = nvme_compat_ioctl,
        .open           = nvme_open,
        .release        = nvme_release,
+       .getgeo         = nvme_getgeo,
  };
  
+ static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
+ {
+       struct nvme_iod *iod, *next;
+       list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
+               if (unlikely(nvme_submit_iod(nvmeq, iod)))
+                       break;
+               list_del(&iod->node);
+               if (bio_list_empty(&nvmeq->sq_cong) &&
+                                               list_empty(&nvmeq->iod_bio))
+                       remove_wait_queue(&nvmeq->sq_full,
+                                               &nvmeq->sq_cong_wait);
+       }
+ }
  static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
  {
        while (bio_list_peek(&nvmeq->sq_cong)) {
                struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
                struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
  
-               if (bio_list_empty(&nvmeq->sq_cong))
+               if (bio_list_empty(&nvmeq->sq_cong) &&
+                                               list_empty(&nvmeq->iod_bio))
                        remove_wait_queue(&nvmeq->sq_full,
                                                        &nvmeq->sq_cong_wait);
                if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
-                       if (bio_list_empty(&nvmeq->sq_cong))
+                       if (!waitqueue_active(&nvmeq->sq_full))
                                add_wait_queue(&nvmeq->sq_full,
                                                        &nvmeq->sq_cong_wait);
                        bio_list_add_head(&nvmeq->sq_cong, bio);
@@@ -1696,12 -1822,15 +1822,14 @@@ static int nvme_kthread(void *data
                                list_del_init(&dev->node);
                                dev_warn(&dev->pci_dev->dev,
                                        "Failed status, reset controller\n");
 -                              PREPARE_WORK(&dev->reset_work,
 -                                                      nvme_reset_failed_dev);
 +                              dev->reset_workfn = nvme_reset_failed_dev;
                                queue_work(nvme_workq, &dev->reset_work);
                                continue;
                        }
+                       rcu_read_lock();
                        for (i = 0; i < dev->queue_count; i++) {
-                               struct nvme_queue *nvmeq = dev->queues[i];
+                               struct nvme_queue *nvmeq =
+                                               rcu_dereference(dev->queues[i]);
                                if (!nvmeq)
                                        continue;
                                spin_lock_irq(&nvmeq->q_lock);
                                nvme_process_cq(nvmeq);
                                nvme_cancel_ios(nvmeq, true);
                                nvme_resubmit_bios(nvmeq);
+                               nvme_resubmit_iods(nvmeq);
   unlock:
                                spin_unlock_irq(&nvmeq->q_lock);
                        }
+                       rcu_read_unlock();
                }
                spin_unlock(&dev_list_lock);
                schedule_timeout(round_jiffies_relative(HZ));
@@@ -1787,6 -1918,143 +1917,143 @@@ static struct nvme_ns *nvme_alloc_ns(st
        return NULL;
  }
  
+ static int nvme_find_closest_node(int node)
+ {
+       int n, val, min_val = INT_MAX, best_node = node;
+       for_each_online_node(n) {
+               if (n == node)
+                       continue;
+               val = node_distance(node, n);
+               if (val < min_val) {
+                       min_val = val;
+                       best_node = n;
+               }
+       }
+       return best_node;
+ }
+ static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
+                                                               int count)
+ {
+       int cpu;
+       for_each_cpu(cpu, qmask) {
+               if (cpumask_weight(nvmeq->cpu_mask) >= count)
+                       break;
+               if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
+                       *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
+       }
+ }
+ static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
+       const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
+ {
+       int next_cpu;
+       for_each_cpu(next_cpu, new_mask) {
+               cpumask_or(mask, mask, get_cpu_mask(next_cpu));
+               cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
+               cpumask_and(mask, mask, unassigned_cpus);
+               nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
+       }
+ }
+ static void nvme_create_io_queues(struct nvme_dev *dev)
+ {
+       unsigned i, max;
+       max = min(dev->max_qid, num_online_cpus());
+       for (i = dev->queue_count; i <= max; i++)
+               if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+                       break;
+       max = min(dev->queue_count - 1, num_online_cpus());
+       for (i = dev->online_queues; i <= max; i++)
+               if (nvme_create_queue(raw_nvmeq(dev, i), i))
+                       break;
+ }
+ /*
+  * If there are fewer queues than online cpus, this will try to optimally
+  * assign a queue to multiple cpus by grouping cpus that are "close" together:
+  * thread siblings, core, socket, closest node, then whatever else is
+  * available.
+  */
+ static void nvme_assign_io_queues(struct nvme_dev *dev)
+ {
+       unsigned cpu, cpus_per_queue, queues, remainder, i;
+       cpumask_var_t unassigned_cpus;
+       nvme_create_io_queues(dev);
+       queues = min(dev->online_queues - 1, num_online_cpus());
+       if (!queues)
+               return;
+       cpus_per_queue = num_online_cpus() / queues;
+       remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
+       if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
+               return;
+       cpumask_copy(unassigned_cpus, cpu_online_mask);
+       cpu = cpumask_first(unassigned_cpus);
+       for (i = 1; i <= queues; i++) {
+               struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
+               cpumask_t mask;
+               cpumask_clear(nvmeq->cpu_mask);
+               if (!cpumask_weight(unassigned_cpus)) {
+                       unlock_nvmeq(nvmeq);
+                       break;
+               }
+               mask = *get_cpu_mask(cpu);
+               nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               topology_thread_cpumask(cpu),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               topology_core_cpumask(cpu),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               cpumask_of_node(cpu_to_node(cpu)),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               cpumask_of_node(
+                                       nvme_find_closest_node(
+                                               cpu_to_node(cpu))),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               unassigned_cpus,
+                               nvmeq, cpus_per_queue);
+               WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
+                       "nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
+                       dev->instance, i);
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                                       nvmeq->cpu_mask);
+               cpumask_andnot(unassigned_cpus, unassigned_cpus,
+                                               nvmeq->cpu_mask);
+               cpu = cpumask_next(cpu, unassigned_cpus);
+               if (remainder && !--remainder)
+                       cpus_per_queue++;
+               unlock_nvmeq(nvmeq);
+       }
+       WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
+                                                               dev->instance);
+       i = 0;
+       cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
+       for_each_cpu(cpu, unassigned_cpus)
+               *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
+       free_cpumask_var(unassigned_cpus);
+ }
  static int set_queue_count(struct nvme_dev *dev, int count)
  {
        int status;
@@@ -1805,13 -2073,26 +2072,26 @@@ static size_t db_bar_size(struct nvme_d
        return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
  }
  
+ static int nvme_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *hcpu)
+ {
+       struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_DEAD:
+               nvme_assign_io_queues(dev);
+               break;
+       }
+       return NOTIFY_OK;
+ }
  static int nvme_setup_io_queues(struct nvme_dev *dev)
  {
-       struct nvme_queue *adminq = dev->queues[0];
+       struct nvme_queue *adminq = raw_nvmeq(dev, 0);
        struct pci_dev *pdev = dev->pci_dev;
-       int result, cpu, i, vecs, nr_io_queues, size, q_depth;
+       int result, i, vecs, nr_io_queues, size;
  
-       nr_io_queues = num_online_cpus();
+       nr_io_queues = num_possible_cpus();
        result = set_queue_count(dev, nr_io_queues);
        if (result < 0)
                return result;
                        size = db_bar_size(dev, nr_io_queues);
                } while (1);
                dev->dbs = ((void __iomem *)dev->bar) + 4096;
-               dev->queues[0]->q_db = dev->dbs;
+               adminq->q_db = dev->dbs;
        }
  
        /* Deregister the admin queue's interrupt */
        free_irq(dev->entry[0].vector, adminq);
  
 -      vecs = nr_io_queues;
 -      for (i = 0; i < vecs; i++)
 +      for (i = 0; i < nr_io_queues; i++)
                dev->entry[i].entry = i;
 -      for (;;) {
 -              result = pci_enable_msix(pdev, dev->entry, vecs);
 -              if (result <= 0)
 -                      break;
 -              vecs = result;
 -      }
 -
 -      if (result < 0) {
 -              vecs = nr_io_queues;
 -              if (vecs > 32)
 -                      vecs = 32;
 -              for (;;) {
 -                      result = pci_enable_msi_block(pdev, vecs);
 -                      if (result == 0) {
 -                              for (i = 0; i < vecs; i++)
 -                                      dev->entry[i].vector = i + pdev->irq;
 -                              break;
 -                      } else if (result < 0) {
 -                              vecs = 1;
 -                              break;
 -                      }
 -                      vecs = result;
 +      vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
 +      if (vecs < 0) {
 +              vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
 +              if (vecs < 0) {
 +                      vecs = 1;
 +              } else {
 +                      for (i = 0; i < vecs; i++)
 +                              dev->entry[i].vector = i + pdev->irq;
                }
        }
  
         * number of interrupts.
         */
        nr_io_queues = vecs;
+       dev->max_qid = nr_io_queues;
  
        result = queue_request_irq(dev, adminq, adminq->irqname);
        if (result) {
        }
  
        /* Free previously allocated queues that are no longer usable */
-       spin_lock(&dev_list_lock);
-       for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
-               struct nvme_queue *nvmeq = dev->queues[i];
-               spin_lock_irq(&nvmeq->q_lock);
-               nvme_cancel_ios(nvmeq, false);
-               spin_unlock_irq(&nvmeq->q_lock);
-               nvme_free_queue(nvmeq);
-               dev->queue_count--;
-               dev->queues[i] = NULL;
-       }
-       spin_unlock(&dev_list_lock);
-       cpu = cpumask_first(cpu_online_mask);
-       for (i = 0; i < nr_io_queues; i++) {
-               irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
-               cpu = cpumask_next(cpu, cpu_online_mask);
-       }
-       q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
-                                                               NVME_Q_DEPTH);
-       for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
-               dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
-               if (!dev->queues[i + 1]) {
-                       result = -ENOMEM;
-                       goto free_queues;
-               }
-       }
-       for (; i < num_possible_cpus(); i++) {
-               int target = i % rounddown_pow_of_two(dev->queue_count - 1);
-               dev->queues[i + 1] = dev->queues[target + 1];
-       }
+       nvme_free_queues(dev, nr_io_queues + 1);
+       nvme_assign_io_queues(dev);
  
-       for (i = 1; i < dev->queue_count; i++) {
-               result = nvme_create_queue(dev->queues[i], i);
-               if (result) {
-                       for (--i; i > 0; i--)
-                               nvme_disable_queue(dev, i);
-                       goto free_queues;
-               }
-       }
+       dev->nb.notifier_call = &nvme_cpu_notify;
+       result = register_hotcpu_notifier(&dev->nb);
+       if (result)
+               goto free_queues;
  
        return 0;
  
@@@ -1985,6 -2246,7 +2230,7 @@@ static int nvme_dev_add(struct nvme_de
  
  static int nvme_dev_map(struct nvme_dev *dev)
  {
+       u64 cap;
        int bars, result = -ENOMEM;
        struct pci_dev *pdev = dev->pci_dev;
  
                result = -ENODEV;
                goto unmap;
        }
-       dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
+       cap = readq(&dev->bar->cap);
+       dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
+       dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
        dev->dbs = ((void __iomem *)dev->bar) + 4096;
  
        return 0;
@@@ -2164,7 -2428,7 +2412,7 @@@ static void nvme_disable_io_queues(stru
        atomic_set(&dq.refcount, 0);
        dq.worker = &worker;
        for (i = dev->queue_count - 1; i > 0; i--) {
-               struct nvme_queue *nvmeq = dev->queues[i];
+               struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
  
                if (nvme_suspend_queue(nvmeq))
                        continue;
        kthread_stop(kworker_task);
  }
  
+ /*
+ * Remove the node from the device list and check
+ * for whether or not we need to stop the nvme_thread.
+ */
+ static void nvme_dev_list_remove(struct nvme_dev *dev)
+ {
+       struct task_struct *tmp = NULL;
+       spin_lock(&dev_list_lock);
+       list_del_init(&dev->node);
+       if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+               tmp = nvme_thread;
+               nvme_thread = NULL;
+       }
+       spin_unlock(&dev_list_lock);
+       if (tmp)
+               kthread_stop(tmp);
+ }
  static void nvme_dev_shutdown(struct nvme_dev *dev)
  {
        int i;
  
        dev->initialized = 0;
+       unregister_hotcpu_notifier(&dev->nb);
  
-       spin_lock(&dev_list_lock);
-       list_del_init(&dev->node);
-       spin_unlock(&dev_list_lock);
+       nvme_dev_list_remove(dev);
  
        if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
                for (i = dev->queue_count - 1; i >= 0; i--) {
-                       struct nvme_queue *nvmeq = dev->queues[i];
+                       struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
                        nvme_suspend_queue(nvmeq);
                        nvme_clear_queue(nvmeq);
                }
@@@ -2282,6 -2565,7 +2549,7 @@@ static void nvme_free_dev(struct kref *
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
  
        nvme_free_namespaces(dev);
+       free_percpu(dev->io_queue);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@@ -2325,6 -2609,7 +2593,7 @@@ static const struct file_operations nvm
  static int nvme_dev_start(struct nvme_dev *dev)
  {
        int result;
+       bool start_thread = false;
  
        result = nvme_dev_map(dev);
        if (result)
                goto unmap;
  
        spin_lock(&dev_list_lock);
+       if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+               start_thread = true;
+               nvme_thread = NULL;
+       }
        list_add(&dev->node, &dev_list);
        spin_unlock(&dev_list_lock);
  
+       if (start_thread) {
+               nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+               wake_up(&nvme_kthread_wait);
+       } else
+               wait_event_killable(nvme_kthread_wait, nvme_thread);
+       if (IS_ERR_OR_NULL(nvme_thread)) {
+               result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+               goto disable;
+       }
        result = nvme_setup_io_queues(dev);
        if (result && result != -EBUSY)
                goto disable;
  
   disable:
        nvme_disable_queue(dev, 0);
-       spin_lock(&dev_list_lock);
-       list_del_init(&dev->node);
-       spin_unlock(&dev_list_lock);
+       nvme_dev_list_remove(dev);
   unmap:
        nvme_dev_unmap(dev);
        return result;
@@@ -2367,18 -2665,10 +2649,10 @@@ static int nvme_remove_dead_ctrl(void *
  
  static void nvme_remove_disks(struct work_struct *ws)
  {
-       int i;
        struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
  
        nvme_dev_remove(dev);
-       spin_lock(&dev_list_lock);
-       for (i = dev->queue_count - 1; i > 0; i--) {
-               BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
-               nvme_free_queue(dev->queues[i]);
-               dev->queue_count--;
-               dev->queues[i] = NULL;
-       }
-       spin_unlock(&dev_list_lock);
+       nvme_free_queues(dev, 1);
  }
  
  static int nvme_dev_resume(struct nvme_dev *dev)
                return ret;
        if (ret == -EBUSY) {
                spin_lock(&dev_list_lock);
 -              PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
 +              dev->reset_workfn = nvme_remove_disks;
                queue_work(nvme_workq, &dev->reset_work);
                spin_unlock(&dev_list_lock);
        }
@@@ -2419,12 -2709,6 +2693,12 @@@ static void nvme_reset_failed_dev(struc
        nvme_dev_reset(dev);
  }
  
 +static void nvme_reset_workfn(struct work_struct *work)
 +{
 +      struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
 +      dev->reset_workfn(work);
 +}
 +
  static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
        int result = -ENOMEM;
                                                                GFP_KERNEL);
        if (!dev->queues)
                goto free;
+       dev->io_queue = alloc_percpu(unsigned short);
+       if (!dev->io_queue)
+               goto free;
  
        INIT_LIST_HEAD(&dev->namespaces);
 -      INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
 +      dev->reset_workfn = nvme_reset_failed_dev;
 +      INIT_WORK(&dev->reset_work, nvme_reset_workfn);
        dev->pci_dev = pdev;
        pci_set_drvdata(pdev, dev);
        result = nvme_set_instance(dev);
        if (result)
                goto release;
  
+       kref_init(&dev->kref);
        result = nvme_dev_start(dev);
        if (result) {
                if (result == -EBUSY)
                goto release_pools;
        }
  
-       kref_init(&dev->kref);
        result = nvme_dev_add(dev);
        if (result)
                goto shutdown;
   release:
        nvme_release_instance(dev);
   free:
+       free_percpu(dev->io_queue);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@@ -2517,6 -2804,7 +2795,7 @@@ static void nvme_remove(struct pci_dev 
        nvme_dev_remove(dev);
        nvme_dev_shutdown(dev);
        nvme_free_queues(dev, 0);
+       rcu_barrier();
        nvme_release_instance(dev);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
  #define nvme_slot_reset NULL
  #define nvme_error_resume NULL
  
+ #ifdef CONFIG_PM_SLEEP
  static int nvme_suspend(struct device *dev)
  {
        struct pci_dev *pdev = to_pci_dev(dev);
@@@ -2544,11 -2833,12 +2824,12 @@@ static int nvme_resume(struct device *d
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
  
        if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
 -              PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
 +              ndev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &ndev->reset_work);
        }
        return 0;
  }
+ #endif
  
  static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
  
@@@ -2563,7 -2853,7 +2844,7 @@@ static const struct pci_error_handlers 
  /* Move to pci_ids.h later */
  #define PCI_CLASS_STORAGE_EXPRESS     0x010802
  
- static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
  };
@@@ -2585,14 -2875,11 +2866,11 @@@ static int __init nvme_init(void
  {
        int result;
  
-       nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-       if (IS_ERR(nvme_thread))
-               return PTR_ERR(nvme_thread);
+       init_waitqueue_head(&nvme_kthread_wait);
  
-       result = -ENOMEM;
        nvme_workq = create_singlethread_workqueue("nvme");
        if (!nvme_workq)
-               goto kill_kthread;
+               return -ENOMEM;
  
        result = register_blkdev(nvme_major, "nvme");
        if (result < 0)
        unregister_blkdev(nvme_major, "nvme");
   kill_workq:
        destroy_workqueue(nvme_workq);
-  kill_kthread:
-       kthread_stop(nvme_thread);
        return result;
  }
  
@@@ -2619,11 -2904,11 +2895,11 @@@ static void __exit nvme_exit(void
        pci_unregister_driver(&nvme_driver);
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
-       kthread_stop(nvme_thread);
+       BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
  }
  
  MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
  MODULE_LICENSE("GPL");
- MODULE_VERSION("0.8");
+ MODULE_VERSION("0.9");
  module_init(nvme_init);
  module_exit(nvme_exit);
diff --combined include/linux/nvme.h
index 6b9aafed225fcd9a48228ac9f6044c7a956cc325,b95431d0338bf21902316e30e2866781b1b750f4..a50173ca1d729aba84bc00c2572b40a91ec93140
@@@ -66,20 -66,25 +66,25 @@@ enum 
  
  #define NVME_VS(major, minor) (major << 16 | minor)
  
- #define NVME_IO_TIMEOUT       (5 * HZ)
+ extern unsigned char io_timeout;
+ #define NVME_IO_TIMEOUT       (io_timeout * HZ)
  
  /*
   * Represents an NVM Express device.  Each nvme_dev is a PCI function.
   */
  struct nvme_dev {
        struct list_head node;
-       struct nvme_queue **queues;
+       struct nvme_queue __rcu **queues;
+       unsigned short __percpu *io_queue;
        u32 __iomem *dbs;
        struct pci_dev *pci_dev;
        struct dma_pool *prp_page_pool;
        struct dma_pool *prp_small_pool;
        int instance;
-       int queue_count;
+       unsigned queue_count;
+       unsigned online_queues;
+       unsigned max_qid;
+       int q_depth;
        u32 db_stride;
        u32 ctrl_config;
        struct msix_entry *entry;
@@@ -87,8 -92,8 +92,9 @@@
        struct list_head namespaces;
        struct kref kref;
        struct miscdevice miscdev;
 +      work_func_t reset_workfn;
        struct work_struct reset_work;
+       struct notifier_block nb;
        char name[12];
        char serial[20];
        char model[40];
@@@ -131,6 -136,7 +137,7 @@@ struct nvme_iod 
        int length;             /* Of data, in bytes */
        unsigned long start_time;
        dma_addr_t first_dma;
+       struct list_head node;
        struct scatterlist sg[0];
  };
  
@@@ -146,16 -152,12 +153,12 @@@ static inline u64 nvme_block_nr(struct 
   */
  void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
  
- int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-                       struct nvme_iod *iod, int total_len, gfp_t gfp);
+ int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t);
  struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
                                unsigned long addr, unsigned length);
  void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
                        struct nvme_iod *iod);
- struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
- void put_nvmeq(struct nvme_queue *nvmeq);
- int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
-                                               u32 *result, unsigned timeout);
+ int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
  int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
  int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
                                                        u32 *result);