Merge branch 'async-scsi-resume' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 12 Apr 2014 00:23:52 +0000 (17:23 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 12 Apr 2014 00:23:52 +0000 (17:23 -0700)
Pull async SCSI resume support from Dan Williams:
 "Allow disks and other devices to resume in parallel.

  This provides a tangible speed up for a non-esoteric use case (laptop
  resume):

    https://01.org/suspendresume/blogs/tebrandt/2013/hard-disk-resume-optimization-simpler-approach"

* 'async-scsi-resume' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci:
  scsi: async sd resume

1  2 
drivers/scsi/scsi.c
drivers/scsi/scsi_scan.c
drivers/scsi/sd.c

diff --combined drivers/scsi/scsi.c
index c4d632c27a3ecdf2c7b89020a26d16a4d029c8f8,1b345bf41a91df1a1a3bd3b691ffacb7a6ba8b9d..88d46fe6bf987f0615486e1decafbd18f0ab459f
@@@ -91,6 -91,15 +91,15 @@@ EXPORT_SYMBOL(scsi_logging_level)
  ASYNC_DOMAIN(scsi_sd_probe_domain);
  EXPORT_SYMBOL(scsi_sd_probe_domain);
  
+ /*
+  * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
+  * asynchronous system resume operations.  It is marked 'exclusive' to avoid
+  * being included in the async_synchronize_full() that is invoked by
+  * dpm_resume()
+  */
+ ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
+ EXPORT_SYMBOL(scsi_sd_pm_domain);
  /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
   * You may not alter any existing entry (although adding new ones is
   * encouraged once assigned by ANSI/INCITS T10
@@@ -161,20 -170,47 +170,20 @@@ static struct scsi_host_cmd_pool scsi_c
  static DEFINE_MUTEX(host_cmd_pool_mutex);
  
  /**
 - * scsi_pool_alloc_command - internal function to get a fully allocated command
 - * @pool:     slab pool to allocate the command from
 - * @gfp_mask: mask for the allocation
 - *
 - * Returns a fully allocated command (with the allied sense buffer) or
 - * NULL on failure
 - */
 -static struct scsi_cmnd *
 -scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
 -{
 -      struct scsi_cmnd *cmd;
 -
 -      cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
 -      if (!cmd)
 -              return NULL;
 -
 -      cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
 -                                           gfp_mask | pool->gfp_mask);
 -      if (!cmd->sense_buffer) {
 -              kmem_cache_free(pool->cmd_slab, cmd);
 -              return NULL;
 -      }
 -
 -      return cmd;
 -}
 -
 -/**
 - * scsi_pool_free_command - internal function to release a command
 - * @pool:     slab pool to allocate the command from
 + * scsi_host_free_command - internal function to release a command
 + * @shost:    host to free the command for
   * @cmd:      command to release
   *
   * the command must previously have been allocated by
 - * scsi_pool_alloc_command.
 + * scsi_host_alloc_command.
   */
  static void
 -scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
 -                       struct scsi_cmnd *cmd)
 +scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
  {
 +      struct scsi_host_cmd_pool *pool = shost->cmd_pool;
 +
        if (cmd->prot_sdb)
                kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
 -
        kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
        kmem_cache_free(pool->cmd_slab, cmd);
  }
  static struct scsi_cmnd *
  scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
  {
 +      struct scsi_host_cmd_pool *pool = shost->cmd_pool;
        struct scsi_cmnd *cmd;
  
 -      cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
 +      cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
        if (!cmd)
 -              return NULL;
 +              goto fail;
 +
 +      cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
 +                                           gfp_mask | pool->gfp_mask);
 +      if (!cmd->sense_buffer)
 +              goto fail_free_cmd;
  
        if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
                cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
 -
 -              if (!cmd->prot_sdb) {
 -                      scsi_pool_free_command(shost->cmd_pool, cmd);
 -                      return NULL;
 -              }
 +              if (!cmd->prot_sdb)
 +                      goto fail_free_sense;
        }
  
        return cmd;
 +
 +fail_free_sense:
 +      kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
 +fail_free_cmd:
 +      kmem_cache_free(pool->cmd_slab, cmd);
 +fail:
 +      return NULL;
  }
  
  /**
@@@ -267,19 -293,27 +276,19 @@@ EXPORT_SYMBOL_GPL(__scsi_get_command)
   */
  struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
  {
 -      struct scsi_cmnd *cmd;
 +      struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
 +      unsigned long flags;
  
 -      /* Bail if we can't get a reference to the device */
 -      if (!get_device(&dev->sdev_gendev))
 +      if (unlikely(cmd == NULL))
                return NULL;
  
 -      cmd = __scsi_get_command(dev->host, gfp_mask);
 -
 -      if (likely(cmd != NULL)) {
 -              unsigned long flags;
 -
 -              cmd->device = dev;
 -              INIT_LIST_HEAD(&cmd->list);
 -              INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
 -              spin_lock_irqsave(&dev->list_lock, flags);
 -              list_add_tail(&cmd->list, &dev->cmd_list);
 -              spin_unlock_irqrestore(&dev->list_lock, flags);
 -              cmd->jiffies_at_alloc = jiffies;
 -      } else
 -              put_device(&dev->sdev_gendev);
 -
 +      cmd->device = dev;
 +      INIT_LIST_HEAD(&cmd->list);
 +      INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
 +      spin_lock_irqsave(&dev->list_lock, flags);
 +      list_add_tail(&cmd->list, &dev->cmd_list);
 +      spin_unlock_irqrestore(&dev->list_lock, flags);
 +      cmd->jiffies_at_alloc = jiffies;
        return cmd;
  }
  EXPORT_SYMBOL(scsi_get_command);
   * __scsi_put_command - Free a struct scsi_cmnd
   * @shost: dev->host
   * @cmd: Command to free
 - * @dev: parent scsi device
   */
 -void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
 -                      struct device *dev)
 +void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
  {
        unsigned long flags;
  
 -      /* changing locks here, don't need to restore the irq state */
 -      spin_lock_irqsave(&shost->free_list_lock, flags);
        if (unlikely(list_empty(&shost->free_list))) {
 -              list_add(&cmd->list, &shost->free_list);
 -              cmd = NULL;
 +              spin_lock_irqsave(&shost->free_list_lock, flags);
 +              if (list_empty(&shost->free_list)) {
 +                      list_add(&cmd->list, &shost->free_list);
 +                      cmd = NULL;
 +              }
 +              spin_unlock_irqrestore(&shost->free_list_lock, flags);
        }
 -      spin_unlock_irqrestore(&shost->free_list_lock, flags);
  
        if (likely(cmd != NULL))
 -              scsi_pool_free_command(shost->cmd_pool, cmd);
 -
 -      put_device(dev);
 +              scsi_host_free_command(shost, cmd);
  }
  EXPORT_SYMBOL(__scsi_put_command);
  
   */
  void scsi_put_command(struct scsi_cmnd *cmd)
  {
 -      struct scsi_device *sdev = cmd->device;
        unsigned long flags;
  
        /* serious error if the command hasn't come from a device list */
  
        cancel_delayed_work(&cmd->abort_work);
  
 -      __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
 +      __scsi_put_command(cmd->device->host, cmd);
  }
  EXPORT_SYMBOL(scsi_put_command);
  
 -static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
 +static struct scsi_host_cmd_pool *
 +scsi_find_host_cmd_pool(struct Scsi_Host *shost)
 +{
 +      if (shost->hostt->cmd_size)
 +              return shost->hostt->cmd_pool;
 +      if (shost->unchecked_isa_dma)
 +              return &scsi_cmd_dma_pool;
 +      return &scsi_cmd_pool;
 +}
 +
 +static void
 +scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
 +{
 +      kfree(pool->sense_name);
 +      kfree(pool->cmd_name);
 +      kfree(pool);
 +}
 +
 +static struct scsi_host_cmd_pool *
 +scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
 +{
 +      struct scsi_host_template *hostt = shost->hostt;
 +      struct scsi_host_cmd_pool *pool;
 +
 +      pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 +      if (!pool)
 +              return NULL;
 +
 +      pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
 +      pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
 +      if (!pool->cmd_name || !pool->sense_name) {
 +              scsi_free_host_cmd_pool(pool);
 +              return NULL;
 +      }
 +
 +      pool->slab_flags = SLAB_HWCACHE_ALIGN;
 +      if (shost->unchecked_isa_dma) {
 +              pool->slab_flags |= SLAB_CACHE_DMA;
 +              pool->gfp_mask = __GFP_DMA;
 +      }
 +      return pool;
 +}
 +
 +static struct scsi_host_cmd_pool *
 +scsi_get_host_cmd_pool(struct Scsi_Host *shost)
  {
 +      struct scsi_host_template *hostt = shost->hostt;
        struct scsi_host_cmd_pool *retval = NULL, *pool;
 +      size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
 +
        /*
         * Select a command slab for this host and create it if not
         * yet existent.
         */
        mutex_lock(&host_cmd_pool_mutex);
 -      pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
 -              &scsi_cmd_pool;
 +      pool = scsi_find_host_cmd_pool(shost);
 +      if (!pool) {
 +              pool = scsi_alloc_host_cmd_pool(shost);
 +              if (!pool)
 +                      goto out;
 +      }
 +
        if (!pool->users) {
 -              pool->cmd_slab = kmem_cache_create(pool->cmd_name,
 -                                                 sizeof(struct scsi_cmnd), 0,
 +              pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
                                                   pool->slab_flags, NULL);
                if (!pool->cmd_slab)
 -                      goto fail;
 +                      goto out_free_pool;
  
                pool->sense_slab = kmem_cache_create(pool->sense_name,
                                                     SCSI_SENSE_BUFFERSIZE, 0,
                                                     pool->slab_flags, NULL);
 -              if (!pool->sense_slab) {
 -                      kmem_cache_destroy(pool->cmd_slab);
 -                      goto fail;
 -              }
 +              if (!pool->sense_slab)
 +                      goto out_free_slab;
        }
  
        pool->users++;
        retval = pool;
 - fail:
 +out:
        mutex_unlock(&host_cmd_pool_mutex);
        return retval;
 +
 +out_free_slab:
 +      kmem_cache_destroy(pool->cmd_slab);
 +out_free_pool:
 +      if (hostt->cmd_size)
 +              scsi_free_host_cmd_pool(pool);
 +      goto out;
  }
  
 -static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
 +static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
  {
 +      struct scsi_host_template *hostt = shost->hostt;
        struct scsi_host_cmd_pool *pool;
  
        mutex_lock(&host_cmd_pool_mutex);
 -      pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
 -              &scsi_cmd_pool;
 +      pool = scsi_find_host_cmd_pool(shost);
 +
        /*
         * This may happen if a driver has a mismatched get and put
         * of the command pool; the driver should be implicated in
        if (!--pool->users) {
                kmem_cache_destroy(pool->cmd_slab);
                kmem_cache_destroy(pool->sense_slab);
 +              if (hostt->cmd_size)
 +                      scsi_free_host_cmd_pool(pool);
        }
        mutex_unlock(&host_cmd_pool_mutex);
  }
  
 -/**
 - * scsi_allocate_command - get a fully allocated SCSI command
 - * @gfp_mask: allocation mask
 - *
 - * This function is for use outside of the normal host based pools.
 - * It allocates the relevant command and takes an additional reference
 - * on the pool it used.  This function *must* be paired with
 - * scsi_free_command which also has the identical mask, otherwise the
 - * free pool counts will eventually go wrong and you'll trigger a bug.
 - *
 - * This function should *only* be used by drivers that need a static
 - * command allocation at start of day for internal functions.
 - */
 -struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
 -{
 -      struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
 -
 -      if (!pool)
 -              return NULL;
 -
 -      return scsi_pool_alloc_command(pool, gfp_mask);
 -}
 -EXPORT_SYMBOL(scsi_allocate_command);
 -
 -/**
 - * scsi_free_command - free a command allocated by scsi_allocate_command
 - * @gfp_mask: mask used in the original allocation
 - * @cmd:      command to free
 - *
 - * Note: using the original allocation mask is vital because that's
 - * what determines which command pool we use to free the command.  Any
 - * mismatch will cause the system to BUG eventually.
 - */
 -void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
 -{
 -      struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
 -
 -      /*
 -       * this could trigger if the mask to scsi_allocate_command
 -       * doesn't match this mask.  Otherwise we're guaranteed that this
 -       * succeeds because scsi_allocate_command must have taken a reference
 -       * on the pool
 -       */
 -      BUG_ON(!pool);
 -
 -      scsi_pool_free_command(pool, cmd);
 -      /*
 -       * scsi_put_host_cmd_pool is called twice; once to release the
 -       * reference we took above, and once to release the reference
 -       * originally taken by scsi_allocate_command
 -       */
 -      scsi_put_host_cmd_pool(gfp_mask);
 -      scsi_put_host_cmd_pool(gfp_mask);
 -}
 -EXPORT_SYMBOL(scsi_free_command);
 -
  /**
   * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
   * @shost: host to allocate the freelist for.
   */
  int scsi_setup_command_freelist(struct Scsi_Host *shost)
  {
 -      struct scsi_cmnd *cmd;
        const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
 +      struct scsi_cmnd *cmd;
  
        spin_lock_init(&shost->free_list_lock);
        INIT_LIST_HEAD(&shost->free_list);
  
 -      shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
 -
 +      shost->cmd_pool = scsi_get_host_cmd_pool(shost);
        if (!shost->cmd_pool)
                return -ENOMEM;
  
         */
        cmd = scsi_host_alloc_command(shost, gfp_mask);
        if (!cmd) {
 -              scsi_put_host_cmd_pool(gfp_mask);
 +              scsi_put_host_cmd_pool(shost);
                shost->cmd_pool = NULL;
                return -ENOMEM;
        }
@@@ -497,10 -533,10 +506,10 @@@ void scsi_destroy_command_freelist(stru
  
                cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
                list_del_init(&cmd->list);
 -              scsi_pool_free_command(shost->cmd_pool, cmd);
 +              scsi_host_free_command(shost, cmd);
        }
        shost->cmd_pool = NULL;
 -      scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
 +      scsi_put_host_cmd_pool(shost);
  }
  
  #ifdef CONFIG_SCSI_LOGGING
@@@ -927,7 -963,7 +936,7 @@@ EXPORT_SYMBOL(scsi_track_queue_full)
   * This is an internal helper function.  You probably want to use
   * scsi_get_vpd_page instead.
   *
 - * Returns 0 on success or a negative error number.
 + * Returns size of the vpd page on success or a negative error number.
   */
  static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
                                                        u8 page, unsigned len)
        int result;
        unsigned char cmd[16];
  
 +      if (len < 4)
 +              return -EINVAL;
 +
        cmd[0] = INQUIRY;
        cmd[1] = 1;             /* EVPD */
        cmd[2] = page;
        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
                                  len, NULL, 30 * HZ, 3, NULL);
        if (result)
 -              return result;
 +              return -EIO;
  
        /* Sanity check that we got the page back that we asked for */
        if (buffer[1] != page)
                return -EIO;
  
 -      return 0;
 +      return get_unaligned_be16(&buffer[2]) + 4;
  }
  
  /**
@@@ -985,18 -1018,18 +994,18 @@@ int scsi_get_vpd_page(struct scsi_devic
  
        /* Ask for all the pages supported by this device */
        result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
 -      if (result)
 +      if (result < 4)
                goto fail;
  
        /* If the user actually wanted this page, we can skip the rest */
        if (page == 0)
                return 0;
  
 -      for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
 -              if (buf[i + 4] == page)
 +      for (i = 4; i < min(result, buf_len); i++)
 +              if (buf[i] == page)
                        goto found;
  
 -      if (i < buf[3] && i >= buf_len - 4)
 +      if (i < result && i >= buf_len)
                /* ran off the end of the buffer, give us benefit of doubt */
                goto found;
        /* The device claims it doesn't support the requested page */
  
   found:
        result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
 -      if (result)
 +      if (result < 0)
                goto fail;
  
        return 0;
  }
  EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
  
 +/**
 + * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
 + * @sdev: The device to ask
 + *
 + * Attach the 'Device Identification' VPD page (0x83) and the
 + * 'Unit Serial Number' VPD page (0x80) to a SCSI device
 + * structure. This information can be used to identify the device
 + * uniquely.
 + */
 +void scsi_attach_vpd(struct scsi_device *sdev)
 +{
 +      int result, i;
 +      int vpd_len = SCSI_VPD_PG_LEN;
 +      int pg80_supported = 0;
 +      int pg83_supported = 0;
 +      unsigned char *vpd_buf;
 +
 +      if (sdev->skip_vpd_pages)
 +              return;
 +retry_pg0:
 +      vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
 +      if (!vpd_buf)
 +              return;
 +
 +      /* Ask for all the pages supported by this device */
 +      result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
 +      if (result < 0) {
 +              kfree(vpd_buf);
 +              return;
 +      }
 +      if (result > vpd_len) {
 +              vpd_len = result;
 +              kfree(vpd_buf);
 +              goto retry_pg0;
 +      }
 +
 +      for (i = 4; i < result; i++) {
 +              if (vpd_buf[i] == 0x80)
 +                      pg80_supported = 1;
 +              if (vpd_buf[i] == 0x83)
 +                      pg83_supported = 1;
 +      }
 +      kfree(vpd_buf);
 +      vpd_len = SCSI_VPD_PG_LEN;
 +
 +      if (pg80_supported) {
 +retry_pg80:
 +              vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
 +              if (!vpd_buf)
 +                      return;
 +
 +              result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
 +              if (result < 0) {
 +                      kfree(vpd_buf);
 +                      return;
 +              }
 +              if (result > vpd_len) {
 +                      vpd_len = result;
 +                      kfree(vpd_buf);
 +                      goto retry_pg80;
 +              }
 +              sdev->vpd_pg80_len = result;
 +              sdev->vpd_pg80 = vpd_buf;
 +              vpd_len = SCSI_VPD_PG_LEN;
 +      }
 +
 +      if (pg83_supported) {
 +retry_pg83:
 +              vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
 +              if (!vpd_buf)
 +                      return;
 +
 +              result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
 +              if (result < 0) {
 +                      kfree(vpd_buf);
 +                      return;
 +              }
 +              if (result > vpd_len) {
 +                      vpd_len = result;
 +                      kfree(vpd_buf);
 +                      goto retry_pg83;
 +              }
 +              sdev->vpd_pg83_len = result;
 +              sdev->vpd_pg83 = vpd_buf;
 +      }
 +}
 +
  /**
   * scsi_report_opcode - Find out if a given command opcode is supported
   * @sdev:     scsi device to query
diff --combined drivers/scsi/scsi_scan.c
index 27f96d5b768068326f67f5637a29a2e4ac8a503c,6b2f51f52af61687c8bc686a0a8b559bed3059df..e02b3aab56ce3ecf147d4e33aa4156713ded1615
@@@ -97,7 -97,7 +97,7 @@@ MODULE_PARM_DESC(max_luns
  #define SCSI_SCAN_TYPE_DEFAULT "sync"
  #endif
  
- static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+ char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
  
  module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
  MODULE_PARM_DESC(scan, "sync, async or none");
@@@ -320,7 -320,6 +320,7 @@@ static void scsi_target_destroy(struct 
        struct Scsi_Host *shost = dev_to_shost(dev->parent);
        unsigned long flags;
  
 +      starget->state = STARGET_DEL;
        transport_destroy_device(dev);
        spin_lock_irqsave(shost->host_lock, flags);
        if (shost->hostt->target_destroy)
@@@ -371,37 -370,6 +371,37 @@@ static struct scsi_target *__scsi_find_
        return found_starget;
  }
  
 +/**
 + * scsi_target_reap_ref_release - remove target from visibility
 + * @kref: the reap_ref in the target being released
 + *
 + * Called on last put of reap_ref, which is the indication that no device
 + * under this target is visible anymore, so render the target invisible in
 + * sysfs.  Note: we have to be in user context here because the target reaps
 + * should be done in places where the scsi device visibility is being removed.
 + */
 +static void scsi_target_reap_ref_release(struct kref *kref)
 +{
 +      struct scsi_target *starget
 +              = container_of(kref, struct scsi_target, reap_ref);
 +
 +      /*
 +       * if we get here and the target is still in the CREATED state that
 +       * means it was allocated but never made visible (because a scan
 +       * turned up no LUNs), so don't call device_del() on it.
 +       */
 +      if (starget->state != STARGET_CREATED) {
 +              transport_remove_device(&starget->dev);
 +              device_del(&starget->dev);
 +      }
 +      scsi_target_destroy(starget);
 +}
 +
 +static void scsi_target_reap_ref_put(struct scsi_target *starget)
 +{
 +      kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
 +}
 +
  /**
   * scsi_alloc_target - allocate a new or find an existing target
   * @parent:   parent of the target (need not be a scsi host)
@@@ -424,7 -392,7 +424,7 @@@ static struct scsi_target *scsi_alloc_t
                + shost->transportt->target_size;
        struct scsi_target *starget;
        struct scsi_target *found_target;
 -      int error;
 +      int error, ref_got;
  
        starget = kzalloc(size, GFP_KERNEL);
        if (!starget) {
        }
        dev = &starget->dev;
        device_initialize(dev);
 -      starget->reap_ref = 1;
 +      kref_init(&starget->reap_ref);
        dev->parent = get_device(parent);
        dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
        dev->bus = &scsi_bus_type;
        return starget;
  
   found:
 -      found_target->reap_ref++;
 +      /*
 +       * release routine already fired if kref is zero, so if we can still
 +       * take the reference, the target must be alive.  If we can't, it must
 +       * be dying and we need to wait for a new target
 +       */
 +      ref_got = kref_get_unless_zero(&found_target->reap_ref);
 +
        spin_unlock_irqrestore(shost->host_lock, flags);
 -      if (found_target->state != STARGET_DEL) {
 +      if (ref_got) {
                put_device(dev);
                return found_target;
        }
 -      /* Unfortunately, we found a dying target; need to
 -       * wait until it's dead before we can get a new one */
 +      /*
 +       * Unfortunately, we found a dying target; need to wait until it's
 +       * dead before we can get a new one.  There is an anomaly here.  We
 +       * *should* call scsi_target_reap() to balance the kref_get() of the
 +       * reap_ref above.  However, since the target being released, it's
 +       * already invisible and the reap_ref is irrelevant.  If we call
 +       * scsi_target_reap() we might spuriously do another device_del() on
 +       * an already invisible target.
 +       */
        put_device(&found_target->dev);
 -      flush_scheduled_work();
 +      /*
 +       * length of time is irrelevant here, we just want to yield the CPU
 +       * for a tick to avoid busy waiting for the target to die.
 +       */
 +      msleep(1);
        goto retry;
  }
  
 -static void scsi_target_reap_usercontext(struct work_struct *work)
 -{
 -      struct scsi_target *starget =
 -              container_of(work, struct scsi_target, ew.work);
 -
 -      transport_remove_device(&starget->dev);
 -      device_del(&starget->dev);
 -      scsi_target_destroy(starget);
 -}
 -
  /**
   * scsi_target_reap - check to see if target is in use and destroy if not
   * @starget: target to be checked
   */
  void scsi_target_reap(struct scsi_target *starget)
  {
 -      struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 -      unsigned long flags;
 -      enum scsi_target_state state;
 -      int empty = 0;
 -
 -      spin_lock_irqsave(shost->host_lock, flags);
 -      state = starget->state;
 -      if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
 -              empty = 1;
 -              starget->state = STARGET_DEL;
 -      }
 -      spin_unlock_irqrestore(shost->host_lock, flags);
 -
 -      if (!empty)
 -              return;
 -
 -      BUG_ON(state == STARGET_DEL);
 -      if (state == STARGET_CREATED)
 -              scsi_target_destroy(starget);
 -      else
 -              execute_in_process_context(scsi_target_reap_usercontext,
 -                                         &starget->ew);
 +      /*
 +       * serious problem if this triggers: STARGET_DEL is only set in the if
 +       * the reap_ref drops to zero, so we're trying to do another final put
 +       * on an already released kref
 +       */
 +      BUG_ON(starget->state == STARGET_DEL);
 +      scsi_target_reap_ref_put(starget);
  }
  
  /**
@@@ -970,9 -946,6 +970,9 @@@ static int scsi_add_lun(struct scsi_dev
                }
        }
  
 +      if (sdev->scsi_level >= SCSI_3)
 +              scsi_attach_vpd(sdev);
 +
        sdev->max_queue_depth = sdev->queue_depth;
  
        /*
@@@ -1559,10 -1532,6 +1559,10 @@@ struct scsi_device *__scsi_add_device(s
        }
        mutex_unlock(&shost->scan_mutex);
        scsi_autopm_put_target(starget);
 +      /*
 +       * paired with scsi_alloc_target().  Target will be destroyed unless
 +       * scsi_probe_and_add_lun made an underlying device visible
 +       */
        scsi_target_reap(starget);
        put_device(&starget->dev);
  
@@@ -1643,10 -1612,8 +1643,10 @@@ static void __scsi_scan_target(struct d
  
   out_reap:
        scsi_autopm_put_target(starget);
 -      /* now determine if the target has any children at all
 -       * and if not, nuke it */
 +      /*
 +       * paired with scsi_alloc_target(): determine if the target has
 +       * any children at all and if not, nuke it
 +       */
        scsi_target_reap(starget);
  
        put_device(&starget->dev);
diff --combined drivers/scsi/sd.c
index 89e6c04ac595045739291ed63632f2c37efe891b,700c595c603ee4f64c8e8c850a50463c22902fc9..efcbcd182863318f296936bc7fa16c96bc58efe5
@@@ -1463,8 -1463,8 +1463,8 @@@ static int sd_sync_cache(struct scsi_di
                        sd_print_sense_hdr(sdkp, &sshdr);
                /* we need to evaluate the error return  */
                if (scsi_sense_valid(&sshdr) &&
 -                      /* 0x3a is medium not present */
 -                      sshdr.asc == 0x3a)
 +                      (sshdr.asc == 0x3a ||   /* medium not present */
 +                       sshdr.asc == 0x20))    /* invalid command */
                                /* this is no error here */
                                return 0;
  
@@@ -2281,7 -2281,7 +2281,7 @@@ sd_read_write_protect_flag(struct scsi_
  
        set_disk_ro(sdkp->disk, 0);
        if (sdp->skip_ms_page_3f) {
 -              sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
 +              sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
                return;
        }
  
        }
  
        if (!scsi_status_is_good(res)) {
 -              sd_printk(KERN_WARNING, sdkp,
 +              sd_first_printk(KERN_WARNING, sdkp,
                          "Test WP failed, assume Write Enabled\n");
        } else {
                sdkp->write_prot = ((data.device_specific & 0x80) != 0);
@@@ -2381,8 -2381,7 +2381,8 @@@ sd_read_cache_type(struct scsi_disk *sd
        if (!data.header_length) {
                modepage = 6;
                first_len = 0;
 -              sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n");
 +              sd_first_printk(KERN_ERR, sdkp,
 +                              "Missing header in MODE_SENSE response\n");
        }
  
        /* that went OK, now ask for the proper length */
        if (len < 3)
                goto bad_sense;
        else if (len > SD_BUF_SIZE) {
 -              sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
 +              sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
                          "data from %d to %d bytes\n", len, SD_BUF_SIZE);
                len = SD_BUF_SIZE;
        }
                                /* We're interested only in the first 3 bytes.
                                 */
                                if (len - offset <= 2) {
 -                                      sd_printk(KERN_ERR, sdkp, "Incomplete "
 -                                                "mode parameter data\n");
 +                                      sd_first_printk(KERN_ERR, sdkp,
 +                                              "Incomplete mode parameter "
 +                                                      "data\n");
                                        goto defaults;
                                } else {
                                        modepage = page_code;
                                else if (!spf && len - offset > 1)
                                        offset += 2 + buffer[offset+1];
                                else {
 -                                      sd_printk(KERN_ERR, sdkp, "Incomplete "
 -                                                "mode parameter data\n");
 +                                      sd_first_printk(KERN_ERR, sdkp,
 +                                                      "Incomplete mode "
 +                                                      "parameter data\n");
                                        goto defaults;
                                }
                        }
                }
  
 -              sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
 +              sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
                goto defaults;
  
        Page_found:
  
                sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
                if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
 -                      sd_printk(KERN_NOTICE, sdkp,
 +                      sd_first_printk(KERN_NOTICE, sdkp,
                                  "Uses READ/WRITE(6), disabling FUA\n");
                        sdkp->DPOFUA = 0;
                }
@@@ -2478,19 -2475,16 +2478,19 @@@ bad_sense
            sshdr.sense_key == ILLEGAL_REQUEST &&
            sshdr.asc == 0x24 && sshdr.ascq == 0x0)
                /* Invalid field in CDB */
 -              sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
 +              sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
        else
 -              sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
 +              sd_first_printk(KERN_ERR, sdkp,
 +                              "Asking for cache data failed\n");
  
  defaults:
        if (sdp->wce_default_on) {
 -              sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
 +              sd_first_printk(KERN_NOTICE, sdkp,
 +                              "Assuming drive cache: write back\n");
                sdkp->WCE = 1;
        } else {
 -              sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
 +              sd_first_printk(KERN_ERR, sdkp,
 +                              "Assuming drive cache: write through\n");
                sdkp->WCE = 0;
        }
        sdkp->RCD = 0;
@@@ -2519,7 -2513,7 +2519,7 @@@ static void sd_read_app_tag_own(struct 
  
        if (!scsi_status_is_good(res) || !data.header_length ||
            data.length < 6) {
 -              sd_printk(KERN_WARNING, sdkp,
 +              sd_first_printk(KERN_WARNING, sdkp,
                          "getting Control mode page failed, assume no ATO\n");
  
                if (scsi_sense_valid(&sshdr))
        offset = data.header_length + data.block_descriptor_length;
  
        if ((buffer[offset] & 0x3f) != 0x0a) {
 -              sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
 +              sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
                return;
        }
  
@@@ -3026,6 -3020,7 +3026,7 @@@ static int sd_remove(struct device *dev
        devt = disk_devt(sdkp->disk);
        scsi_autopm_get_device(sdkp->device);
  
+       async_synchronize_full_domain(&scsi_sd_pm_domain);
        async_synchronize_full_domain(&scsi_sd_probe_domain);
        blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
        blk_queue_unprep_rq(sdkp->device->request_queue, NULL);