	/* Process context for config space updates */
	struct work_struct config_work;

-	/* Lock for config space updates */
-	struct mutex config_lock;
-
-	/* enable config space updates */
-	bool config_enable;
-
	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;
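This first hunk drops the config_lock mutex and the config_enable flag from struct virtio_blk: rather than gating the work handler with a flag, the teardown paths below flush_work() the handler once the device can no longer queue new work. The surviving fields, in outline (a sketch; all other members of the struct are elided):

struct virtio_blk {
	/* ... other members elided ... */

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;
};

The next hunk is in the completion path, virtblk_request_done().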
		req->errors = (error != 0);
	}

-	blk_mq_end_io(req, error);
+	blk_mq_end_request(req, error);
}
static void virtblk_done(struct virtqueue *vq)
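This hunk tracks a blk-mq rename: blk_mq_end_io() became blk_mq_end_request(), with unchanged semantics (finish I/O accounting and free the request). A minimal sketch in the new spelling, assuming the 3.18-era convention that error is 0 on success or a negative errno; example_complete is a hypothetical name:

/* Finish a request under the renamed API. */
static void example_complete(struct request *req, int error)
{
	blk_mq_end_request(req, error);
}

The following hunks rework virtio_queue_rq(); the unlock line below is the tail of virtblk_done(), carried as leading context.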
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
-	const bool last = (req->cmd_flags & REQ_END) != 0;
	int err;
	bool notify = false;
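(Context is elided here; the two braces below close the command-setup block further down the same function, where the new blk_mq_start_request() call is inserted.)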
		}
	}
+	blk_mq_start_request(req);
+
	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE)
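(More context is elided: the WRITE branch above goes on to set the data direction in the request header, while the BLK_MQ_RQ_QUEUE_ERROR return below belongs to the submission error path later in the function, which leads into the final hunk.)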
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

-	if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
+	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
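Taken together, the virtio_queue_rq() hunks follow a blk-mq interface change: ->queue_rq now receives a struct blk_mq_queue_data, the request is read from bd->rq, bd->last replaces the old REQ_END command flag, and the driver calls blk_mq_start_request() before issuing. A minimal sketch of a ->queue_rq in this shape (the example_* names and the doorbell stub are hypothetical, not part of the patch):

/* Hypothetical doorbell helper standing in for real device I/O. */
static void example_kick(struct blk_mq_hw_ctx *hctx)
{
	/* ring the hardware doorbell here */
}

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	blk_mq_start_request(req);	/* arms timeout handling */

	/* ... map and submit the request to the device ... */

	if (bd->last)			/* kick once per batch, not per request */
		example_kick(hctx);

	return BLK_MQ_RQ_QUEUE_OK;
}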
	char *envp[] = { "RESIZE=1", NULL };
	u64 capacity, size;

-	mutex_lock(&vblk->config_lock);
-	if (!vblk->config_enable)
-		goto done;
-
	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
-done:
-	mutex_unlock(&vblk->config_lock);
}
static void virtblk_config_changed(struct virtio_device *vdev)
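With the flag gone, the work handler runs unconditionally; correctness now rests on the callers below flushing the work at the right time. For reference, the function whose signature appears above is untouched by these hunks and only queues the work item, which is why a reset followed by flush_work() fully quiesces it (as it read in this era of the driver):

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

The next fragment is from virtblk_probe().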
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

-	mutex_init(&vblk->config_lock);
	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
-	vblk->config_enable = true;

	err = init_vq(vblk);
	if (err)
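(The next fragment is from further down virtblk_probe(), after the request queue's limits have been set up from the device's config space.)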
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

+	virtio_device_ready(vdev);
+
	add_disk(vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
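virtio_device_ready() sets the DRIVER_OK status bit, and it must come before add_disk(): adding the disk can trigger I/O immediately (the partition scan, for instance), so the virtqueues have to be usable first. In the virtio core of this era the helper is approximately the following (a sketch of the core helper, not part of this patch):

static inline void virtio_device_ready(struct virtio_device *dev)
{
	unsigned status = dev->config->get_status(dev);

	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}

The remaining hunks cover the teardown and power-management paths, starting with virtblk_remove().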
	int index = vblk->index;
	int refc;

-	/* Prevent config work handler from accessing the device. */
-	mutex_lock(&vblk->config_lock);
-	vblk->config_enable = false;
-	mutex_unlock(&vblk->config_lock);
+	/* Make sure no work handler is accessing the device. */
+	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

-	flush_work(&vblk->config_work);
-
	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
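In virtblk_remove() the lock-and-flag dance collapses into a single flush_work(), moved ahead of del_gendisk(), and the second flush after the reset is dropped. This relies on the virtio core of this era suppressing config-change callbacks once removal starts, so no new work can be queued behind the flush. The general pattern, as a sketch with hypothetical names:

/* Stop the source of new work first, then flush exactly once. */
example_disable_config_events(vdev);	/* hypothetical core-side step */
flush_work(&vblk->config_work);		/* handler done; safe to tear down */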
	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

-	/* Prevent config work handler from accessing the device. */
-	mutex_lock(&vblk->config_lock);
-	vblk->config_enable = false;
-	mutex_unlock(&vblk->config_lock);
-
+	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_stop_hw_queues(vblk->disk->queue);
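The freeze path gets the same treatment, and here the ordering is doing the real work: the reset comes first and guarantees no further config-change interrupts, so the single flush_work() that follows fully synchronizes with the handler before the blk-mq hardware queues are stopped. The final hunk is virtblk_restore().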
	struct virtio_blk *vblk = vdev->priv;
	int ret;

-	vblk->config_enable = true;
	ret = init_vq(vdev->priv);
-	if (!ret)
-		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+	if (ret)
+		return ret;
+
+	virtio_device_ready(vdev);

-	return ret;
+	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+	return 0;
}
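On restore the flag assignment disappears and the sequence becomes: bring the virtqueues back up, mark the device DRIVER_OK, and only then restart the stopped hardware queues, so ->queue_rq can never kick a device that is not yet ready. Reassembled from the hunk above, the resulting function reads (the CONFIG_PM wrapper is elided):

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	return 0;
}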
#endif