Merge branch 'for-4.4/core' into for-4.4/drivers
author    Jens Axboe <axboe@fb.com>
          Fri, 9 Oct 2015 16:40:29 +0000 (10:40 -0600)
committer Jens Axboe <axboe@fb.com>
          Fri, 9 Oct 2015 16:40:29 +0000 (10:40 -0600)
Signed-off-by: Jens Axboe <axboe@fb.com>
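
This merge resolves a conflict in drivers/block/loop.c: the
for-4.4/core branch changed blk_mq_complete_request() to take the
error code directly instead of reading it from rq->errors, while the
for-4.4/drivers branch added kiocb-based direct I/O (AIO) submission
to the loop driver. The resolution (the "++" lines below) makes both
the aio and non-aio completion paths pass the error straight to
blk_mq_complete_request(rq, error).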
drivers/block/loop.c

index 23376084a5cba7e631f101f15e227ded4da9132f,674f800a3b5760ad6374c98fa11e88097e30d160..423f4ca7d712dda6f012c32954f19c9ce3af9d9c
@@@ -445,90 -389,6 +445,89 @@@ static int lo_req_flush(struct loop_dev
        return ret;
  }
  
-       rq->errors = ret;
-       blk_mq_complete_request(rq);
 +static inline void handle_partial_read(struct loop_cmd *cmd, long bytes)
 +{
 +      if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE))
 +              return;
 +
 +      if (unlikely(bytes < blk_rq_bytes(cmd->rq))) {
 +              struct bio *bio = cmd->rq->bio;
 +
 +              bio_advance(bio, bytes);
 +              zero_fill_bio(bio);
 +      }
 +}
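
handle_partial_read() guards against short direct reads: if the
backing file returned fewer bytes than the request asked for, the bio
is advanced past the bytes that did arrive and the remainder is
zero-filled, so the caller never sees stale page contents. A hedged
userspace analogue of the same invariant (read_full_or_zero() is an
illustrative name, not part of loop.c):

    #include <string.h>
    #include <unistd.h>

    /* On a short read, zero the unfilled tail of the buffer so the
     * caller never observes stale bytes -- the same invariant
     * handle_partial_read() enforces on the request's bio. */
    static ssize_t read_full_or_zero(int fd, void *buf, size_t len)
    {
            ssize_t n = read(fd, buf, len);

            if (n >= 0 && (size_t)n < len)
                    memset((char *)buf + n, 0, len - (size_t)n);
            return n;
    }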
 +
 +static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
 +{
 +      struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
 +      struct request *rq = cmd->rq;
 +
 +      handle_partial_read(cmd, ret);
 +
 +      if (ret > 0)
 +              ret = 0;
 +      else if (ret < 0)
 +              ret = -EIO;
 +
++      blk_mq_complete_request(rq, ret);
 +}
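
The completion callback recovers the owning loop_cmd from the embedded
kiocb with container_of(), so no side lookup table is needed; positive
byte counts collapse to success and anything negative becomes -EIO
before the request is completed. A minimal sketch of that
embed-and-recover pattern (my_cmd and my_iocb_done are illustrative
names, not kernel APIs):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct my_iocb {
            void (*complete)(struct my_iocb *iocb, long ret);
    };

    struct my_cmd {
            int tag;
            struct my_iocb iocb;    /* embedded: callback context for free */
    };

    static void my_iocb_done(struct my_iocb *iocb, long ret)
    {
            /* Recover the per-command state from the member pointer. */
            struct my_cmd *cmd = container_of(iocb, struct my_cmd, iocb);

            (void)cmd;      /* finish the command using cmd->tag, ret, ... */
            (void)ret;
    }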
 +
 +static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 +                   loff_t pos, bool rw)
 +{
 +      struct iov_iter iter;
 +      struct bio_vec *bvec;
 +      struct bio *bio = cmd->rq->bio;
 +      struct file *file = lo->lo_backing_file;
 +      int ret;
 +
 +      /* nomerge for loop request queue */
 +      WARN_ON(cmd->rq->bio != cmd->rq->biotail);
 +
 +      bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 +      iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
 +                    bio_segments(bio), blk_rq_bytes(cmd->rq));
 +
 +      cmd->iocb.ki_pos = pos;
 +      cmd->iocb.ki_filp = file;
 +      cmd->iocb.ki_complete = lo_rw_aio_complete;
 +      cmd->iocb.ki_flags = IOCB_DIRECT;
 +
 +      if (rw == WRITE)
 +              ret = file->f_op->write_iter(&cmd->iocb, &iter);
 +      else
 +              ret = file->f_op->read_iter(&cmd->iocb, &iter);
 +
 +      if (ret != -EIOCBQUEUED)
 +              cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
 +      return 0;
 +}
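
lo_rw_aio() wraps the request's bio in an iov_iter, fills in the
embedded kiocb (file position, backing file, completion callback,
IOCB_DIRECT), and hands it to the backing file's ->read_iter or
->write_iter. The return convention does the rest: -EIOCBQUEUED means
the I/O is in flight and lo_rw_aio_complete() will run later; any
other value means the call finished synchronously, so the completion
is invoked by hand with that result. A hedged sketch of that
convention (submit_one() is an illustrative helper, not from loop.c):

    /* Submit one kiocb and funnel every synchronous outcome (full
     * transfer, short transfer, error) through the single completion
     * path, as lo_rw_aio() does after read_iter/write_iter returns. */
    static int submit_one(struct kiocb *iocb, struct iov_iter *iter,
                          ssize_t (*op)(struct kiocb *, struct iov_iter *))
    {
            ssize_t ret = op(iocb, iter);

            if (ret != -EIOCBQUEUED)
                    iocb->ki_complete(iocb, ret, 0); /* ran synchronously */
            return 0;     /* -EIOCBQUEUED: async completion still pending */
    }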
 +
 +static inline int lo_rw_simple(struct loop_device *lo,
 +              struct request *rq, loff_t pos, bool rw)
 +{
 +      struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 +
 +      if (cmd->use_aio)
 +              return lo_rw_aio(lo, cmd, pos, rw);
 +
 +      /*
 +       * lo_write_simple and lo_read_simple could in principle be
 +       * replaced by an io-submit-style function like lo_rw_aio().
 +       * The blocker is that lo_read_simple() needs to call
 +       * flush_dcache_page() after each page is written from the
 +       * kernel, and that is hard to do in an io-submit-style
 +       * function that submits all segments of the request at once.
 +       * Direct read IO doesn't need to run flush_dcache_page().
 +       */
 +      if (rw == WRITE)
 +              return lo_write_simple(lo, rq, pos);
 +      else
 +              return lo_read_simple(lo, rq, pos);
 +}
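
The comment above is the whole story: a page the kernel writes into
must be flushed with flush_dcache_page() individually, which fits a
per-segment loop but not a single submit of the whole request. A
hedged sketch of the per-page pattern lo_read_simple() follows
(read_page() is a hypothetical helper standing in for the actual copy):

    #include <linux/blkdev.h>
    #include <linux/highmem.h>

    static int simple_read_loop(struct loop_device *lo, struct request *rq,
                                loff_t pos)
    {
            struct bio_vec bvec;
            struct req_iterator iter;
            int ret;

            rq_for_each_segment(bvec, rq, iter) {
                    ret = read_page(lo, &bvec, pos);     /* hypothetical */
                    if (ret)
                            return ret;
                    flush_dcache_page(bvec.bv_page);     /* per-page flush */
                    pos += bvec.bv_len;
            }
            return 0;
    }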
 +
  static int do_req_filebacked(struct loop_device *lo, struct request *rq)
  {
        loff_t pos;
@@@ -1669,25 -1486,47 +1668,24 @@@ static void loop_handle_cmd(struct loop
  {
        const bool write = cmd->rq->cmd_flags & REQ_WRITE;
        struct loop_device *lo = cmd->rq->q->queuedata;
-       int ret = -EIO;
+       int ret = 0;
  
-       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+               ret = -EIO;
                goto failed;
+       }
  
        ret = do_req_filebacked(lo, cmd->rq);
   failed:
-       if (ret)
-               cmd->rq->errors = -EIO;
 -      blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
 +      /* complete non-aio request */
 +      if (!cmd->use_aio || ret)
-               blk_mq_complete_request(cmd->rq);
++              blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
  }
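
The resolved hunk combines both branches' rules: non-aio commands, and
aio commands whose submission failed, are completed inline with any
error folded to -EIO; a successfully queued aio command is left alone,
since lo_rw_aio_complete() will finish it. A compact restatement of
that rule (complete_unless_aio() is an illustrative name; the types
come from loop.h):

    static void complete_unless_aio(struct loop_cmd *cmd, int ret)
    {
            /* Successful aio submission: lo_rw_aio_complete() owns the
             * request now. Everything else completes right here. */
            if (!cmd->use_aio || ret)
                    blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
    }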
  
 -static void loop_queue_write_work(struct work_struct *work)
 -{
 -      struct loop_device *lo =
 -              container_of(work, struct loop_device, write_work);
 -      LIST_HEAD(cmd_list);
 -
 -      spin_lock_irq(&lo->lo_lock);
 - repeat:
 -      list_splice_init(&lo->write_cmd_head, &cmd_list);
 -      spin_unlock_irq(&lo->lo_lock);
 -
 -      while (!list_empty(&cmd_list)) {
 -              struct loop_cmd *cmd = list_first_entry(&cmd_list,
 -                              struct loop_cmd, list);
 -              list_del_init(&cmd->list);
 -              loop_handle_cmd(cmd);
 -      }
 -
 -      spin_lock_irq(&lo->lo_lock);
 -      if (!list_empty(&lo->write_cmd_head))
 -              goto repeat;
 -      lo->write_started = false;
 -      spin_unlock_irq(&lo->lo_lock);
 -}
 -
 -static void loop_queue_read_work(struct work_struct *work)
 +static void loop_queue_work(struct kthread_work *work)
  {
        struct loop_cmd *cmd =
 -              container_of(work, struct loop_cmd, read_work);
 +              container_of(work, struct loop_cmd, work);
  
        loop_handle_cmd(cmd);
  }
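
The deleted loop_queue_write_work()/loop_queue_read_work() pair
hand-rolled a worker loop over per-device command lists under
lo_lock; the replacement gives each command a kthread_work and routes
everything through one per-device kthread_worker, so loop_queue_work()
only has to recover the command and handle it. A hedged sketch of that
kthread_worker wiring, using the 4.4-era API names from
<linux/kthread.h> (demo_dev and the setup shown are assumptions about
how the rest of the driver hooks it up, not code from this patch):

    #include <linux/err.h>
    #include <linux/kthread.h>

    struct demo_dev {
            struct kthread_worker worker;
            struct task_struct *worker_task;
    };

    /* One worker thread per device, started once at setup time. */
    static int demo_start_worker(struct demo_dev *dev)
    {
            init_kthread_worker(&dev->worker);
            dev->worker_task = kthread_run(kthread_worker_fn, &dev->worker,
                                           "demo-worker");
            if (IS_ERR(dev->worker_task))
                    return PTR_ERR(dev->worker_task);
            return 0;
    }

    /* One kthread_work per command, queued from the submission path;
     * the worker invokes fn for each in order, serialized per device. */
    static void demo_queue_cmd(struct demo_dev *dev,
                               struct kthread_work *work,
                               void (*fn)(struct kthread_work *))
    {
            init_kthread_work(work, fn);
            queue_kthread_work(&dev->worker, work);
    }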