[BLOCK] ll_rw_blk: separate out bio init part from __make_request
author    Tejun Heo <htejun@gmail.com>
          Fri, 6 Jan 2006 08:49:58 +0000 (09:49 +0100)
committer Jens Axboe <axboe@suse.de>
          Fri, 6 Jan 2006 08:49:58 +0000 (09:49 +0100)
Separate out the bio initialization part of __make_request() into a new
helper, init_request_from_bio().  It will be used by the blk_ordered
reimplementation that follows in this series.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
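
To see the shape of the change outside kernel context, here is a minimal
userspace sketch.  struct bio and struct request are cut down to
hypothetical stand-ins with a couple of fields each so the example
compiles standalone, and ordered_submit() is an invented second caller
standing in for the reuse the commit message alludes to; it is not the
actual blk_ordered code.

    #include <stdio.h>

    /* Cut-down stand-ins for the kernel structures; hypothetical fields. */
    struct bio {
            unsigned long bi_sector;   /* start sector of the I/O */
            unsigned int  bi_size;     /* size in bytes */
    };

    struct request {
            unsigned long sector;
            unsigned long nr_sectors;
            struct bio   *bio;
    };

    /* The extracted helper: every bio-derived field is set in one place. */
    static void init_request_from_bio(struct request *req, struct bio *bio)
    {
            req->sector     = bio->bi_sector;
            req->nr_sectors = bio->bi_size >> 9;  /* bytes -> 512B sectors */
            req->bio        = bio;
    }

    /* The existing submission path simply calls the helper... */
    static void make_request(struct request *req, struct bio *bio)
    {
            init_request_from_bio(req, bio);
            /* ...then merging/queueing would follow */
    }

    /* ...and a hypothetical second path (e.g. ordered writes) reuses it. */
    static void ordered_submit(struct request *req, struct bio *bio)
    {
            init_request_from_bio(req, bio);
            /* ...barrier sequencing would follow */
    }

    int main(void)
    {
            struct bio b = { .bi_sector = 2048, .bi_size = 4096 };
            struct request r;

            make_request(&r, &b);
            printf("sector=%lu nr_sectors=%lu\n", r.sector, r.nr_sectors);

            ordered_submit(&r, &b);
            return 0;
    }

The diff below does exactly this inside ll_rw_blk.c, with the full set of
request fields.
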
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b1ae69bc5ac0b012fc8985091fad14395c4157e..65c4efc02adfed059abe61c3b13dec03fdd3b9cf 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -36,6 +36,8 @@
 static void blk_unplug_work(void *data);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -1667,8 +1669,6 @@ static int blk_init_free_list(request_queue_t *q)
        return 0;
 }
 
-static int __make_request(request_queue_t *, struct bio *);
-
 request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 {
        return blk_alloc_queue_node(gfp_mask, -1);
@@ -2659,6 +2659,36 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
 
 EXPORT_SYMBOL(blk_attempt_remerge);
 
+static void init_request_from_bio(struct request *req, struct bio *bio)
+{
+       req->flags |= REQ_CMD;
+
+       /*
+        * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+        */
+       if (bio_rw_ahead(bio) || bio_failfast(bio))
+               req->flags |= REQ_FAILFAST;
+
+       /*
+        * REQ_BARRIER implies no merging, but let's make it explicit
+        */
+       if (unlikely(bio_barrier(bio)))
+               req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+       req->errors = 0;
+       req->hard_sector = req->sector = bio->bi_sector;
+       req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+       req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+       req->nr_phys_segments = bio_phys_segments(req->q, bio);
+       req->nr_hw_segments = bio_hw_segments(req->q, bio);
+       req->buffer = bio_data(bio);    /* see ->buffer comment above */
+       req->waiting = NULL;
+       req->bio = req->biotail = bio;
+       req->ioprio = bio_prio(bio);
+       req->rq_disk = bio->bi_bdev->bd_disk;
+       req->start_time = jiffies;
+}
+
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
        struct request *req;
@@ -2754,33 +2784,7 @@ get_rq:
         * We don't worry about that case for efficiency. It won't happen
         * often, and the elevators are able to handle it.
         */
-
-       req->flags |= REQ_CMD;
-
-       /*
-        * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
-        */
-       if (bio_rw_ahead(bio) || bio_failfast(bio))
-               req->flags |= REQ_FAILFAST;
-
-       /*
-        * REQ_BARRIER implies no merging, but lets make it explicit
-        */
-       if (unlikely(barrier))
-               req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
-       req->errors = 0;
-       req->hard_sector = req->sector = sector;
-       req->hard_nr_sectors = req->nr_sectors = nr_sectors;
-       req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-       req->nr_phys_segments = bio_phys_segments(q, bio);
-       req->nr_hw_segments = bio_hw_segments(q, bio);
-       req->buffer = bio_data(bio);    /* see ->buffer comment above */
-       req->waiting = NULL;
-       req->bio = req->biotail = bio;
-       req->ioprio = prio;
-       req->rq_disk = bio->bi_bdev->bd_disk;
-       req->start_time = jiffies;
+       init_request_from_bio(req, bio);
 
        spin_lock_irq(q->queue_lock);
        if (elv_queue_empty(q))
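
A note on the first hunk: the old mid-file forward declaration of
__make_request() is dropped, and both static functions are instead
declared together near the top of ll_rw_blk.c, presumably so that later
patches can call either function from code that appears earlier in the
file without further reshuffling of definitions.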