/*
 * block/blk-merge.c
 *
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

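/*
 * Split an oversized discard bio so that it honours the queue's discard
 * limits: the split point is capped at max_discard_sectors and rounded
 * down so that the remainder starts on a discard_granularity boundary.
 * Returns the split-off front part, or NULL if no split is needed.
 */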
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

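/*
 * Split a WRITE SAME bio that exceeds the queue's max_write_same_sectors
 * limit.  Returns the split-off front part, or NULL if the bio already
 * fits (or the queue does not advertise WRITE SAME support at all).
 */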
static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs)
{
        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

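/*
 * Walk the bio's segments and split it in front of the first bvec that
 * would exceed the queue's max_sectors, max_segments or segment size
 * limits, or that would leave an SG gap the hardware cannot handle.
 * Returns the split-off front part, or NULL if the whole bio fits.
 */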
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;

        bio_for_each_segment(bv, bio, iter) {
                if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
                        goto split;

                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        /* point at the stable copy, not the iterator variable */
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;
                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                nsegs++;
                bvprv = bv;
                /* point at the stable copy, not the iterator variable */
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;
        }

        return NULL;
split:
        return bio_split(bio, sectors, GFP_NOIO, bs);
}

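/**
 * blk_queue_split - split a bio that exceeds the queue limits
 * @q:   the request queue the bio is being submitted to
 * @bio: in/out pointer to the bio to check
 * @bs:  bio_set used to allocate the split for discard/write-same bios
 *
 * If *@bio is too large for @q it is split; the remainder is chained to
 * the front part and resubmitted via generic_make_request(), and *@bio
 * is replaced by the split-off front part.
 */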
void blk_queue_split(struct request_queue *q, struct bio **bio,
                     struct bio_set *bs)
{
        struct bio *split;

        if ((*bio)->bi_rw & REQ_DISCARD)
                split = blk_bio_discard_split(q, *bio, bs);
        else if ((*bio)->bi_rw & REQ_WRITE_SAME)
                split = blk_bio_write_same_split(q, *bio, bs);
        else
                split = blk_bio_segment_split(q, *bio, q->bio_split);

        if (split) {
                bio_chain(split, *bio);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);

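/*
 * Count the physical segments in a bio chain, honouring the queue's
 * segment size and boundary limits, and record the size of the first and
 * last segments in bi_seg_front_size/bi_seg_back_size so that later
 * request merges can check the combined segment size.
 */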
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        /*
         * This should probably return 0, but blk_add_request_payload()
         * relies on a discard being counted as a single segment (see the
         * discard handling in __blk_bios_map_sg()).
         */
        if (bio->bi_rw & REQ_DISCARD)
                return 1;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return 1;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

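/*
 * Recalculate bio->bi_phys_segments for a single bio and mark it valid.
 * When SG merging is disabled, the per-bio segment estimate is used
 * directly as long as it does not exceed the queue's segment limit.
 */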
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
                        (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

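/*
 * Return 1 if the last segment of @bio and the first segment of @nxt are
 * physically contiguous and may be merged into one segment without
 * violating the queue's segment size and boundary limits, 0 otherwise.
 */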
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;
        struct bvec_iter iter;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_for_each_segment(end_bv, bio, iter)
                if (end_bv.bv_len == iter.bi_size)
                        break;

        nxt_bv = bio_iovec(nxt);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

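/*
 * Add one bvec to the scatterlist being built: either extend the current
 * sg entry when the queue allows clustering and the limits permit it, or
 * start a new entry and bump *nsegs.
 */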
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

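/*
 * Map every bio in the chain onto @sglist.  Discard and write-same bios
 * map at most one segment; everything else is fed through
 * __blk_segment_map_sg() one bvec at a time.  Returns the number of sg
 * entries used.
 */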
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        if (bio->bi_rw & REQ_DISCARD) {
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
                 * blk_add_request_payload(), a discard bio may or may not have
                 * a payload we need to set up here (thank you Christoph) and
                 * bi_vcnt is really the only way of telling if we need to.
                 */

                if (bio->bi_vcnt)
                        goto single_segment;

                return 0;
        }

        if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
                *sg = sglist;
                bvec = bio_iovec(bio);
                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                return 1;
        }

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

/*
 * Map a request to a scatterlist; returns the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

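/*
 * Account the segments of @bio to @req for a merge.  Fails (and marks the
 * request REQ_NOMERGE) if the combined segment count would exceed the
 * queue limit or the integrity payloads cannot be merged.
 */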
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump the
         * physical segment counter.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}

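/*
 * Check whether @bio can be appended to the back of @req without
 * violating the queue's gap, integrity and size limits.
 */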
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

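/*
 * Check whether @req and @next can be merged into one request without
 * exceeding the queue's size and segment limits, and adjust the segment
 * accounting if the requests join across a physically contiguous boundary.
 */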
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if either of the requests is a re-queued request.
         * We can't merge them if they are.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

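/*
 * Drop the in-flight accounting for a request that is being merged away
 * into another request.
 */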
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                          struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append the bio list from next
         * to req and release next.  ll_merge_requests_fn() will have
         * updated the segment counts; update the byte counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of the bios has passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

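/*
 * Try to merge @rq with the request that the elevator considers to follow
 * (attempt_back_merge) or precede (attempt_front_merge) it on disk.
 */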
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}

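/*
 * Check whether @bio is a candidate for merging into @rq at all: same
 * direction, same device, compatible flags and integrity payloads.  The
 * size and segment limits are checked later by the ll_*_merge_fn helpers.
 */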
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        return true;
}

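/*
 * Decide where @bio could be merged relative to @rq: at the back if it
 * starts right after the request ends, at the front if it ends right
 * before the request starts, otherwise no merge.
 */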
int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}