/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Helper for a batch of bios: counts in-flight bios and signals the
 * waiter once all of them have completed.
 */
struct bio_batch {
	atomic_t		done;
	int			error;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio)
{
	struct bio_batch *bb = bio->bi_private;

	/* Record errors, but treat -EOPNOTSUPP as harmless */
	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
		bb->error = bio->bi_error;
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}
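
/*
 * Usage sketch (informational, mirrors the callers below): @done is
 * biased to 1 at setup so the completion cannot fire while bios are
 * still being submitted:
 *
 *	atomic_set(&bb.done, 1);
 *	while (more work) {
 *		atomic_inc(&bb.done);
 *		submit_bio(type, bio);
 *	}
 *	if (!atomic_dec_and_test(&bb.done))	(drop the bias)
 *		wait_for_completion_io(&wait);
 */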

/*
 * Ensure that max discard sectors doesn't overflow bi_size.  As long
 * as the discard granularity is a power of two, this limit stays
 * aligned to it as well.
 */
#define MAX_BIO_SECTORS ((1U << 31) >> 9)
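
/*
 * For reference: (1U << 31) >> 9 == 4194304 sectors, i.e. at most
 * 2 GiB per bio, comfortably below the UINT_MAX limit of
 * bi_iter.bi_size.
 */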

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);	/* bias; dropped before waiting below */
	bb.error = 0;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
		end_sect = sector + req_sects;

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Drop the bias; wait only if bios are still in flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
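
/*
 * Example (hypothetical caller, not part of this file): discard an
 * entire device, mkfs-style.  i_size_read() on the bdev inode is the
 * usual way to get the device size in bytes.
 *
 *	static int discard_whole_device(struct block_device *bdev)
 *	{
 *		sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;
 *
 *		return blkdev_issue_discard(bdev, 0, nr_sects,
 *					    GFP_KERNEL, 0);
 *	}
 */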

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
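
/*
 * Example (hypothetical caller, not part of this file): zero a range
 * with WRITE SAME and ZERO_PAGE(0), after checking that the device
 * supports it.  blkdev_issue_zeroout() below uses the same trick.
 *
 *	if (bdev_write_same(bdev))
 *		err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					      GFP_KERNEL, ZERO_PAGE(0));
 */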

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes for
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			/* bio_add_page() returns the number of bytes added */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded. Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them. If the block device supports the WRITE SAME command
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range. Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
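
/*
 * Example (hypothetical caller, not part of this file): punch a
 * zeroed hole over a range, preferring discard when the device
 * guarantees discarded blocks read back as zeroes:
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, nr_sects,
 *				       GFP_KERNEL, true);
 *	if (err)
 *		pr_err("zeroout failed: %d\n", err);
 */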